diff --git a/INSTALL.md b/INSTALL.md
new file mode 100755
index 0000000..42fcf02
--- /dev/null
+++ b/INSTALL.md
@@ -0,0 +1,7 @@
+# Installation
+
+See http://caffe.berkeleyvision.org/installation.html for the latest
+installation instructions.
+
+Check the issue tracker in case you need help:
+https://github.com/BVLC/caffe/issues
diff --git a/LICENSE b/LICENSE
new file mode 100755
index 0000000..70ca21d
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,52 @@
+COPYRIGHT
+
+All contributions by the University of California:
+Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
+All rights reserved.
+
+All other contributions:
+Copyright (c) 2014, 2015, the respective contributors
+All rights reserved.
+
+Caffe uses a shared copyright model: each contributor holds copyright over
+their contributions to Caffe. The project versioning records all such
+contribution and copyright details. If a contributor wants to further mark
+their specific copyright on a particular contribution, they should indicate
+their copyright solely in the commit message of the change when it is
+committed.
+
+LICENSE
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+CONTRIBUTION AGREEMENT
+
+By contributing to the BVLC/caffe repository through pull-request, comment,
+or otherwise, the contributor releases their content to the
+license and copyright terms herein.
+
+INTEL LICENSE ON THE SURGERY PARTS
+
+Intel Corporation holds the license for the surgery-related code.
+
+LICENSE ON THE OPTIMIZATION PARTS
+
+University of Science and Technology of China holds the license for the optimization-related code.
diff --git a/Makefile b/Makefile
new file mode 100755
index 0000000..f66d7a6
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,633 @@
+PROJECT := caffe
+
+CONFIG_FILE := Makefile.config
+# Explicitly check for the config file, otherwise make -k will proceed anyway.
+ifeq ($(wildcard $(CONFIG_FILE)),)
+$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.)
+endif +include $(CONFIG_FILE) + +BUILD_DIR_LINK := $(BUILD_DIR) +ifeq ($(RELEASE_BUILD_DIR),) + RELEASE_BUILD_DIR := .$(BUILD_DIR)_release +endif +ifeq ($(DEBUG_BUILD_DIR),) + DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug +endif + +DEBUG ?= 0 +ifeq ($(DEBUG), 1) + BUILD_DIR := $(DEBUG_BUILD_DIR) + OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR) +else + BUILD_DIR := $(RELEASE_BUILD_DIR) + OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR) +endif + +# All of the directories containing code. +SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \ + \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print) + +# The target shared library name +LIB_BUILD_DIR := $(BUILD_DIR)/lib +STATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a +DYNAMIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so + +############################## +# Get all source files +############################## +# CXX_SRCS are the source files excluding the test ones. +CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp") +# CU_SRCS are the cuda source files +CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu") +# TEST_SRCS are the test source files +TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp +TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp") +TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS)) +TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu") +GTEST_SRC := src/gtest/gtest-all.cpp +# TOOL_SRCS are the source files for the tool binaries +TOOL_SRCS := $(shell find tools -name "*.cpp") +# EXAMPLE_SRCS are the source files for the example binaries +EXAMPLE_SRCS := $(shell find examples -name "*.cpp") +# BUILD_INCLUDE_DIR contains any generated header files we want to include. +BUILD_INCLUDE_DIR := $(BUILD_DIR)/src +# PROTO_SRCS are the protocol buffer definitions +PROTO_SRC_DIR := src/$(PROJECT)/proto +PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto) +# PROTO_BUILD_DIR will contain the .cc and obj files generated from +# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files +PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR) +PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto +# NONGEN_CXX_SRCS includes all source/header files except those generated +# automatically (e.g., by proto). 
+NONGEN_CXX_SRCS := $(shell find \ + src/$(PROJECT) \ + include/$(PROJECT) \ + python/$(PROJECT) \ + matlab/+$(PROJECT)/private \ + examples \ + tools \ + -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh") +LINT_SCRIPT := scripts/cpp_lint.py +LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint +LINT_EXT := lint.txt +LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS))) +EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT) +NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT) +# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT) +PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp +PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so +PY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp +# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT) +MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp +ifneq ($(MATLAB_DIR),) + MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext) +endif +MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT) + +############################## +# Derive generated files +############################## +# The generated files for protocol buffers +PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \ + $(notdir ${PROTO_SRCS:.proto=.pb.h})) +PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \ + $(notdir ${PROTO_SRCS:.proto=.pb.h})) +PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc}) +PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto +PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py +PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \ + $(PY_PROTO_BUILD_DIR)/$(notdir $(file))) +# The objects corresponding to the source files +# These objects will be linked into the final shared library, so we +# exclude the tool, example, and test objects. +CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o}) +CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o}) +PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o} +OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS) +# tool, example, and test objects +TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o}) +TOOL_BUILD_DIR := $(BUILD_DIR)/tools +TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test +TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test +TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o}) +TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o}) +TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS) +GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o}) +EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o}) +# Output files for automatic dependency generation +DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \ + ${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d} +# tool, example, and test bins +TOOL_BINS := ${TOOL_OBJS:.o=.bin} +EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin} +# symlinks to tool bins without the ".bin" extension +TOOL_BIN_LINKS := ${TOOL_BINS:.bin=} +# Put the test binaries in build/test for convenience. +TEST_BIN_DIR := $(BUILD_DIR)/test +TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ + $(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj)))))) +TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ + $(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj)))))) +TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS) +# TEST_ALL_BIN is the test binary that links caffe dynamically. 
+TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin + +############################## +# Derive compiler warning dump locations +############################## +WARNS_EXT := warnings.txt +CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)}) +CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)}) +TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)}) +EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)}) +TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)}) +TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)}) +ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS) +ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS) +ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS) + +EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT) +NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT) + +############################## +# Derive include and lib directories +############################## +CUDA_INCLUDE_DIR := $(CUDA_DIR)/include + +CUDA_LIB_DIR := +# add /lib64 only if it exists +ifneq ("$(wildcard $(CUDA_DIR)/lib64)","") + CUDA_LIB_DIR += $(CUDA_DIR)/lib64 +endif +CUDA_LIB_DIR += $(CUDA_DIR)/lib + +INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include +ifneq ($(CPU_ONLY), 1) + INCLUDE_DIRS += $(CUDA_INCLUDE_DIR) + LIBRARY_DIRS += $(CUDA_LIB_DIR) + LIBRARIES := cudart cublas curand +endif +LIBRARIES += glog gflags protobuf leveldb snappy \ + lmdb boost_system hdf5_serial_hl hdf5_serial m \ + opencv_core opencv_highgui opencv_imgproc +PYTHON_LIBRARIES := boost_python python2.7 +WARNINGS := -Wall -Wno-sign-compare + +############################## +# Set build directories +############################## + +DISTRIBUTE_DIR ?= distribute +DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib +DIST_ALIASES := dist +ifneq ($(strip $(DISTRIBUTE_DIR)),distribute) + DIST_ALIASES += distribute +endif + +ALL_BUILD_DIRS := $(sort $(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \ + $(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \ + $(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \ + $(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR)) + +############################## +# Set directory for Doxygen-generated documentation +############################## +DOXYGEN_CONFIG_FILE ?= ./.Doxyfile +# should be the same as OUTPUT_DIRECTORY in the .Doxyfile +DOXYGEN_OUTPUT_DIR ?= ./doxygen +DOXYGEN_COMMAND ?= doxygen +# All the files that might have Doxygen documentation. +DOXYGEN_SOURCES := $(shell find \ + src/$(PROJECT) \ + include/$(PROJECT) \ + python/ \ + matlab/ \ + examples \ + tools \ + -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \ + -name "*.py" -or -name "*.m") +DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE) + + +############################## +# Configure build +############################## + +# Determine platform +UNAME := $(shell uname -s) +ifeq ($(UNAME), Linux) + LINUX := 1 +else ifeq ($(UNAME), Darwin) + OSX := 1 +endif + +# Linux +ifeq ($(LINUX), 1) + CXX ?= /usr/bin/g++ + GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) + # older versions of gcc are too dumb to build boost with -Wuninitalized + ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1) + WARNINGS += -Wno-uninitialized + endif + # boost::thread is reasonably called boost_thread (compare OS X) + # We will also explicitly add stdc++ to the link target. 
+ LIBRARIES += boost_thread stdc++ +endif + +# OS X: +# clang++ instead of g++ +# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0 +ifeq ($(OSX), 1) + CXX := /usr/bin/clang++ + ifneq ($(CPU_ONLY), 1) + CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') + ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1) + CXXFLAGS += -stdlib=libstdc++ + LINKFLAGS += -stdlib=libstdc++ + endif + # clang throws this warning for cuda headers + WARNINGS += -Wno-unneeded-internal-declaration + endif + # gtest needs to use its own tuple to not conflict with clang + COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1 + # boost::thread is called boost_thread-mt to mark multithreading on OS X + LIBRARIES += boost_thread-mt + # we need to explicitly ask for the rpath to be obeyed + DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so + ORIGIN := @loader_path +else + ORIGIN := \$$ORIGIN +endif + +# Custom compiler +ifdef CUSTOM_CXX + CXX := $(CUSTOM_CXX) +endif + +# Static linking +ifneq (,$(findstring clang++,$(CXX))) + STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME) +else ifneq (,$(findstring g++,$(CXX))) + STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive +else + # The following line must not be indented with a tab, since we are not inside a target + $(error Cannot static link with the $(CXX) compiler) +endif + +# Debugging +ifeq ($(DEBUG), 1) + COMMON_FLAGS += -DDEBUG -g -O0 + NVCCFLAGS += -G +else + COMMON_FLAGS += -DNDEBUG -O2 +endif + +# cuDNN acceleration configuration. +ifeq ($(USE_CUDNN), 1) + LIBRARIES += cudnn + COMMON_FLAGS += -DUSE_CUDNN +endif + +# CPU-only configuration +ifeq ($(CPU_ONLY), 1) + OBJS := $(PROTO_OBJS) $(CXX_OBJS) + TEST_OBJS := $(TEST_CXX_OBJS) + TEST_BINS := $(TEST_CXX_BINS) + ALL_WARNS := $(ALL_CXX_WARNS) + TEST_FILTER := --gtest_filter="-*GPU*" + COMMON_FLAGS += -DCPU_ONLY +endif + +# Python layer support +ifeq ($(WITH_PYTHON_LAYER), 1) + COMMON_FLAGS += -DWITH_PYTHON_LAYER + LIBRARIES += $(PYTHON_LIBRARIES) +endif + +# BLAS configuration (default = ATLAS) +BLAS ?= atlas +ifeq ($(BLAS), mkl) + # MKL + LIBRARIES += mkl_rt + COMMON_FLAGS += -DUSE_MKL + MKL_DIR ?= /opt/intel/mkl + BLAS_INCLUDE ?= $(MKL_DIR)/include + BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64 +else ifeq ($(BLAS), open) + # OpenBLAS + LIBRARIES += openblas +else + # ATLAS + ifeq ($(LINUX), 1) + ifeq ($(BLAS), atlas) + # Linux simply has cblas and atlas + LIBRARIES += cblas atlas + endif + else ifeq ($(OSX), 1) + # OS X packages atlas as the vecLib framework + LIBRARIES += cblas + # 10.10 has accelerate while 10.9 has veclib + XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6') + ifneq (,$(findstring version: 6,$(XCODE_CLT_VER))) + BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/ + LDFLAGS += -framework Accelerate + else + BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/ + LDFLAGS += -framework vecLib + endif + endif +endif +INCLUDE_DIRS += $(BLAS_INCLUDE) +LIBRARY_DIRS += $(BLAS_LIB) + +LIBRARY_DIRS += $(LIB_BUILD_DIR) + +# Automatic dependency generation (nvcc is handled separately) +CXXFLAGS += -MMD -MP + +# Complete build flags. 
+COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) +CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) +NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) +# mex may invoke an older gcc that is too liberal with -Wuninitalized +MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized +LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) + +USE_PKG_CONFIG ?= 0 +ifeq ($(USE_PKG_CONFIG), 1) + PKG_CONFIG := $(shell pkg-config opencv --libs) +else + PKG_CONFIG := +endif +LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \ + $(foreach library,$(LIBRARIES),-l$(library)) +PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library)) + +# 'superclean' target recursively* deletes all files ending with an extension +# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older +# versions of Caffe that do not place all generated files in a location known +# to the 'clean' target. +# +# 'supercleanlist' will list the files to be deleted by make superclean. +# +# * Recursive with the exception that symbolic links are never followed, per the +# default behavior of 'find'. +SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo + +# Set the sub-targets of the 'everything' target. +EVERYTHING_TARGETS := all py$(PROJECT) test warn lint +# Only build matcaffe as part of "everything" if MATLAB_DIR is specified. +ifneq ($(MATLAB_DIR),) + EVERYTHING_TARGETS += mat$(PROJECT) +endif + +############################## +# Define build targets +############################## +.PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ + py mat py$(PROJECT) mat$(PROJECT) proto runtest \ + superclean supercleanlist supercleanfiles warn everything + +all: lib tools examples + +lib: $(STATIC_NAME) $(DYNAMIC_NAME) + +everything: $(EVERYTHING_TARGETS) + +linecount: + cloc --read-lang-def=$(PROJECT).cloc \ + src/$(PROJECT) include/$(PROJECT) tools examples \ + python matlab + +lint: $(EMPTY_LINT_REPORT) + +lintclean: + @ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT) + +docs: $(DOXYGEN_OUTPUT_DIR) + @ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen + +$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES) + $(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE) + +$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR) + @ cat $(LINT_OUTPUTS) > $@ + @ if [ -s "$@" ]; then \ + cat $@; \ + mv $@ $(NONEMPTY_LINT_REPORT); \ + echo "Found one or more lint errors."; \ + exit 1; \ + fi; \ + $(RM) $(NONEMPTY_LINT_REPORT); \ + echo "No lint errors!"; + +$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR) + @ mkdir -p $(dir $@) + @ python $(LINT_SCRIPT) $< 2>&1 \ + | grep -v "^Done processing " \ + | grep -v "^Total errors found: 0" \ + > $@ \ + || true + +test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS) + +tools: $(TOOL_BINS) $(TOOL_BIN_LINKS) + +examples: $(EXAMPLE_BINS) + +py$(PROJECT): py + +py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY) + +$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ $< + $(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \ + -o $@ $(LINKFLAGS) -l$(PROJECT) $(PYTHON_LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../../build/lib + +mat$(PROJECT): mat + +mat: $(MAT$(PROJECT)_SO) + +$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME) + @ if [ -z "$(MATLAB_DIR)" ]; then \ + echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \ + "to build mat$(PROJECT)."; \ + exit 1; \ + fi + @ 
echo MEX $< + $(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \ + CXX="$(CXX)" \ + CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \ + CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@ + @ if [ -f "$(PROJECT)_.d" ]; then \ + mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \ + fi + +runtest: $(TEST_ALL_BIN) + $(TOOL_BUILD_DIR)/caffe + $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER) + +pytest: py + cd python; python -m unittest discover -s caffe/test + +mattest: mat + cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()' + +warn: $(EMPTY_WARN_REPORT) + +$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR) + @ cat $(ALL_WARNS) > $@ + @ if [ -s "$@" ]; then \ + cat $@; \ + mv $@ $(NONEMPTY_WARN_REPORT); \ + echo "Compiler produced one or more warnings."; \ + exit 1; \ + fi; \ + $(RM) $(NONEMPTY_WARN_REPORT); \ + echo "No compiler warnings!"; + +$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o + +$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked + +# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link +# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it +# exists and $(DEBUG) is toggled later. +$(BUILD_DIR)/.linked: + @ mkdir -p $(BUILD_DIR) + @ $(RM) $(OTHER_BUILD_DIR)/.linked + @ $(RM) -r $(BUILD_DIR_LINK) + @ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK) + @ touch $@ + +$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK) + @ mkdir -p $@ + +$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) + @ echo LD -o $@ + $(Q)$(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS) + +$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) + @ echo AR -o $@ + $(Q)ar rcs $@ $(OBJS) + +$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS) + @ echo CXX $< + $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ + | $(PROTO_BUILD_DIR) + @ echo CXX $< + $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) + @ echo NVCC $< + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ + -odir $(@D) + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ + | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo CXX/LD -o $@ $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \ + $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo LD $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \ + $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo LD $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +# Target for extension-less symlinks to tool binaries with extension '*.bin'. 
+$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR) + @ $(RM) $@ + @ ln -s $(abspath $<) $@ + +$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ + $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../lib + +$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ + $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../../lib + +proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) + +$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \ + $(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR) + @ echo PROTOC $< + $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $< + +$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \ + $(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR) + @ echo PROTOC \(python\) $< + $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $< + +$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR) + touch $(PY_PROTO_INIT) + +clean: + @- $(RM) -rf $(ALL_BUILD_DIRS) + @- $(RM) -rf $(OTHER_BUILD_DIR) + @- $(RM) -rf $(BUILD_DIR_LINK) + @- $(RM) -rf $(DISTRIBUTE_DIR) + @- $(RM) $(PY$(PROJECT)_SO) + @- $(RM) $(MAT$(PROJECT)_SO) + +supercleanfiles: + $(eval SUPERCLEAN_FILES := $(strip \ + $(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . -name '*$(ext)' \ + -not -path './data/*')))) + +supercleanlist: supercleanfiles + @ \ + if [ -z "$(SUPERCLEAN_FILES)" ]; then \ + echo "No generated files found."; \ + else \ + echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ + fi + +superclean: clean supercleanfiles + @ \ + if [ -z "$(SUPERCLEAN_FILES)" ]; then \ + echo "No generated files found."; \ + else \ + echo "Deleting the following generated files:"; \ + echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ + $(RM) $(SUPERCLEAN_FILES); \ + fi + +$(DIST_ALIASES): $(DISTRIBUTE_DIR) + +$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS) + # add include + cp -r include $(DISTRIBUTE_DIR)/ + mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto + cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto + # add tool and example binaries + cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin + cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin + # add libraries + cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib + cp $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib + # add python - it's not the standard way, indeed... + cp -r python $(DISTRIBUTE_DIR)/python + +-include $(DEPS) diff --git a/Makefile.config b/Makefile.config new file mode 100755 index 0000000..4824e8e --- /dev/null +++ b/Makefile.config @@ -0,0 +1,95 @@ +## Refer to http://caffe.berkeleyvision.org/installation.html +# Contributions simplifying and improving our build system are welcome! + +# cuDNN acceleration switch (uncomment to build with cuDNN). +#USE_CUDNN := 1 + +# CPU-only switch (uncomment to build without GPU support). +# CPU_ONLY := 1 + +# To customize your choice of compiler, uncomment and set the following. +# N.B. the default for Linux is g++ and the default for OSX is clang++ +CUSTOM_CXX := g++-5 + +# CUDA directory contains bin/ and lib/ directories that we need. +CUDA_DIR := /usr/local/cuda +# On Ubuntu 14.04, if cuda tools are installed via +# "sudo apt-get install nvidia-cuda-toolkit" then use this instead: +# CUDA_DIR := /usr + +# CUDA architecture setting: going with all of them. +# For CUDA < 6.0, comment the *_50 lines for compatibility. 
+CUDA_ARCH := -gencode arch=compute_20,code=sm_20 \ + -gencode arch=compute_20,code=sm_21 \ + -gencode arch=compute_30,code=sm_30 \ + -gencode arch=compute_35,code=sm_35 \ + -gencode arch=compute_50,code=sm_50 \ + -gencode arch=compute_50,code=compute_50 \ + -gencode arch=compute_60,code=sm_60 \ + -gencode arch=compute_60,code=compute_60 \ + +# BLAS choice: +# atlas for ATLAS (default) +# mkl for MKL +# open for OpenBlas +BLAS := atlas +# Custom (MKL/ATLAS/OpenBLAS) include and lib directories. +# Leave commented to accept the defaults for your choice of BLAS +# (which should work)! +# BLAS_INCLUDE := /path/to/your/blas +# BLAS_LIB := /path/to/your/blas + +# Homebrew puts openblas in a directory that is not on the standard search path +# BLAS_INCLUDE := $(shell brew --prefix openblas)/include +# BLAS_LIB := $(shell brew --prefix openblas)/lib + +# This is required only if you will compile the matlab interface. +# MATLAB directory should contain the mex binary in /bin. +# MATLAB_DIR := /usr/local +# MATLAB_DIR := /Applications/MATLAB_R2012b.app + +# NOTE: this is required only if you will compile the python interface. +# We need to be able to find Python.h and numpy/arrayobject.h. +PYTHON_INCLUDE := /usr/include/python2.7 \ + /usr/lib/python2.7/dist-packages/numpy/core/include +# Anaconda Python distribution is quite popular. Include path: +# Verify anaconda location, sometimes it's in root. +# ANACONDA_HOME := $(HOME)/anaconda +# PYTHON_INCLUDE := $(ANACONDA_HOME)/include \ + # $(ANACONDA_HOME)/include/python2.7 \ + # $(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include \ + +# We need to be able to find libpythonX.X.so or .dylib. +PYTHON_LIB := /usr/lib +# PYTHON_LIB := $(ANACONDA_HOME)/lib + +# Homebrew installs numpy in a non standard path (keg only) +# PYTHON_INCLUDE += $(dir $(shell python -c 'import numpy.core; print(numpy.core.__file__)'))/include +# PYTHON_LIB += $(shell brew --prefix numpy)/lib + +# Uncomment to support layers written in Python (will link against Python libs) +WITH_PYTHON_LAYER := 1 + +# Whatever else you find you need goes here. +INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include /usr/include/hdf5/serial/ +LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib + +# If Homebrew is installed at a non standard location (for example your home directory) and you use it for general dependencies +# INCLUDE_DIRS += $(shell brew --prefix)/include +# LIBRARY_DIRS += $(shell brew --prefix)/lib + +# Uncomment to use `pkg-config` to specify OpenCV library paths. +# (Usually not necessary -- OpenCV libraries are normally installed in one of the above $LIBRARY_DIRS.) +# USE_PKG_CONFIG := 1 + +BUILD_DIR := build +DISTRIBUTE_DIR := distribute + +# Uncomment for debugging. Does not work on OSX due to https://github.com/BVLC/caffe/issues/171 +# DEBUG := 1 + +# The ID of the GPU that 'make runtest' will use to run unit tests. +TEST_GPUID := 0 + +# enable pretty build (comment to see full commands) +Q ?= @ diff --git a/Makefile.config.example b/Makefile.config.example new file mode 100755 index 0000000..a873502 --- /dev/null +++ b/Makefile.config.example @@ -0,0 +1,93 @@ +## Refer to http://caffe.berkeleyvision.org/installation.html +# Contributions simplifying and improving our build system are welcome! + +# cuDNN acceleration switch (uncomment to build with cuDNN). +# USE_CUDNN := 1 + +# CPU-only switch (uncomment to build without GPU support). +# CPU_ONLY := 1 + +# To customize your choice of compiler, uncomment and set the following. +# N.B. 
the default for Linux is g++ and the default for OSX is clang++ +# CUSTOM_CXX := g++ + +# CUDA directory contains bin/ and lib/ directories that we need. +CUDA_DIR := /usr/local/cuda +# On Ubuntu 14.04, if cuda tools are installed via +# "sudo apt-get install nvidia-cuda-toolkit" then use this instead: +# CUDA_DIR := /usr + +# CUDA architecture setting: going with all of them. +# For CUDA < 6.0, comment the *_50 lines for compatibility. +CUDA_ARCH := -gencode arch=compute_20,code=sm_20 \ + -gencode arch=compute_20,code=sm_21 \ + -gencode arch=compute_30,code=sm_30 \ + -gencode arch=compute_35,code=sm_35 \ + -gencode arch=compute_50,code=sm_50 \ + -gencode arch=compute_50,code=compute_50 + +# BLAS choice: +# atlas for ATLAS (default) +# mkl for MKL +# open for OpenBlas +BLAS := atlas +# Custom (MKL/ATLAS/OpenBLAS) include and lib directories. +# Leave commented to accept the defaults for your choice of BLAS +# (which should work)! +# BLAS_INCLUDE := /path/to/your/blas +# BLAS_LIB := /path/to/your/blas + +# Homebrew puts openblas in a directory that is not on the standard search path +# BLAS_INCLUDE := $(shell brew --prefix openblas)/include +# BLAS_LIB := $(shell brew --prefix openblas)/lib + +# This is required only if you will compile the matlab interface. +# MATLAB directory should contain the mex binary in /bin. +# MATLAB_DIR := /usr/local +# MATLAB_DIR := /Applications/MATLAB_R2012b.app + +# NOTE: this is required only if you will compile the python interface. +# We need to be able to find Python.h and numpy/arrayobject.h. +PYTHON_INCLUDE := /usr/include/python2.7 \ + /usr/lib/python2.7/dist-packages/numpy/core/include +# Anaconda Python distribution is quite popular. Include path: +# Verify anaconda location, sometimes it's in root. +# ANACONDA_HOME := $(HOME)/anaconda +# PYTHON_INCLUDE := $(ANACONDA_HOME)/include \ + # $(ANACONDA_HOME)/include/python2.7 \ + # $(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include \ + +# We need to be able to find libpythonX.X.so or .dylib. +PYTHON_LIB := /usr/lib +# PYTHON_LIB := $(ANACONDA_HOME)/lib + +# Homebrew installs numpy in a non standard path (keg only) +# PYTHON_INCLUDE += $(dir $(shell python -c 'import numpy.core; print(numpy.core.__file__)'))/include +# PYTHON_LIB += $(shell brew --prefix numpy)/lib + +# Uncomment to support layers written in Python (will link against Python libs) +# WITH_PYTHON_LAYER := 1 + +# Whatever else you find you need goes here. +INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include +LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib + +# If Homebrew is installed at a non standard location (for example your home directory) and you use it for general dependencies +# INCLUDE_DIRS += $(shell brew --prefix)/include +# LIBRARY_DIRS += $(shell brew --prefix)/lib + +# Uncomment to use `pkg-config` to specify OpenCV library paths. +# (Usually not necessary -- OpenCV libraries are normally installed in one of the above $LIBRARY_DIRS.) +# USE_PKG_CONFIG := 1 + +BUILD_DIR := build +DISTRIBUTE_DIR := distribute + +# Uncomment for debugging. Does not work on OSX due to https://github.com/BVLC/caffe/issues/171 +# DEBUG := 1 + +# The ID of the GPU that 'make runtest' will use to run unit tests. 
+TEST_GPUID := 0
+
+# enable pretty build (comment to see full commands)
+Q ?= @
diff --git a/README.md b/README.md
new file mode 100755
index 0000000..6cf1c4c
--- /dev/null
+++ b/README.md
@@ -0,0 +1,251 @@
+# Optimization based Layer-wise Magnitude-based Pruning for DNN Compression
+Thank you to everyone who is interested in our work.
+This repository is the implementation of OLMP. In the experiments on LeNet-5 and LeNet-300-100, we have fixed the random seeds in the python scripts so that the results shown in our paper can be reproduced. For AlexNet-Caltech, unfortunately, its dropout layers use a random seed inside the Caffe framework, which we did not record during our experiments. Instead, we provide the compressed model of AlexNet-Caltech whose results are reported in our paper. Users can also run the AlexNet-Caltech script several times to reproduce results similar to the ones in our paper.
+
+This project is based on [Caffe](https://github.com/BVLC/caffe) and [Dynamic surgery](https://github.com/yiwenguo/Dynamic-Network-Surgery). Thanks to the authors of these two projects.
+
+## Testing environment
+- Docker image: kaixhin/cuda-caffe:8.0
+  - Ubuntu 16.04.2 LTS
+  - g++ 5.4.0
+  - python 2.7.12
+- 1 x NVIDIA TITAN X Pascal
+- 2 x Intel(R) Xeon(R) CPU E5-2683 v3 @ 2.00GHz
+- 64 GB Memory
+
+## Requirements
+- The requirements are the same as Caffe's.
+- The easydict python package
+
+## Installation
+- Install all the requirements of Caffe. You can also download a docker image of Caffe directly.
+- Go into the first level of the project folder, namely ./OLMP.
+- Check the file "Makefile.config" to ensure all the settings are suitable for your own environment, e.g., the version of g++.
+- make all
+- make pycaffe
+- pip install easydict (version 1.9 was tested)
+
+### Problems
+Most problems during the build are caused by environment settings. Please refer to https://github.com/BVLC/caffe/issues for help.
+
+## Data
+We upload the data to Baidu Wangpan (usage: paste the link into your browser and use the password to download the file).
+
+For MNIST:
+
+link: https://pan.baidu.com/s/17lem8wVV9nd\_dZxVd8FsHA
+
+password: 40fa
+
+For Caltech-256:
+
+link: https://pan.baidu.com/s/1eezA0uCKHy0OLCz34XBHUQ
+
+password: v5s8
+
+## Tutorial
+For all the experiments below, the user should edit the data paths in the corresponding .prototxt files.
+
+* To compress the LeNet-300-100 model, first make sure the data path in ./models/lenet300100/lenet\_train\_test.prototxt is correct.
+
+Then run:
+```
+python exp_lenet300100.py
+```
+
+* To compress the LeNet-5 model, please run:
+```
+python exp_lenet5.py
+```
+
+* To compress the AlexNet-Caltech model, please run:
+```
+python exp_caltech.py
+```
+Note that the reference model is too large to upload to GitHub, so we upload it to Baidu Wangpan:
+
+Reference model of AlexNet-Caltech:
+
+link: https://pan.baidu.com/s/1cWrgx29icUR680U1mm9YoA
+
+password: 8r48
+
+(Usage: paste the link into your browser and use the password to download the file)
+
+### Check the compressed model
+
+For LeNet-300-100 and LeNet-5, the user will find that the compression results are the same as those reported in our paper. Alternatively, the user can run sparsity\_lenet5.py and sparsity\_lenet300100.py to check the sparsity of the models compressed by us; a minimal sketch of such a check is shown below.
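+
+Such a check follows the same idea as check_sparsity.py in this repository: load the pruned model with pycaffe and count the non-zero entries of the weight and bias masks (parameter blobs 2 and 3 of the compressed layers). The snippet below is only an illustrative sketch; the model paths and the layer names (`ip1`, `ip2`, `ip3`) are placeholders and should be replaced with the names from the corresponding prototxt.
+```
+import sys
+sys.path.insert(0, './python/')
+import caffe
+import numpy as np
+
+def layer_sparsity(net, layer, w_m=2, b_m=3):
+    # blobs 0/1 hold the weights/biases, blobs 2/3 hold the pruning masks
+    kept = np.count_nonzero(net.params[layer][w_m].data) \
+         + np.count_nonzero(net.params[layer][b_m].data)
+    total = net.params[layer][0].data.size + net.params[layer][1].data.size
+    return kept, total
+
+# placeholder paths -- adapt them to the model being checked
+net = caffe.Net('./models/lenet300100/lenet_train_test.prototxt',
+                './models/lenet300100/compressed_lenet300100.caffemodel',
+                caffe.TEST)
+kept_all, total_all = 0, 0
+for layer in ['ip1', 'ip2', 'ip3']:  # placeholder layer names
+    kept, total = layer_sparsity(net, layer)
+    kept_all += kept
+    total_all += total
+    print '{}: {:.2f}% of parameters kept'.format(layer, kept * 100. / total)
+print 'overall pruning ratio: {:.1f}x'.format(total_all * 1. / kept_all)
+```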
+
+For LeNet-300-100, the model compressed by us is provided at:
+```
+./models/lenet300100/compressed_lenet300100.caffemodel
+```
+Run sparsity\_lenet300100.py to check the sparsity.
+
+For LeNet-5, the model compressed by us is provided at:
+```
+./models/lenet5/compressed_lenet5.caffemodel
+```
+
+For AlexNet-Caltech, the results may differ from run to run. Since we did not fix the random seed for the dropout operation, the results are not guaranteed to be the same as those in our paper. Considering this, we provide the model compressed by us.
+
+Compressed model of AlexNet-Caltech:
+
+link: https://pan.baidu.com/s/1qdsAEsBYFe6zTnmX\_yO8ZA
+
+password: 3ygh
+
+(Usage: paste the link into your browser and use the password to download the file)
+
+
+### Output format
+Take the output of exp\_lenet5.py as an example:
+```
+I1129 04:04:49.392139 6877 solver.cpp:226] Iteration 29600, loss = 0.152239
+I1129 04:04:49.392174 6877 solver.cpp:242] Train net output #0: accuracy = 0.96875
+I1129 04:04:49.392191 6877 solver.cpp:242] Train net output #1: loss = 0.152239 (\* 1 = 0.152239 loss)
+I1129 04:04:49.392267 6877 solver.cpp:521] Iteration 29600, lr = 0.00356228
+I1129 04:04:50.325364 6877 solver.cpp:226] Iteration 29700, loss = 0.00853293
+I1129 04:04:50.325392 6877 solver.cpp:242] Train net output #0: accuracy = 1
+I1129 04:04:50.325405 6877 solver.cpp:242] Train net output #1: loss = 0.00853293 (\* 1 = 0.00853293 loss)
+I1129 04:04:50.325415 6877 solver.cpp:521] Iteration 29700, lr = 0.00355555
+I1129 04:04:51.243219 6877 solver.cpp:226] Iteration 29800, loss = 0.0735124
+I1129 04:04:51.243247 6877 solver.cpp:242] Train net output #0: accuracy = 0.96875
+I1129 04:04:51.243260 6877 solver.cpp:242] Train net output #1: loss = 0.0735124 (\* 1 = 0.0735124 loss)
+I1129 04:04:51.243270 6877 solver.cpp:521] Iteration 29800, lr = 0.00354885
+I1129 04:04:52.162196 6877 solver.cpp:226] Iteration 29900, loss = 0.0591469
+I1129 04:04:52.162223 6877 solver.cpp:242] Train net output #0: accuracy = 0.984375
+I1129 04:04:52.162238 6877 solver.cpp:242] Train net output #1: loss = 0.0591469 (\* 1 = 0.0591469 loss)
+I1129 04:04:52.162248 6877 solver.cpp:521] Iteration 29900, lr = 0.00354218
+I1129 04:04:53.071841 6877 solver.cpp:399] Snapshotting to binary proto file models/lenet5/10_lenet_iter_30000.caffemodel
+I1129 04:04:53.084738 6877 solver.cpp:684] Snapshotting solver state to binary proto filemodels/lenet5/10_lenet_iter_30000.solverstate
+I1129 04:04:53.091256 6877 solver.cpp:314] Iteration 30000, Testing net (#0)
+I1129 04:04:53.717361 6877 solver.cpp:363] Test net output #0: accuracy = 0.9909
+I1129 04:04:53.717402 6877 solver.cpp:363] Test net output #1: loss = 0.0321025 (\* 1 = 0.0321025 loss)
+I1129 04:04:53.724666 6877 solver.cpp:226] Iteration 30000, loss = 0.00549194
+I1129 04:04:53.724690 6877 solver.cpp:242] Train net output #0: accuracy = 1
+I1129 04:04:53.724704 6877 solver.cpp:242] Train net output #1: loss = 0.00549194 (\* 1 = 0.00549194 loss)
+I1129 04:04:53.724714 6877 solver.cpp:521] Iteration 30000, lr = 0.00353553
+Compression:297.70718232, Accuracy:1.0
+random seed:981118
+```
+In the output text, "random seed" indicates the random seed used in the python script. Note that the random seed in the C++ code is not fixed, so if the network contains random operations like dropout, the "random seed" setting has no effect.
+
+"Compression: xxx, Accuracy: xxx" indicates the current Pruning Ratio and the accuracy of the pruned model on the current batch.
+For example
+```
+I1129 04:04:53.724714 6877 solver.cpp:521] Iteration 30000, lr = 0.00353553
+Compression:297.70718232, Accuracy:1.0
+```
+means the Pruning Ratio is 297.7 and the accuracy of the pruned model on the batch of iteration 30000 is 100%.
+
+```
+I1129 04:04:53.091256 6877 solver.cpp:314] Iteration 30000, Testing net (#0)
+I1129 04:04:53.717361 6877 solver.cpp:363] Test net output #0: accuracy = 0.9909
+I1129 04:04:53.717402 6877 solver.cpp:363] Test net output #1: loss = 0.0321025 (\* 1 = 0.0321025 loss)
+```
+This indicates the accuracy of the pruned model on the whole testing set. Here the testing accuracy is 0.9909, which is the same as the accuracy of the reference model.
+
+### How to customize
+- The project is based on Dynamic Surgery, so the framework is similar.
+- First, the user should edit the prototxt of the model and change the type of the convolutional and inner-product layers to "CConvolution" and "CInnerProduct". Note that cinner\_product\_param and cconvolution\_param should also be specified, but their values can be arbitrary because they do not actually affect the pruning. For this step, take models/lenet5/lenet\_train\_test.prototxt, models/lenet300100/lenet\_train\_test.prototxt and models/caltech\_caffenet/train\_val\_caltech.prototxt as examples. This is similar to Dynamic Surgery.
+- Second, the user should write a python file to compress the models. Take exp\_lenet300100.py, exp\_lenet5.py and exp\_caltech.py as examples. All the pruning hyper-parameters are specified in the python scripts.
+
+## Explanation for code
+For the python scripts, we have already written detailed comments inside the scripts.
+
+For the C++ code, we have edited ./src/caffe/layers/compress\_inner\_product\_layer.cu and ./src/caffe/layers/compress\_conv\_layer.cu. In the forward pass:
+```
+template <typename Dtype>
+void CConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top) {
+...
+  // added by Guiying Li
+  bool _update = false;
+  Dtype* tmp_weightMask = this->blobs_[2]->mutable_cpu_data();
+  if (tmp_weightMask[0] > 1){
+    _update = true;
+    this->crate = tmp_weightMask[0] - 1;
+    tmp_weightMask[0] = 1;
+  } else if (tmp_weightMask[0] < 0){
+    _update = true;
+    this->crate = -tmp_weightMask[0];
+    tmp_weightMask[0] = 0;
+  }
+  weightMask = this->blobs_[2]->mutable_gpu_data();//update data
+  // -------Guiying------
+...
+}
+```
+The first value of the mask is extracted: if it is larger than 1, it is composed of (the current crate of this layer) + (a mask value of 1); if it is smaller than 0, it is composed of (the current crate of this layer) \* (-1) + (a mask value of 0). Note that the first value of the mask can be edited by the user through the python API, so python code can be used to control the pruning. For example, writing 1.8 into the first element of a layer's weight mask requests crate = 0.8 for that layer while keeping that element's mask value at 1.
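+
+For illustration only, the same encoding can also be decoded from the Python side. The helper below is just a sketch and is not part of this repository; it mirrors the C++ logic above, assuming a net loaded with pycaffe whose compressed layers keep their weight mask in parameter blob 2:
+```
+# Hypothetical helper: report whether a crate update is pending for a layer.
+# Returns (first mask value, pending crate or None).
+def read_prune_request(thenet, layer_id):
+    mask0 = thenet.params[layer_id][2].data.ravel()[0]
+    if mask0 > 1:        # update pending; the first mask value stays 1
+        return 1, mask0 - 1
+    elif mask0 < 0:      # update pending; the first mask value becomes 0
+        return 0, -mask0
+    else:                # no pending update; mask0 is the plain mask value
+        return int(mask0), None
+```
+The encoding direction, as actually used in our scripts, is shown next.
+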
+Take exp\_lenet300100.py as an example:
+```
+# Set the crates of each layer; the pruning will happen in the next forward pass
+def apply_prune(thenet, _crates):
+    '''
+    thenet: the model to be pruned
+    _crates: the list of crates for layers
+    '''
+    for _id in range(len(layer_name)):
+        if _crates[_id] < 0:
+            continue
+        layer_id = layer_name[_id]
+        mask0 = thenet.params[layer_id][2].data.ravel()[0]
+        if mask0 == 0:
+            thenet.params[layer_id][2].data.ravel()[0] = -_crates[_id]
+        elif mask0 == 1:
+            thenet.params[layer_id][2].data.ravel()[0] = 1+_crates[_id]
+        else:
+            pdb.set_trace()
+
+```
+Here, once the algorithm has chosen the crates for each layer (the pruning-related hyper-parameters), the python script transfers these crates to the pruning process by encoding them into the first element of each layer's mask.
+
+## Citation
+Please cite our work as:
+
+```
+@inproceedings{li2018olmp,
+  title = {Optimization based Layer-wise Magnitude-based Pruning for DNN Compression},
+  author = {Guiying Li and Chao Qian and Chunhui Jiang and Xiaofen Lu and Ke Tang},
+  booktitle = {International Joint Conference on Artificial Intelligence (IJCAI)},
+  address={Stockholm, Sweden},
+  pages={2383--2389},
+  year = {2018}
+}
+```
+The following citations may also be needed:
+
+Caffe:
+
+```
+@article{jia2014caffe,
+  Author = {Yangqing Jia and Evan Shelhamer and Jeff Donahue and Sergey Karayev and Jonathan Long and Ross Girshick and Sergio Guadarrama and Trevor Darrell},
+  Journal = {arXiv:1408.5093},
+  Title = {Caffe: Convolutional Architecture for Fast Feature Embedding},
+  Year = {2014}
+}
+```
+
+Dynamic surgery:
+
+```
+@inproceedings{guo2016dynamic,
+  title = {Dynamic Network Surgery for Efficient DNNs},
+  author = {Yiwen Guo and Anbang Yao and Yurong Chen},
+  booktitle = {Advances in neural information processing systems (NIPS)},
+  address={Barcelona, Spain},
+  pages={1379--1387},
+  year = {2016}
+}
+```
+
+Negatively Correlated Search:
+```
+@article{tang2016negatively,
+  author={Ke Tang and Peng Yang and Xin Yao},
+  title={Negatively correlated search},
+  journal={IEEE Journal on Selected Areas in Communications},
+  volume={34},
+  number={3},
+  pages={542--550},
+  year={2016}
+}
+```
diff --git a/check_sparsity.py b/check_sparsity.py
new file mode 100755
index 0000000..0dbe14d
--- /dev/null
+++ b/check_sparsity.py
@@ -0,0 +1,54 @@
+import sys
+sys.path.insert(0, './python/')
+import caffe
+import numpy as np
+import pdb
+#weights='/home/gitProject/Dynamic-Network-Surgery/models/lenet5/10_lenet_iter_28000.caffemodel'
+weights='/home/gitProject/Dynamic-Network-Surgery/models/caltech_caffenet/caltech_caffenet_train_iter_15000.caffemodel'
+#weights='/home/gitProject/Dynamic-Network-Surgery/models/lenet5/caffe_lenet5_original.caffemodel'
+#weights='/home/gitProject/Dynamic-Network-Surgery/models/lenet5/caffe_lenet5_sparse.caffemodel'
+proto='/home/gitProject/Dynamic-Network-Surgery/models/caltech_caffenet/train_val_caltech.prototxt'
+net=caffe.Net(proto, weights, caffe.TEST)
+total=0
+aa=0
+w_m=2
+b_m=3
+
+a1=len(np.where(net.params['conv1'][b_m].data != 0)[0])
+a2=len(np.where(net.params['conv1'][w_m].data != 0)[0])
+a3=len(np.where(net.params['conv2'][w_m].data != 0)[0])
+a4=len(np.where(net.params['conv2'][b_m].data != 0)[0])
+a5=len(np.where(net.params['conv3'][w_m].data != 0)[0])
+a6=len(np.where(net.params['conv3'][b_m].data != 0)[0])
+a7=len(np.where(net.params['conv4'][w_m].data != 0)[0])
+a8=len(np.where(net.params['conv4'][b_m].data != 0)[0])
+a9=len(np.where(net.params['conv5'][w_m].data != 0)[0])
+a10=len(np.where(net.params['conv5'][b_m].data != 0)[0]) +a11=len(np.where(net.params['fc6'][b_m].data != 0)[0]) +a12=len(np.where(net.params['fc6'][w_m].data != 0)[0]) +a13=len(np.where(net.params['fc7'][w_m].data != 0)[0]) +a14=len(np.where(net.params['fc7'][b_m].data != 0)[0]) +a15=len(np.where(net.params['fc8*'][w_m].data != 0)[0]) +a16=len(np.where(net.params['fc8*'][b_m].data != 0)[0]) + +b1=net.params['conv1'][0].data.size+net.params['conv1'][1].data.size +b2=net.params['conv2'][0].data.size+net.params['conv2'][1].data.size +b3=net.params['conv3'][0].data.size+net.params['conv3'][1].data.size +b4=net.params['conv4'][0].data.size+net.params['conv4'][1].data.size +b5=net.params['conv5'][0].data.size+net.params['conv5'][1].data.size +b6=net.params['fc6'][0].data.size+net.params['fc6'][1].data.size +b7=net.params['fc7'][0].data.size+net.params['fc7'][1].data.size +b8=net.params['fc8*'][0].data.size+net.params['fc8*'][1].data.size + +aa = a1+a2+a3+a4+a5+a6+a7+a8+a9+a10+a11+a12+a13+a14+a15+a16 +total = b1+b2+b3+b4+b5+b6+b7+b8 + +print 'Compression rate :{}% ({}x)'.format(100.- aa*100./total,total*1./aa) +print 'conv1:{}%'.format((a1+a2)*100./b1) +print 'conv2:{}%'.format((a3+a4)*100./b2) +print 'conv3:{}%'.format((a5+a6)*100./b3) +print 'conv4:{}%'.format((a7+a8)*100./b4) +print 'conv5:{}%'.format((a9+a10)*100./b5) +print 'fc6:{}%'.format((a11+a12)*100./b6) +print 'fc7:{}%'.format((a13+a14)*100./b7) +print 'fc8*:{}%'.format((a15+a16)*100./b8) diff --git a/cmake/ConfigGen.cmake b/cmake/ConfigGen.cmake new file mode 100755 index 0000000..566d6ca --- /dev/null +++ b/cmake/ConfigGen.cmake @@ -0,0 +1,104 @@ + +################################################################################################ +# Helper function to fetch caffe includes which will be passed to dependent projects +# Usage: +# caffe_get_current_includes() +function(caffe_get_current_includes includes_variable) + get_property(current_includes DIRECTORY PROPERTY INCLUDE_DIRECTORIES) + caffe_convert_absolute_paths(current_includes) + + # remove at most one ${PROJECT_BINARY_DIR} include added for caffe_config.h + list(FIND current_includes ${PROJECT_BINARY_DIR} __index) + list(REMOVE_AT current_includes ${__index}) + + # removing numpy includes (since not required for client libs) + set(__toremove "") + foreach(__i ${current_includes}) + if(${__i} MATCHES "python") + list(APPEND __toremove ${__i}) + endif() + endforeach() + if(__toremove) + list(REMOVE_ITEM current_includes ${__toremove}) + endif() + + caffe_list_unique(current_includes) + set(${includes_variable} ${current_includes} PARENT_SCOPE) +endfunction() + +################################################################################################ +# Helper function to get all list items that begin with given prefix +# Usage: +# caffe_get_items_with_prefix( ) +function(caffe_get_items_with_prefix prefix list_variable output_variable) + set(__result "") + foreach(__e ${${list_variable}}) + if(__e MATCHES "^${prefix}.*") + list(APPEND __result ${__e}) + endif() + endforeach() + set(${output_variable} ${__result} PARENT_SCOPE) +endfunction() + +################################################################################################ +# Function for generation Caffe build- and install- tree export config files +# Usage: +# caffe_generate_export_configs() +function(caffe_generate_export_configs) + set(install_cmake_suffix "share/Caffe") + + # ---[ Configure build-tree CaffeConfig.cmake file ]--- + caffe_get_current_includes(Caffe_INCLUDE_DIRS) + + 
set(Caffe_DEFINITIONS "") + if(NOT HAVE_CUDA) + set(HAVE_CUDA FALSE) + list(APPEND Caffe_DEFINITIONS -DCPU_ONLY) + endif() + + if(NOT HAVE_CUDNN) + set(HAVE_CUDNN FALSE) + else() + list(APPEND DEFINITIONS -DUSE_CUDNN) + endif() + + if(BLAS STREQUAL "MKL" OR BLAS STREQUAL "mkl") + list(APPEND Caffe_DEFINITIONS -DUSE_MKL) + endif() + + configure_file("cmake/Templates/CaffeConfig.cmake.in" "${PROJECT_BINARY_DIR}/CaffeConfig.cmake" @ONLY) + + # Add targets to the build-tree export set + export(TARGETS caffe proto FILE "${PROJECT_BINARY_DIR}/CaffeTargets.cmake") + export(PACKAGE Caffe) + + # ---[ Configure install-tree CaffeConfig.cmake file ]--- + + # remove source and build dir includes + caffe_get_items_with_prefix(${PROJECT_SOURCE_DIR} Caffe_INCLUDE_DIRS __insource) + caffe_get_items_with_prefix(${PROJECT_BINARY_DIR} Caffe_INCLUDE_DIRS __inbinary) + list(REMOVE_ITEM Caffe_INCLUDE_DIRS ${__insource} ${__inbinary}) + + # add `install` include folder + set(lines + "get_filename_component(__caffe_include \"\${Caffe_CMAKE_DIR}/../../include\" ABSOLUTE)\n" + "list(APPEND Caffe_INCLUDE_DIRS \${__caffe_include})\n" + "unset(__caffe_include)\n") + string(REPLACE ";" "" Caffe_INSTALL_INCLUDE_DIR_APPEND_COMMAND ${lines}) + + configure_file("cmake/Templates/CaffeConfig.cmake.in" "${PROJECT_BINARY_DIR}/cmake/CaffeConfig.cmake" @ONLY) + + # Install the CaffeConfig.cmake and export set to use with install-tree + install(FILES "${PROJECT_BINARY_DIR}/cmake/CaffeConfig.cmake" DESTINATION ${install_cmake_suffix}) + install(EXPORT CaffeTargets DESTINATION ${install_cmake_suffix}) + + # ---[ Configure and install version file ]--- + + # TODO: Lines below are commented because Caffe does't declare its version in headers. + # When the declarations are added, modify `caffe_extract_caffe_version()` macro and uncomment + + # configure_file(cmake/Templates/CaffeConfigVersion.cmake.in "${PROJECT_BINARY_DIR}/CaffeConfigVersion.cmake" @ONLY) + # install(FILES "${PROJECT_BINARY_DIR}/CaffeConfigVersion.cmake" DESTINATION ${install_cmake_suffix}) +endfunction() + + diff --git a/cmake/Cuda.cmake b/cmake/Cuda.cmake new file mode 100755 index 0000000..ff58d31 --- /dev/null +++ b/cmake/Cuda.cmake @@ -0,0 +1,254 @@ +if(CPU_ONLY) + return() +endif() + +# Known NVIDIA GPU achitectures Caffe can be compiled for. 
+# This list will be used for CUDA_ARCH_NAME = All option +set(Caffe_known_gpu_archs "20 21(20) 30 35 50") + +################################################################################################ +# A function for automatic detection of GPUs installed (if autodetection is enabled) +# Usage: +# caffe_detect_installed_gpus(out_variable) +function(caffe_detect_installed_gpus out_variable) + if(NOT CUDA_gpu_detect_output) + set(__cufile ${PROJECT_BINARY_DIR}/detect_cuda_archs.cu) + + file(WRITE ${__cufile} "" + "#include \n" + "int main()\n" + "{\n" + " int count = 0;\n" + " if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;\n" + " if (count == 0) return -1;\n" + " for (int device = 0; device < count; ++device)\n" + " {\n" + " cudaDeviceProp prop;\n" + " if (cudaSuccess == cudaGetDeviceProperties(&prop, device))\n" + " std::printf(\"%d.%d \", prop.major, prop.minor);\n" + " }\n" + " return 0;\n" + "}\n") + + execute_process(COMMAND "${CUDA_NVCC_EXECUTABLE}" "--run" "${__cufile}" + WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/CMakeFiles/" + RESULT_VARIABLE __nvcc_res OUTPUT_VARIABLE __nvcc_out + ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) + + if(__nvcc_res EQUAL 0) + string(REPLACE "2.1" "2.1(2.0)" __nvcc_out "${__nvcc_out}") + set(CUDA_gpu_detect_output ${__nvcc_out} CACHE INTERNAL "Returned GPU architetures from caffe_detect_gpus tool" FORCE) + endif() + endif() + + if(NOT CUDA_gpu_detect_output) + message(STATUS "Automatic GPU detection failed. Building for all known architectures.") + set(${out_variable} ${Caffe_known_gpu_archs} PARENT_SCOPE) + else() + set(${out_variable} ${CUDA_gpu_detect_output} PARENT_SCOPE) + endif() +endfunction() + + +################################################################################################ +# Function for selecting GPU arch flags for nvcc based on CUDA_ARCH_NAME +# Usage: +# caffe_select_nvcc_arch_flags(out_variable) +function(caffe_select_nvcc_arch_flags out_variable) + # List of arch names + set(__archs_names "Fermi" "Kepler" "Maxwell" "All" "Manual") + set(__archs_name_default "All") + if(NOT CMAKE_CROSSCOMPILING) + list(APPEND __archs_names "Auto") + set(__archs_name_default "Auto") + endif() + + # set CUDA_ARCH_NAME strings (so it will be seen as dropbox in CMake-Gui) + set(CUDA_ARCH_NAME ${__archs_name_default} CACHE STRING "Select target NVIDIA GPU achitecture.") + set_property( CACHE CUDA_ARCH_NAME PROPERTY STRINGS "" ${__archs_names} ) + mark_as_advanced(CUDA_ARCH_NAME) + + # verify CUDA_ARCH_NAME value + if(NOT ";${__archs_names};" MATCHES ";${CUDA_ARCH_NAME};") + string(REPLACE ";" ", " __archs_names "${__archs_names}") + message(FATAL_ERROR "Only ${__archs_names} architeture names are supported.") + endif() + + if(${CUDA_ARCH_NAME} STREQUAL "Manual") + set(CUDA_ARCH_BIN ${Caffe_known_gpu_archs} CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported") + set(CUDA_ARCH_PTX "50" CACHE STRING "Specify 'virtual' PTX architectures to build PTX intermediate code for") + mark_as_advanced(CUDA_ARCH_BIN CUDA_ARCH_PTX) + else() + unset(CUDA_ARCH_BIN CACHE) + unset(CUDA_ARCH_PTX CACHE) + endif() + + if(${CUDA_ARCH_NAME} STREQUAL "Fermi") + set(__cuda_arch_bin "20 21(20)") + elseif(${CUDA_ARCH_NAME} STREQUAL "Kepler") + set(__cuda_arch_bin "30 35") + elseif(${CUDA_ARCH_NAME} STREQUAL "Maxwell") + set(__cuda_arch_bin "50") + elseif(${CUDA_ARCH_NAME} STREQUAL "All") + set(__cuda_arch_bin ${Caffe_known_gpu_archs}) + elseif(${CUDA_ARCH_NAME} STREQUAL "Auto") + 
caffe_detect_installed_gpus(__cuda_arch_bin) + else() # (${CUDA_ARCH_NAME} STREQUAL "Manual") + set(__cuda_arch_bin ${CUDA_ARCH_BIN}) + endif() + + # remove dots and convert to lists + string(REGEX REPLACE "\\." "" __cuda_arch_bin "${__cuda_arch_bin}") + string(REGEX REPLACE "\\." "" __cuda_arch_ptx "${CUDA_ARCH_PTX}") + string(REGEX MATCHALL "[0-9()]+" __cuda_arch_bin "${__cuda_arch_bin}") + string(REGEX MATCHALL "[0-9]+" __cuda_arch_ptx "${__cuda_arch_ptx}") + caffe_list_unique(__cuda_arch_bin __cuda_arch_ptx) + + set(__nvcc_flags "") + set(__nvcc_archs_readable "") + + # Tell NVCC to add binaries for the specified GPUs + foreach(__arch ${__cuda_arch_bin}) + if(__arch MATCHES "([0-9]+)\\(([0-9]+)\\)") + # User explicitly specified PTX for the concrete BIN + list(APPEND __nvcc_flags -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1}) + list(APPEND __nvcc_archs_readable sm_${CMAKE_MATCH_1}) + else() + # User didn't explicitly specify PTX for the concrete BIN, we assume PTX=BIN + list(APPEND __nvcc_flags -gencode arch=compute_${__arch},code=sm_${__arch}) + list(APPEND __nvcc_archs_readable sm_${__arch}) + endif() + endforeach() + + # Tell NVCC to add PTX intermediate code for the specified architectures + foreach(__arch ${__cuda_arch_ptx}) + list(APPEND __nvcc_flags -gencode arch=compute_${__arch},code=compute_${__arch}) + list(APPEND __nvcc_archs_readable compute_${__arch}) + endforeach() + + string(REPLACE ";" " " __nvcc_archs_readable "${__nvcc_archs_readable}") + set(${out_variable} ${__nvcc_flags} PARENT_SCOPE) + set(${out_variable}_readable ${__nvcc_archs_readable} PARENT_SCOPE) +endfunction() + +################################################################################################ +# Short command for cuda comnpilation +# Usage: +# caffe_cuda_compile( ) +macro(caffe_cuda_compile objlist_variable) + foreach(var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG) + set(${var}_backup_in_cuda_compile_ "${${var}}") + + # we remove /EHa as it generates warnings under windows + string(REPLACE "/EHa" "" ${var} "${${var}}") + + endforeach() + + if(UNIX OR APPLE) + list(APPEND CUDA_NVCC_FLAGS -Xcompiler -fPIC) + endif() + + if(APPLE) + list(APPEND CUDA_NVCC_FLAGS -Xcompiler -Wno-unused-function) + endif() + + cuda_compile(cuda_objcs ${ARGN}) + + foreach(var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG) + set(${var} "${${var}_backup_in_cuda_compile_}") + unset(${var}_backup_in_cuda_compile_) + endforeach() + + set(${objlist_variable} ${cuda_objcs}) +endmacro() + +################################################################################################ +# Short command for cuDNN detection. Believe it soon will be a part of CUDA toolkit distribution. +# That's why not FindcuDNN.cmake file, but just the macro +# Usage: +# detect_cuDNN() +function(detect_cuDNN) + set(CUDNN_ROOT "" CACHE PATH "CUDNN root folder") + + find_path(CUDNN_INCLUDE cudnn.h + PATHS ${CUDNN_ROOT} $ENV{CUDNN_ROOT} ${CUDA_TOOLKIT_INCLUDE} + DOC "Path to cuDNN include directory." 
) + + get_filename_component(__libpath_hist ${CUDA_CUDART_LIBRARY} PATH) + find_library(CUDNN_LIBRARY NAMES libcudnn.so # libcudnn_static.a + PATHS ${CUDNN_ROOT} $ENV{CUDNN_ROOT} ${CUDNN_INCLUDE} ${__libpath_hist} + DOC "Path to cuDNN library.") + + if(CUDNN_INCLUDE AND CUDNN_LIBRARY) + set(HAVE_CUDNN TRUE PARENT_SCOPE) + set(CUDNN_FOUND TRUE PARENT_SCOPE) + + mark_as_advanced(CUDNN_INCLUDE CUDNN_LIBRARY CUDNN_ROOT) + message(STATUS "Found cuDNN (include: ${CUDNN_INCLUDE}, library: ${CUDNN_LIBRARY})") + endif() +endfunction() + + +################################################################################################ +### Non-macro section +################################################################################################ + +find_package(CUDA 5.5 QUIET) +find_cuda_helper_libs(curand) # cmake 2.8.7 compatibility; its FindCUDA doesn't search for curand + +if(NOT CUDA_FOUND) + return() +endif() + +set(HAVE_CUDA TRUE) +message(STATUS "CUDA detected: " ${CUDA_VERSION}) +include_directories(SYSTEM ${CUDA_INCLUDE_DIRS}) +list(APPEND Caffe_LINKER_LIBS ${CUDA_CUDART_LIBRARY} + ${CUDA_curand_LIBRARY} ${CUDA_CUBLAS_LIBRARIES}) + +# cuDNN detection +if(USE_CUDNN) + detect_cuDNN() + if(HAVE_CUDNN) + add_definitions(-DUSE_CUDNN) + include_directories(SYSTEM ${CUDNN_INCLUDE}) + list(APPEND Caffe_LINKER_LIBS ${CUDNN_LIBRARY}) + endif() +endif() + +# setting nvcc arch flags +caffe_select_nvcc_arch_flags(NVCC_FLAGS_EXTRA) +list(APPEND CUDA_NVCC_FLAGS ${NVCC_FLAGS_EXTRA}) +message(STATUS "Added CUDA NVCC flags for: ${NVCC_FLAGS_EXTRA_readable}") + +# Boost 1.55 workaround, see https://svn.boost.org/trac/boost/ticket/9392 or +# https://github.com/ComputationalRadiationPhysics/picongpu/blob/master/src/picongpu/CMakeLists.txt +if(Boost_VERSION EQUAL 105500) + message(STATUS "Cuda + Boost 1.55: Applying noinline workaround") + # avoid warning for CMake >= 2.8.12 + set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} \"-DBOOST_NOINLINE=__attribute__((noinline))\" ") +endif() + +# disable some nvcc diagnostics that appear in boost, glog, gflags, opencv, etc.
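+# (each diagnostic name in the loop below is passed to nvcc as -Xcudafe --diag_suppress=<name>)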
+foreach(diag cc_clobber_ignored integer_sign_change useless_using_declaration set_but_not_used) + list(APPEND CUDA_NVCC_FLAGS -Xcudafe --diag_suppress=${diag}) +endforeach() + +# setting default testing device +if(NOT CUDA_TEST_DEVICE) + set(CUDA_TEST_DEVICE -1) +endif() + +mark_as_advanced(CUDA_BUILD_CUBIN CUDA_BUILD_EMULATION CUDA_VERBOSE_BUILD) +mark_as_advanced(CUDA_SDK_ROOT_DIR CUDA_SEPARABLE_COMPILATION) + +# Handle clang/libc++ issue +if(APPLE) + caffe_detect_darwin_version(OSX_VERSION) + + # OSX 10.9 and higher uses clang/libc++ by default which is incompartible with old CUDA toolkits + if(OSX_VERSION VERSION_GREATER 10.8) + # enabled by default if and only if CUDA version is less than 7.0 + caffe_option(USE_libstdcpp "Use libstdc++ instead of libc++" (CUDA_VERSION VERSION_LESS 7.0)) + endif() +endif() diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake new file mode 100755 index 0000000..7c86dd5 --- /dev/null +++ b/cmake/Dependencies.cmake @@ -0,0 +1,158 @@ +# This list is required for static linking and exported to CaffeConfig.cmake +set(Caffe_LINKER_LIBS "") + +# ---[ Boost +find_package(Boost 1.46 REQUIRED COMPONENTS system thread) +include_directories(SYSTEM ${Boost_INCLUDE_DIR}) +list(APPEND Caffe_LINKER_LIBS ${Boost_LIBRARIES}) + +# ---[ Threads +find_package(Threads REQUIRED) +list(APPEND Caffe_LINKER_LIBS ${CMAKE_THREAD_LIBS_INIT}) + +# ---[ Google-glog +include("cmake/External/glog.cmake") +include_directories(SYSTEM ${GLOG_INCLUDE_DIRS}) +list(APPEND Caffe_LINKER_LIBS ${GLOG_LIBRARIES}) + +# ---[ Google-gflags +include("cmake/External/gflags.cmake") +include_directories(SYSTEM ${GFLAGS_INCLUDE_DIRS}) +list(APPEND Caffe_LINKER_LIBS ${GFLAGS_LIBRARIES}) + +# ---[ Google-protobuf +include(cmake/ProtoBuf.cmake) + +# ---[ HDF5 +find_package(HDF5 COMPONENTS HL REQUIRED) +include_directories(SYSTEM ${HDF5_INCLUDE_DIRS} ${HDF5_HL_INCLUDE_DIR}) +list(APPEND Caffe_LINKER_LIBS ${HDF5_LIBRARIES}) + +# ---[ LMDB +find_package(LMDB REQUIRED) +include_directories(SYSTEM ${LMDB_INCLUDE_DIR}) +list(APPEND Caffe_LINKER_LIBS ${LMDB_LIBRARIES}) + +# ---[ LevelDB +find_package(LevelDB REQUIRED) +include_directories(SYSTEM ${LevelDB_INCLUDE}) +list(APPEND Caffe_LINKER_LIBS ${LevelDB_LIBRARIES}) + +# ---[ Snappy +find_package(Snappy REQUIRED) +include_directories(SYSTEM ${Snappy_INCLUDE_DIR}) +list(APPEND Caffe_LINKER_LIBS ${Snappy_LIBRARIES}) + +# ---[ CUDA +include(cmake/Cuda.cmake) +if(NOT HAVE_CUDA) + if(CPU_ONLY) + message("-- CUDA is disabled. Building without it...") + else() + message("-- CUDA is not detected by cmake. Building without it...") + endif() + + # TODO: remove this not cross platform define in future. Use caffe_config.h instead. 
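+  # (defining CPU_ONLY compiles Caffe without any CUDA code paths, so GPU mode is unavailable)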
+ add_definitions(-DCPU_ONLY) +endif() + +# ---[ OpenCV +find_package(OpenCV QUIET COMPONENTS core highgui imgproc imgcodecs) +if(NOT OpenCV_FOUND) # if not OpenCV 3.x, then imgcodecs are not found + find_package(OpenCV REQUIRED COMPONENTS core highgui imgproc) +endif() +include_directories(SYSTEM ${OpenCV_INCLUDE_DIRS}) +list(APPEND Caffe_LINKER_LIBS ${OpenCV_LIBS}) +message(STATUS "OpenCV found (${OpenCV_CONFIG_PATH})") + +# ---[ BLAS +if(NOT APPLE) + set(BLAS "Atlas" CACHE STRING "Selected BLAS library") + set_property(CACHE BLAS PROPERTY STRINGS "Atlas;Open;MKL") + + if(BLAS STREQUAL "Atlas" OR BLAS STREQUAL "atlas") + find_package(Atlas REQUIRED) + include_directories(SYSTEM ${Atlas_INCLUDE_DIR}) + list(APPEND Caffe_LINKER_LIBS ${Atlas_LIBRARIES}) + elseif(BLAS STREQUAL "Open" OR BLAS STREQUAL "open") + find_package(OpenBLAS REQUIRED) + include_directories(SYSTEM ${OpenBLAS_INCLUDE_DIR}) + list(APPEND Caffe_LINKER_LIBS ${OpenBLAS_LIB}) + elseif(BLAS STREQUAL "MKL" OR BLAS STREQUAL "mkl") + find_package(MKL REQUIRED) + include_directories(SYSTEM ${MKL_INCLUDE_DIR}) + list(APPEND Caffe_LINKER_LIBS ${MKL_LIBRARIES}) + add_definitions(-DUSE_MKL) + endif() +elseif(APPLE) + find_package(vecLib REQUIRED) + include_directories(SYSTEM ${vecLib_INCLUDE_DIR}) + list(APPEND Caffe_LINKER_LIBS ${vecLib_LINKER_LIBS}) +endif() + +# ---[ Python +if(BUILD_python) + if(NOT "${python_version}" VERSION_LESS "3.0.0") + # use python3 + find_package(PythonInterp 3.0) + find_package(PythonLibs 3.0) + find_package(NumPy 1.7.1) + # Find the matching boost python implementation + set(version ${PYTHONLIBS_VERSION_STRING}) + + STRING( REPLACE "." "" boost_py_version ${version} ) + find_package(Boost 1.46 COMPONENTS "python-py${boost_py_version}") + set(Boost_PYTHON_FOUND ${Boost_PYTHON-PY${boost_py_version}_FOUND}) + + while(NOT "${version}" STREQUAL "" AND NOT Boost_PYTHON_FOUND) + STRING( REGEX REPLACE "([0-9.]+).[0-9]+" "\\1" version ${version} ) + + STRING( REPLACE "." 
"" boost_py_version ${version} ) + find_package(Boost 1.46 COMPONENTS "python-py${boost_py_version}") + set(Boost_PYTHON_FOUND ${Boost_PYTHON-PY${boost_py_version}_FOUND}) + + STRING( REGEX MATCHALL "([0-9.]+).[0-9]+" has_more_version ${version} ) + if("${has_more_version}" STREQUAL "") + break() + endif() + endwhile() + if(NOT Boost_PYTHON_FOUND) + find_package(Boost 1.46 COMPONENTS python) + endif() + else() + # disable Python 3 search + find_package(PythonInterp 2.7) + find_package(PythonLibs 2.7) + find_package(NumPy 1.7.1) + find_package(Boost 1.46 COMPONENTS python) + endif() + if(PYTHONLIBS_FOUND AND NUMPY_FOUND AND Boost_PYTHON_FOUND) + set(HAVE_PYTHON TRUE) + if(BUILD_python_layer) + add_definitions(-DWITH_PYTHON_LAYER) + include_directories(SYSTEM ${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR} ${Boost_INCLUDE_DIRS}) + list(APPEND Caffe_LINKER_LIBS ${PYTHON_LIBRARIES} ${Boost_LIBRARIES}) + endif() + endif() +endif() + +# ---[ Matlab +if(BUILD_matlab) + find_package(MatlabMex) + if(MATLABMEX_FOUND) + set(HAVE_MATLAB TRUE) + endif() + + # sudo apt-get install liboctave-dev + find_program(Octave_compiler NAMES mkoctfile DOC "Octave C++ compiler") + + if(HAVE_MATLAB AND Octave_compiler) + set(Matlab_build_mex_using "Matlab" CACHE STRING "Select Matlab or Octave if both detected") + set_property(CACHE Matlab_build_mex_using PROPERTY STRINGS "Matlab;Octave") + endif() +endif() + +# ---[ Doxygen +if(BUILD_docs) + find_package(Doxygen) +endif() diff --git a/cmake/External/gflags.cmake b/cmake/External/gflags.cmake new file mode 100755 index 0000000..e3dba04 --- /dev/null +++ b/cmake/External/gflags.cmake @@ -0,0 +1,56 @@ +if (NOT __GFLAGS_INCLUDED) # guard against multiple includes + set(__GFLAGS_INCLUDED TRUE) + + # use the system-wide gflags if present + find_package(GFlags) + if (GFLAGS_FOUND) + set(GFLAGS_EXTERNAL FALSE) + else() + # gflags will use pthreads if it's available in the system, so we must link with it + find_package(Threads) + + # build directory + set(gflags_PREFIX ${CMAKE_BINARY_DIR}/external/gflags-prefix) + # install directory + set(gflags_INSTALL ${CMAKE_BINARY_DIR}/external/gflags-install) + + # we build gflags statically, but want to link it into the caffe shared library + # this requires position-independent code + if (UNIX) + set(GFLAGS_EXTRA_COMPILER_FLAGS "-fPIC") + endif() + + set(GFLAGS_CXX_FLAGS ${CMAKE_CXX_FLAGS} ${GFLAGS_EXTRA_COMPILER_FLAGS}) + set(GFLAGS_C_FLAGS ${CMAKE_C_FLAGS} ${GFLAGS_EXTRA_COMPILER_FLAGS}) + + ExternalProject_Add(gflags + PREFIX ${gflags_PREFIX} + GIT_REPOSITORY "https://github.com/gflags/gflags.git" + GIT_TAG "v2.1.2" + UPDATE_COMMAND "" + INSTALL_DIR ${gflags_INSTALL} + CMAKE_ARGS -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} + -DCMAKE_INSTALL_PREFIX=${gflags_INSTALL} + -DBUILD_SHARED_LIBS=OFF + -DBUILD_STATIC_LIBS=ON + -DBUILD_PACKAGING=OFF + -DBUILD_TESTING=OFF + -DBUILD_NC_TESTS=OFF + -BUILD_CONFIG_TESTS=OFF + -DINSTALL_HEADERS=ON + -DCMAKE_C_FLAGS=${GFLAGS_C_FLAGS} + -DCMAKE_CXX_FLAGS=${GFLAGS_CXX_FLAGS} + LOG_DOWNLOAD 1 + LOG_INSTALL 1 + ) + + set(GFLAGS_FOUND TRUE) + set(GFLAGS_INCLUDE_DIRS ${gflags_INSTALL}/include) + set(GFLAGS_LIBRARIES ${gflags_INSTALL}/lib/libgflags.a ${CMAKE_THREAD_LIBS_INIT}) + set(GFLAGS_LIBRARY_DIRS ${gflags_INSTALL}/lib) + set(GFLAGS_EXTERNAL TRUE) + + list(APPEND external_project_dependencies gflags) + endif() + +endif() diff --git a/cmake/External/glog.cmake b/cmake/External/glog.cmake new file mode 100755 index 0000000..a44672f --- /dev/null +++ b/cmake/External/glog.cmake @@ -0,0 +1,56 @@ +# glog depends 
on gflags +include("cmake/External/gflags.cmake") + +if (NOT __GLOG_INCLUDED) + set(__GLOG_INCLUDED TRUE) + + # try the system-wide glog first + find_package(Glog) + if (GLOG_FOUND) + set(GLOG_EXTERNAL FALSE) + else() + # fetch and build glog from github + + # build directory + set(glog_PREFIX ${CMAKE_BINARY_DIR}/external/glog-prefix) + # install directory + set(glog_INSTALL ${CMAKE_BINARY_DIR}/external/glog-install) + + # we build glog statically, but want to link it into the caffe shared library + # this requires position-independent code + if (UNIX) + set(GLOG_EXTRA_COMPILER_FLAGS "-fPIC") + endif() + + set(GLOG_CXX_FLAGS ${CMAKE_CXX_FLAGS} ${GLOG_EXTRA_COMPILER_FLAGS}) + set(GLOG_C_FLAGS ${CMAKE_C_FLAGS} ${GLOG_EXTRA_COMPILER_FLAGS}) + + # depend on gflags if we're also building it + if (GFLAGS_EXTERNAL) + set(GLOG_DEPENDS gflags) + endif() + + ExternalProject_Add(glog + DEPENDS ${GLOG_DEPENDS} + PREFIX ${glog_PREFIX} + GIT_REPOSITORY "https://github.com/google/glog" + GIT_TAG "v0.3.4" + UPDATE_COMMAND "" + INSTALL_DIR ${gflags_INSTALL} + CONFIGURE_COMMAND env "CFLAGS=${GLOG_C_FLAGS}" "CXXFLAGS=${GLOG_CXX_FLAGS}" ${glog_PREFIX}/src/glog/configure --prefix=${glog_INSTALL} --enable-shared=no --enable-static=yes --with-gflags=${GFLAGS_LIBRARY_DIRS}/.. + LOG_DOWNLOAD 1 + LOG_CONFIGURE 1 + LOG_INSTALL 1 + ) + + set(GLOG_FOUND TRUE) + set(GLOG_INCLUDE_DIRS ${glog_INSTALL}/include) + set(GLOG_LIBRARIES ${GFLAGS_LIBRARIES} ${glog_INSTALL}/lib/libglog.a) + set(GLOG_LIBRARY_DIRS ${glog_INSTALL}/lib) + set(GLOG_EXTERNAL TRUE) + + list(APPEND external_project_dependencies glog) + endif() + +endif() + diff --git a/cmake/Misc.cmake b/cmake/Misc.cmake new file mode 100755 index 0000000..7676754 --- /dev/null +++ b/cmake/Misc.cmake @@ -0,0 +1,52 @@ +# ---[ Configuration types +set(CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "Possible configurations" FORCE) +mark_as_advanced(CMAKE_CONFIGURATION_TYPES) + +if(DEFINED CMAKE_BUILD_TYPE) + set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${CMAKE_CONFIGURATION_TYPES}) +endif() + +# --[ If user doesn't specify build type then assume release +if("${CMAKE_BUILD_TYPE}" STREQUAL "") + set(CMAKE_BUILD_TYPE Release) +endif() + +if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") + set(CMAKE_COMPILER_IS_CLANGXX TRUE) +endif() + +# ---[ Solution folders +caffe_option(USE_PROJECT_FOLDERS "IDE Solution folders" (MSVC_IDE OR CMAKE_GENERATOR MATCHES Xcode) ) + +if(USE_PROJECT_FOLDERS) + set_property(GLOBAL PROPERTY USE_FOLDERS ON) + set_property(GLOBAL PROPERTY PREDEFINED_TARGETS_FOLDER "CMakeTargets") +endif() + +# ---[ Install options +if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) + set(CMAKE_INSTALL_PREFIX "${PROJECT_BINARY_DIR}/install" CACHE PATH "Default install path" FORCE) +endif() + +# ---[ RPATH settings +set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE CACHE BOOLEAN "Use link paths for shared library rpath") +set(CMAKE_MACOSX_RPATH TRUE) + +list(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES ${CMAKE_INSTALL_PREFIX}/lib __is_systtem_dir) +if(${__is_systtem_dir} STREQUAL -1) + set(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_PREFIX}/lib) +endif() + +# ---[ Funny target +if(UNIX OR APPLE) + add_custom_target(symlink_to_build COMMAND "ln" "-sf" "${PROJECT_BINARY_DIR}" "${PROJECT_SOURCE_DIR}/build" + COMMENT "Adding symlink: /build -> ${PROJECT_BINARY_DIR}" ) +endif() + +# ---[ Set debug postfix +set(Caffe_DEBUG_POSTFIX "-d") + +set(CAffe_POSTFIX "") +if(CMAKE_BUILD_TYPE MATCHES "Debug") + set(CAffe_POSTFIX ${Caffe_DEBUG_POSTFIX}) +endif() diff --git 
a/cmake/Modules/FindAtlas.cmake b/cmake/Modules/FindAtlas.cmake new file mode 100755 index 0000000..6e15643 --- /dev/null +++ b/cmake/Modules/FindAtlas.cmake @@ -0,0 +1,52 @@ +# Find the Atlas (and Lapack) libraries +# +# The following variables are optionally searched for defaults +# Atlas_ROOT_DIR: Base directory where all Atlas components are found +# +# The following are set after configuration is done: +# Atlas_FOUND +# Atlas_INCLUDE_DIRS +# Atlas_LIBRARIES +# Atlas_LIBRARYRARY_DIRS + +set(Atlas_INCLUDE_SEARCH_PATHS + /usr/include/atlas + /usr/include/atlas-base + $ENV{Atlas_ROOT_DIR} + $ENV{Atlas_ROOT_DIR}/include +) + +set(Atlas_LIB_SEARCH_PATHS + /usr/lib/atlas + /usr/lib/atlas-base + $ENV{Atlas_ROOT_DIR} + $ENV{Atlas_ROOT_DIR}/lib +) + +find_path(Atlas_CBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS}) +find_path(Atlas_CLAPACK_INCLUDE_DIR NAMES clapack.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS}) + +find_library(Atlas_CBLAS_LIBRARY NAMES ptcblas_r ptcblas cblas_r cblas PATHS ${Atlas_LIB_SEARCH_PATHS}) +find_library(Atlas_BLAS_LIBRARY NAMES atlas_r atlas PATHS ${Atlas_LIB_SEARCH_PATHS}) +find_library(Atlas_LAPACK_LIBRARY NAMES alapack_r alapack lapack_atlas PATHS ${Atlas_LIB_SEARCH_PATHS}) + +set(LOOKED_FOR + Atlas_CBLAS_INCLUDE_DIR + Atlas_CLAPACK_INCLUDE_DIR + + Atlas_CBLAS_LIBRARY + Atlas_BLAS_LIBRARY + Atlas_LAPACK_LIBRARY +) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(Atlas DEFAULT_MSG ${LOOKED_FOR}) + +if(ATLAS_FOUND) + set(Atlas_INCLUDE_DIR ${Atlas_CBLAS_INCLUDE_DIR} ${Atlas_CLAPACK_INCLUDE_DIR}) + set(Atlas_LIBRARIES ${Atlas_LAPACK_LIBRARY} ${Atlas_CBLAS_LIBRARY} ${Atlas_BLAS_LIBRARY}) + mark_as_advanced(${LOOKED_FOR}) + + message(STATUS "Found Atlas (include: ${Atlas_CBLAS_INCLUDE_DIR}, library: ${Atlas_BLAS_LIBRARY})") +endif(ATLAS_FOUND) + diff --git a/cmake/Modules/FindGFlags.cmake b/cmake/Modules/FindGFlags.cmake new file mode 100755 index 0000000..29b60f0 --- /dev/null +++ b/cmake/Modules/FindGFlags.cmake @@ -0,0 +1,50 @@ +# - Try to find GFLAGS +# +# The following variables are optionally searched for defaults +# GFLAGS_ROOT_DIR: Base directory where all GFLAGS components are found +# +# The following are set after configuration is done: +# GFLAGS_FOUND +# GFLAGS_INCLUDE_DIRS +# GFLAGS_LIBRARIES +# GFLAGS_LIBRARYRARY_DIRS + +include(FindPackageHandleStandardArgs) + +set(GFLAGS_ROOT_DIR "" CACHE PATH "Folder contains Gflags") + +# We are testing only a couple of files in the include directories +if(WIN32) + find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h + PATHS ${GFLAGS_ROOT_DIR}/src/windows) +else() + find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h + PATHS ${GFLAGS_ROOT_DIR}) +endif() + +if(MSVC) + find_library(GFLAGS_LIBRARY_RELEASE + NAMES libgflags + PATHS ${GFLAGS_ROOT_DIR} + PATH_SUFFIXES Release) + + find_library(GFLAGS_LIBRARY_DEBUG + NAMES libgflags-debug + PATHS ${GFLAGS_ROOT_DIR} + PATH_SUFFIXES Debug) + + set(GFLAGS_LIBRARY optimized ${GFLAGS_LIBRARY_RELEASE} debug ${GFLAGS_LIBRARY_DEBUG}) +else() + find_library(GFLAGS_LIBRARY gflags) +endif() + +find_package_handle_standard_args(GFlags DEFAULT_MSG GFLAGS_INCLUDE_DIR GFLAGS_LIBRARY) + + +if(GFLAGS_FOUND) + set(GFLAGS_INCLUDE_DIRS ${GFLAGS_INCLUDE_DIR}) + set(GFLAGS_LIBRARIES ${GFLAGS_LIBRARY}) + message(STATUS "Found gflags (include: ${GFLAGS_INCLUDE_DIR}, library: ${GFLAGS_LIBRARY})") + mark_as_advanced(GFLAGS_LIBRARY_DEBUG GFLAGS_LIBRARY_RELEASE + GFLAGS_LIBRARY GFLAGS_INCLUDE_DIR GFLAGS_ROOT_DIR) +endif() diff --git a/cmake/Modules/FindGlog.cmake 
b/cmake/Modules/FindGlog.cmake new file mode 100755 index 0000000..99abbe4 --- /dev/null +++ b/cmake/Modules/FindGlog.cmake @@ -0,0 +1,48 @@ +# - Try to find Glog +# +# The following variables are optionally searched for defaults +# GLOG_ROOT_DIR: Base directory where all GLOG components are found +# +# The following are set after configuration is done: +# GLOG_FOUND +# GLOG_INCLUDE_DIRS +# GLOG_LIBRARIES +# GLOG_LIBRARYRARY_DIRS + +include(FindPackageHandleStandardArgs) + +set(GLOG_ROOT_DIR "" CACHE PATH "Folder contains Google glog") + +if(WIN32) + find_path(GLOG_INCLUDE_DIR glog/logging.h + PATHS ${GLOG_ROOT_DIR}/src/windows) +else() + find_path(GLOG_INCLUDE_DIR glog/logging.h + PATHS ${GLOG_ROOT_DIR}) +endif() + +if(MSVC) + find_library(GLOG_LIBRARY_RELEASE libglog_static + PATHS ${GLOG_ROOT_DIR} + PATH_SUFFIXES Release) + + find_library(GLOG_LIBRARY_DEBUG libglog_static + PATHS ${GLOG_ROOT_DIR} + PATH_SUFFIXES Debug) + + set(GLOG_LIBRARY optimized ${GLOG_LIBRARY_RELEASE} debug ${GLOG_LIBRARY_DEBUG}) +else() + find_library(GLOG_LIBRARY glog + PATHS ${GLOG_ROOT_DIR} + PATH_SUFFIXES lib lib64) +endif() + +find_package_handle_standard_args(Glog DEFAULT_MSG GLOG_INCLUDE_DIR GLOG_LIBRARY) + +if(GLOG_FOUND) + set(GLOG_INCLUDE_DIRS ${GLOG_INCLUDE_DIR}) + set(GLOG_LIBRARIES ${GLOG_LIBRARY}) + message(STATUS "Found glog (include: ${GLOG_INCLUDE_DIR}, library: ${GLOG_LIBRARY})") + mark_as_advanced(GLOG_ROOT_DIR GLOG_LIBRARY_RELEASE GLOG_LIBRARY_DEBUG + GLOG_LIBRARY GLOG_INCLUDE_DIR) +endif() diff --git a/cmake/Modules/FindLAPACK.cmake b/cmake/Modules/FindLAPACK.cmake new file mode 100755 index 0000000..9641c45 --- /dev/null +++ b/cmake/Modules/FindLAPACK.cmake @@ -0,0 +1,190 @@ +# - Find LAPACK library +# This module finds an installed fortran library that implements the LAPACK +# linear-algebra interface (see http://www.netlib.org/lapack/). +# +# The approach follows that taken for the autoconf macro file, acx_lapack.m4 +# (distributed at http://ac-archive.sourceforge.net/ac-archive/acx_lapack.html). +# +# This module sets the following variables: +# LAPACK_FOUND - set to true if a library implementing the LAPACK interface is found +# LAPACK_LIBRARIES - list of libraries (using full path name) for LAPACK + +# Note: I do not think it is a good idea to mixup different BLAS/LAPACK versions +# Hence, this script wants to find a Lapack library matching your Blas library + +# Do nothing if LAPACK was found before +IF(NOT LAPACK_FOUND) + +SET(LAPACK_LIBRARIES) +SET(LAPACK_INFO) + +IF(LAPACK_FIND_QUIETLY OR NOT LAPACK_FIND_REQUIRED) + FIND_PACKAGE(BLAS) +ELSE(LAPACK_FIND_QUIETLY OR NOT LAPACK_FIND_REQUIRED) + FIND_PACKAGE(BLAS REQUIRED) +ENDIF(LAPACK_FIND_QUIETLY OR NOT LAPACK_FIND_REQUIRED) + +# Old search lapack script +include(CheckFortranFunctionExists) + +macro(Check_Lapack_Libraries LIBRARIES _prefix _name _flags _list _blas) + # This macro checks for the existence of the combination of fortran libraries + # given by _list. If the combination is found, this macro checks (using the + # Check_Fortran_Function_Exists macro) whether can link against that library + # combination using the name of a routine given by _name using the linker + # flags given by _flags. If the combination of libraries is found and passes + # the link test, LIBRARIES is set to the list of complete library paths that + # have been found. Otherwise, LIBRARIES is set to FALSE. + # N.B. _prefix is the prefix applied to the names of all cached variables that + # are generated internally and marked advanced by this macro. 
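+  # Example invocation (mirrors the generic-LAPACK case near the end of this file):
+  #   check_lapack_libraries(LAPACK_LIBRARIES LAPACK cheev "" "lapack" "${BLAS_LIBRARIES}")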
+ set(_libraries_work TRUE) + set(${LIBRARIES}) + set(_combined_name) + foreach(_library ${_list}) + set(_combined_name ${_combined_name}_${_library}) + if(_libraries_work) + if (WIN32) + find_library(${_prefix}_${_library}_LIBRARY + NAMES ${_library} PATHS ENV LIB PATHS ENV PATH) + else (WIN32) + if(APPLE) + find_library(${_prefix}_${_library}_LIBRARY + NAMES ${_library} + PATHS /usr/local/lib /usr/lib /usr/local/lib64 /usr/lib64 + ENV DYLD_LIBRARY_PATH) + else(APPLE) + find_library(${_prefix}_${_library}_LIBRARY + NAMES ${_library} + PATHS /usr/local/lib /usr/lib /usr/local/lib64 /usr/lib64 + ENV LD_LIBRARY_PATH) + endif(APPLE) + endif(WIN32) + mark_as_advanced(${_prefix}_${_library}_LIBRARY) + set(${LIBRARIES} ${${LIBRARIES}} ${${_prefix}_${_library}_LIBRARY}) + set(_libraries_work ${${_prefix}_${_library}_LIBRARY}) + endif(_libraries_work) + endforeach(_library ${_list}) + if(_libraries_work) + # Test this combination of libraries. + set(CMAKE_REQUIRED_LIBRARIES ${_flags} ${${LIBRARIES}} ${_blas}) + if (CMAKE_Fortran_COMPILER_WORKS) + check_fortran_function_exists(${_name} ${_prefix}${_combined_name}_WORKS) + else (CMAKE_Fortran_COMPILER_WORKS) + check_function_exists("${_name}_" ${_prefix}${_combined_name}_WORKS) + endif (CMAKE_Fortran_COMPILER_WORKS) + set(CMAKE_REQUIRED_LIBRARIES) + mark_as_advanced(${_prefix}${_combined_name}_WORKS) + set(_libraries_work ${${_prefix}${_combined_name}_WORKS}) + endif(_libraries_work) + if(NOT _libraries_work) + set(${LIBRARIES} FALSE) + endif(NOT _libraries_work) +endmacro(Check_Lapack_Libraries) + + +if(BLAS_FOUND) + + # Intel MKL + IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "mkl")) + IF(MKL_LAPACK_LIBRARIES) + SET(LAPACK_LIBRARIES ${MKL_LAPACK_LIBRARIES} ${MKL_LIBRARIES}) + ELSE(MKL_LAPACK_LIBRARIES) + SET(LAPACK_LIBRARIES ${MKL_LIBRARIES}) + ENDIF(MKL_LAPACK_LIBRARIES) + SET(LAPACK_INCLUDE_DIR ${MKL_INCLUDE_DIR}) + SET(LAPACK_INFO "mkl") + ENDIF() + + # OpenBlas + IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "open")) + SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES}) + check_function_exists("cheev_" OPEN_LAPACK_WORKS) + if(OPEN_LAPACK_WORKS) + SET(LAPACK_INFO "open") + else() + message(STATUS "It seems OpenBlas has not been compiled with Lapack support") + endif() + endif() + + # GotoBlas + IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "goto")) + SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES}) + check_function_exists("cheev_" GOTO_LAPACK_WORKS) + if(GOTO_LAPACK_WORKS) + SET(LAPACK_INFO "goto") + else() + message(STATUS "It seems GotoBlas has not been compiled with Lapack support") + endif() + endif() + + # ACML + IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "acml")) + SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES}) + check_function_exists("cheev_" ACML_LAPACK_WORKS) + if(ACML_LAPACK_WORKS) + SET(LAPACK_INFO "acml") + else() + message(STATUS "Strangely, this ACML library does not support Lapack?!") + endif() + endif() + + # Accelerate + IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "accelerate")) + SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES}) + check_function_exists("cheev_" ACCELERATE_LAPACK_WORKS) + if(ACCELERATE_LAPACK_WORKS) + SET(LAPACK_INFO "accelerate") + else() + message(STATUS "Strangely, this Accelerate library does not support Lapack?!") + endif() + endif() + + # vecLib + IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "veclib")) + SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES}) + check_function_exists("cheev_" VECLIB_LAPACK_WORKS) + if(VECLIB_LAPACK_WORKS) + SET(LAPACK_INFO "veclib") + else() + message(STATUS "Strangely, this vecLib 
library does not support Lapack?!") + endif() + endif() + + # Generic LAPACK library? + IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "generic")) + check_lapack_libraries( + LAPACK_LIBRARIES + LAPACK + cheev + "" + "lapack" + "${BLAS_LIBRARIES}" + ) + if(LAPACK_LIBRARIES) + SET(LAPACK_INFO "generic") + endif(LAPACK_LIBRARIES) + endif() + +else(BLAS_FOUND) + message(STATUS "LAPACK requires BLAS") +endif(BLAS_FOUND) + +if(LAPACK_INFO) + set(LAPACK_FOUND TRUE) +else(LAPACK_INFO) + set(LAPACK_FOUND FALSE) +endif(LAPACK_INFO) + +IF (NOT LAPACK_FOUND AND LAPACK_FIND_REQUIRED) + message(FATAL_ERROR "Cannot find a library with LAPACK API. Please specify library location.") +ENDIF (NOT LAPACK_FOUND AND LAPACK_FIND_REQUIRED) +IF(NOT LAPACK_FIND_QUIETLY) + IF(LAPACK_FOUND) + MESSAGE(STATUS "Found a library with LAPACK API. (${LAPACK_INFO})") + ELSE(LAPACK_FOUND) + MESSAGE(STATUS "Cannot find a library with LAPACK API. Not using LAPACK.") + ENDIF(LAPACK_FOUND) +ENDIF(NOT LAPACK_FIND_QUIETLY) + +# Do nothing if LAPACK was found before +ENDIF(NOT LAPACK_FOUND) diff --git a/cmake/Modules/FindLMDB.cmake b/cmake/Modules/FindLMDB.cmake new file mode 100755 index 0000000..8a817fd --- /dev/null +++ b/cmake/Modules/FindLMDB.cmake @@ -0,0 +1,28 @@ +# Try to find the LMBD libraries and headers +# LMDB_FOUND - system has LMDB lib +# LMDB_INCLUDE_DIR - the LMDB include directory +# LMDB_LIBRARIES - Libraries needed to use LMDB + +# FindCWD based on FindGMP by: +# Copyright (c) 2006, Laurent Montel, +# +# Redistribution and use is allowed according to the terms of the BSD license. + +# Adapted from FindCWD by: +# Copyright 2013 Conrad Steenberg +# Aug 31, 2013 + +find_path(LMDB_INCLUDE_DIR NAMES lmdb.h PATHS "$ENV{LMDB_DIR}/include") +find_library(LMDB_LIBRARIES NAMES lmdb PATHS "$ENV{LMDB_DIR}/lib" ) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(LMDB DEFAULT_MSG LMDB_INCLUDE_DIR LMDB_LIBRARIES) + +if(LMDB_FOUND) + message(STATUS "Found lmdb (include: ${LMDB_INCLUDE_DIR}, library: ${LMDB_LIBRARIES})") + mark_as_advanced(LMDB_INCLUDE_DIR LMDB_LIBRARIES) + + caffe_parse_header(${LMDB_INCLUDE_DIR}/lmdb.h + LMDB_VERSION_LINES MDB_VERSION_MAJOR MDB_VERSION_MINOR MDB_VERSION_PATCH) + set(LMDB_VERSION "${MDB_VERSION_MAJOR}.${MDB_VERSION_MINOR}.${MDB_VERSION_PATCH}") +endif() diff --git a/cmake/Modules/FindLevelDB.cmake b/cmake/Modules/FindLevelDB.cmake new file mode 100755 index 0000000..97f08ac --- /dev/null +++ b/cmake/Modules/FindLevelDB.cmake @@ -0,0 +1,44 @@ +# - Find LevelDB +# +# LevelDB_INCLUDES - List of LevelDB includes +# LevelDB_LIBRARIES - List of libraries when using LevelDB. +# LevelDB_FOUND - True if LevelDB found. + +# Look for the header file. +find_path(LevelDB_INCLUDE NAMES leveldb/db.h + PATHS $ENV{LEVELDB_ROOT}/include /opt/local/include /usr/local/include /usr/include + DOC "Path in which the file leveldb/db.h is located." ) + +# Look for the library. +find_library(LevelDB_LIBRARY NAMES leveldb + PATHS /usr/lib $ENV{LEVELDB_ROOT}/lib + DOC "Path to leveldb library." 
) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(LevelDB DEFAULT_MSG LevelDB_INCLUDE LevelDB_LIBRARY) + +if(LEVELDB_FOUND) + message(STATUS "Found LevelDB (include: ${LevelDB_INCLUDE}, library: ${LevelDB_LIBRARY})") + set(LevelDB_INCLUDES ${LevelDB_INCLUDE}) + set(LevelDB_LIBRARIES ${LevelDB_LIBRARY}) + mark_as_advanced(LevelDB_INCLUDE LevelDB_LIBRARY) + + if(EXISTS "${LevelDB_INCLUDE}/leveldb/db.h") + file(STRINGS "${LevelDB_INCLUDE}/leveldb/db.h" __version_lines + REGEX "static const int k[^V]+Version[ \t]+=[ \t]+[0-9]+;") + + foreach(__line ${__version_lines}) + if(__line MATCHES "[^k]+kMajorVersion[ \t]+=[ \t]+([0-9]+);") + set(LEVELDB_VERSION_MAJOR ${CMAKE_MATCH_1}) + elseif(__line MATCHES "[^k]+kMinorVersion[ \t]+=[ \t]+([0-9]+);") + set(LEVELDB_VERSION_MINOR ${CMAKE_MATCH_1}) + endif() + endforeach() + + if(LEVELDB_VERSION_MAJOR AND LEVELDB_VERSION_MINOR) + set(LEVELDB_VERSION "${LEVELDB_VERSION_MAJOR}.${LEVELDB_VERSION_MINOR}") + endif() + + caffe_clear_vars(__line __version_lines) + endif() +endif() diff --git a/cmake/Modules/FindMKL.cmake b/cmake/Modules/FindMKL.cmake new file mode 100755 index 0000000..774bdc6 --- /dev/null +++ b/cmake/Modules/FindMKL.cmake @@ -0,0 +1,110 @@ +# Find the MKL libraries +# +# Options: +# +# MKL_USE_SINGLE_DYNAMIC_LIBRARY : use single dynamic library interface +# MKL_USE_STATIC_LIBS : use static libraries +# MKL_MULTI_THREADED : use multi-threading +# +# This module defines the following variables: +# +# MKL_FOUND : True mkl is found +# MKL_INCLUDE_DIR : unclude directory +# MKL_LIBRARIES : the libraries to link against. + + +# ---[ Options +caffe_option(MKL_USE_SINGLE_DYNAMIC_LIBRARY "Use single dynamic library interface" ON) +caffe_option(MKL_USE_STATIC_LIBS "Use static libraries" OFF IF NOT MKL_USE_SINGLE_DYNAMIC_LIBRARY) +caffe_option(MKL_MULTI_THREADED "Use multi-threading" ON IF NOT MKL_USE_SINGLE_DYNAMIC_LIBRARY) + +# ---[ Root folders +set(INTEL_ROOT "/opt/intel" CACHE PATH "Folder contains intel libs") +find_path(MKL_ROOT include/mkl.h PATHS $ENV{MKL_ROOT} ${INTEL_ROOT}/mkl + DOC "Folder contains MKL") + +# ---[ Find include dir +find_path(MKL_INCLUDE_DIR mkl.h PATHS ${MKL_ROOT} PATH_SUFFIXES include) +set(__looked_for MKL_INCLUDE_DIR) + +# ---[ Find libraries +if(CMAKE_SIZEOF_VOID_P EQUAL 4) + set(__path_suffixes lib lib/ia32) +else() + set(__path_suffixes lib lib/intel64) +endif() + +set(__mkl_libs "") +if(MKL_USE_SINGLE_DYNAMIC_LIBRARY) + list(APPEND __mkl_libs rt) +else() + if(CMAKE_SIZEOF_VOID_P EQUAL 4) + if(WIN32) + list(APPEND __mkl_libs intel_c) + else() + list(APPEND __mkl_libs intel gf) + endif() + else() + list(APPEND __mkl_libs intel_lp64 gf_lp64) + endif() + + if(MKL_MULTI_THREADED) + list(APPEND __mkl_libs intel_thread) + else() + list(APPEND __mkl_libs sequential) + endif() + + list(APPEND __mkl_libs core cdft_core) +endif() + + +foreach (__lib ${__mkl_libs}) + set(__mkl_lib "mkl_${__lib}") + string(TOUPPER ${__mkl_lib} __mkl_lib_upper) + + if(MKL_USE_STATIC_LIBS) + set(__mkl_lib "lib${__mkl_lib}.a") + endif() + + find_library(${__mkl_lib_upper}_LIBRARY + NAMES ${__mkl_lib} + PATHS ${MKL_ROOT} "${MKL_INCLUDE_DIR}/.." 
+ PATH_SUFFIXES ${__path_suffixes} + DOC "The path to Intel(R) MKL ${__mkl_lib} library") + mark_as_advanced(${__mkl_lib_upper}_LIBRARY) + + list(APPEND __looked_for ${__mkl_lib_upper}_LIBRARY) + list(APPEND MKL_LIBRARIES ${${__mkl_lib_upper}_LIBRARY}) +endforeach() + + +if(NOT MKL_USE_SINGLE_DYNAMIC_LIBRARY) + if (MKL_USE_STATIC_LIBS) + set(__iomp5_libs iomp5 libiomp5mt.lib) + else() + set(__iomp5_libs iomp5 libiomp5md.lib) + endif() + + if(WIN32) + find_path(INTEL_INCLUDE_DIR omp.h PATHS ${INTEL_ROOT} PATH_SUFFIXES include) + list(APPEND __looked_for INTEL_INCLUDE_DIR) + endif() + + find_library(MKL_RTL_LIBRARY ${__iomp5_libs} + PATHS ${INTEL_RTL_ROOT} ${INTEL_ROOT}/compiler ${MKL_ROOT}/.. ${MKL_ROOT}/../compiler + PATH_SUFFIXES ${__path_suffixes} + DOC "Path to Path to OpenMP runtime library") + + list(APPEND __looked_for MKL_RTL_LIBRARY) + list(APPEND MKL_LIBRARIES ${MKL_RTL_LIBRARY}) +endif() + + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(MKL DEFAULT_MSG ${__looked_for}) + +if(MKL_FOUND) + message(STATUS "Found MKL (include: ${MKL_INCLUDE_DIR}, lib: ${MKL_LIBRARIES}") +endif() + +caffe_clear_vars(__looked_for __mkl_libs __path_suffixes __lib_suffix __iomp5_libs) diff --git a/cmake/Modules/FindMatlabMex.cmake b/cmake/Modules/FindMatlabMex.cmake new file mode 100755 index 0000000..28ae65e --- /dev/null +++ b/cmake/Modules/FindMatlabMex.cmake @@ -0,0 +1,48 @@ +# This module looks for MatlabMex compiler +# Defines variables: +# Matlab_DIR - Matlab root dir +# Matlab_mex - path to mex compiler +# Matlab_mexext - path to mexext + +if(MSVC) + foreach(__ver "9.30" "7.14" "7.11" "7.10" "7.9" "7.8" "7.7") + get_filename_component(__matlab_root "[HKEY_LOCAL_MACHINE\\SOFTWARE\\MathWorks\\MATLAB\\${__ver};MATLABROOT]" ABSOLUTE) + if(__matlab_root) + break() + endif() + endforeach() +endif() + +if(APPLE) + foreach(__ver "R2014b" "R2014a" "R2013b" "R2013a" "R2012b" "R2012a" "R2011b" "R2011a" "R2010b" "R2010a") + if(EXISTS /Applications/MATLAB_${__ver}.app) + set(__matlab_root /Applications/MATLAB_${__ver}.app) + break() + endif() + endforeach() +endif() + +if(UNIX) + execute_process(COMMAND which matlab OUTPUT_STRIP_TRAILING_WHITESPACE + OUTPUT_VARIABLE __out RESULT_VARIABLE __res) + + if(__res MATCHES 0) # Suppress `readlink` warning if `which` returned nothing + execute_process(COMMAND which matlab COMMAND xargs readlink + COMMAND xargs dirname COMMAND xargs dirname COMMAND xargs echo -n + OUTPUT_VARIABLE __matlab_root OUTPUT_STRIP_TRAILING_WHITESPACE) + endif() +endif() + + +find_path(Matlab_DIR NAMES bin/mex bin/mexext PATHS ${__matlab_root} + DOC "Matlab directory" NO_DEFAULT_PATH) + +find_program(Matlab_mex NAMES mex mex.bat HINTS ${Matlab_DIR} PATH_SUFFIXES bin NO_DEFAULT_PATH) +find_program(Matlab_mexext NAMES mexext mexext.bat HINTS ${Matlab_DIR} PATH_SUFFIXES bin NO_DEFAULT_PATH) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(MatlabMex DEFAULT_MSG Matlab_mex Matlab_mexext) + +if(MATLABMEX_FOUND) + mark_as_advanced(Matlab_mex Matlab_mexext) +endif() diff --git a/cmake/Modules/FindNumPy.cmake b/cmake/Modules/FindNumPy.cmake new file mode 100755 index 0000000..a671494 --- /dev/null +++ b/cmake/Modules/FindNumPy.cmake @@ -0,0 +1,58 @@ +# - Find the NumPy libraries +# This module finds if NumPy is installed, and sets the following variables +# indicating where it is. +# +# TODO: Update to provide the libraries and paths for linking npymath lib. 
+# +# NUMPY_FOUND - was NumPy found +# NUMPY_VERSION - the version of NumPy found as a string +# NUMPY_VERSION_MAJOR - the major version number of NumPy +# NUMPY_VERSION_MINOR - the minor version number of NumPy +# NUMPY_VERSION_PATCH - the patch version number of NumPy +# NUMPY_VERSION_DECIMAL - e.g. version 1.6.1 is 10601 +# NUMPY_INCLUDE_DIR - path to the NumPy include files + +unset(NUMPY_VERSION) +unset(NUMPY_INCLUDE_DIR) + +if(PYTHONINTERP_FOUND) + execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" + "import numpy as n; print(n.__version__); print(n.get_include());" + RESULT_VARIABLE __result + OUTPUT_VARIABLE __output + OUTPUT_STRIP_TRAILING_WHITESPACE) + + if(__result MATCHES 0) + string(REGEX REPLACE ";" "\\\\;" __values ${__output}) + string(REGEX REPLACE "\r?\n" ";" __values ${__values}) + list(GET __values 0 NUMPY_VERSION) + list(GET __values 1 NUMPY_INCLUDE_DIR) + + string(REGEX MATCH "^([0-9])+\\.([0-9])+\\.([0-9])+" __ver_check "${NUMPY_VERSION}") + if(NOT "${__ver_check}" STREQUAL "") + set(NUMPY_VERSION_MAJOR ${CMAKE_MATCH_1}) + set(NUMPY_VERSION_MINOR ${CMAKE_MATCH_2}) + set(NUMPY_VERSION_PATCH ${CMAKE_MATCH_3}) + math(EXPR NUMPY_VERSION_DECIMAL + "(${NUMPY_VERSION_MAJOR} * 10000) + (${NUMPY_VERSION_MINOR} * 100) + ${NUMPY_VERSION_PATCH}") + string(REGEX REPLACE "\\\\" "/" NUMPY_INCLUDE_DIR ${NUMPY_INCLUDE_DIR}) + else() + unset(NUMPY_VERSION) + unset(NUMPY_INCLUDE_DIR) + message(STATUS "Requested NumPy version and include path, but got instead:\n${__output}\n") + endif() + endif() +else() + message(STATUS "To find NumPy Python interpretator is required to be found.") +endif() + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(NumPy REQUIRED_VARS NUMPY_INCLUDE_DIR NUMPY_VERSION + VERSION_VAR NUMPY_VERSION) + +if(NUMPY_FOUND) + message(STATUS "NumPy ver. ${NUMPY_VERSION} found (include: ${NUMPY_INCLUDE_DIR})") +endif() + +caffe_clear_vars(__result __output __error_value __values __ver_check __error_value) + diff --git a/cmake/Modules/FindOpenBLAS.cmake b/cmake/Modules/FindOpenBLAS.cmake new file mode 100755 index 0000000..b843492 --- /dev/null +++ b/cmake/Modules/FindOpenBLAS.cmake @@ -0,0 +1,62 @@ + + +SET(Open_BLAS_INCLUDE_SEARCH_PATHS + /usr/include + /usr/include/openblas-base + /usr/local/include + /usr/local/include/openblas-base + /opt/OpenBLAS/include + $ENV{OpenBLAS_HOME} + $ENV{OpenBLAS_HOME}/include +) + +SET(Open_BLAS_LIB_SEARCH_PATHS + /lib/ + /lib/openblas-base + /lib64/ + /usr/lib + /usr/lib/openblas-base + /usr/lib64 + /usr/local/lib + /usr/local/lib64 + /opt/OpenBLAS/lib + $ENV{OpenBLAS}cd + $ENV{OpenBLAS}/lib + $ENV{OpenBLAS_HOME} + $ENV{OpenBLAS_HOME}/lib + ) + +FIND_PATH(OpenBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Open_BLAS_INCLUDE_SEARCH_PATHS}) +FIND_LIBRARY(OpenBLAS_LIB NAMES openblas PATHS ${Open_BLAS_LIB_SEARCH_PATHS}) + +SET(OpenBLAS_FOUND ON) + +# Check include files +IF(NOT OpenBLAS_INCLUDE_DIR) + SET(OpenBLAS_FOUND OFF) + MESSAGE(STATUS "Could not find OpenBLAS include. Turning OpenBLAS_FOUND off") +ENDIF() + +# Check libraries +IF(NOT OpenBLAS_LIB) + SET(OpenBLAS_FOUND OFF) + MESSAGE(STATUS "Could not find OpenBLAS lib. 
Turning OpenBLAS_FOUND off") +ENDIF() + +IF (OpenBLAS_FOUND) + IF (NOT OpenBLAS_FIND_QUIETLY) + MESSAGE(STATUS "Found OpenBLAS libraries: ${OpenBLAS_LIB}") + MESSAGE(STATUS "Found OpenBLAS include: ${OpenBLAS_INCLUDE_DIR}") + ENDIF (NOT OpenBLAS_FIND_QUIETLY) +ELSE (OpenBLAS_FOUND) + IF (OpenBLAS_FIND_REQUIRED) + MESSAGE(FATAL_ERROR "Could not find OpenBLAS") + ENDIF (OpenBLAS_FIND_REQUIRED) +ENDIF (OpenBLAS_FOUND) + +MARK_AS_ADVANCED( + OpenBLAS_INCLUDE_DIR + OpenBLAS_LIB + OpenBLAS +) + diff --git a/cmake/Modules/FindSnappy.cmake b/cmake/Modules/FindSnappy.cmake new file mode 100755 index 0000000..eff2a86 --- /dev/null +++ b/cmake/Modules/FindSnappy.cmake @@ -0,0 +1,28 @@ +# Find the Snappy libraries +# +# The following variables are optionally searched for defaults +# Snappy_ROOT_DIR: Base directory where all Snappy components are found +# +# The following are set after configuration is done: +# SNAPPY_FOUND +# Snappy_INCLUDE_DIR +# Snappy_LIBRARIES + +find_path(Snappy_INCLUDE_DIR NAMES snappy.h + PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/include) + +find_library(Snappy_LIBRARIES NAMES snappy + PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/lib) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(Snappy DEFAULT_MSG Snappy_INCLUDE_DIR Snappy_LIBRARIES) + +if(SNAPPY_FOUND) + message(STATUS "Found Snappy (include: ${Snappy_INCLUDE_DIR}, library: ${Snappy_LIBRARIES})") + mark_as_advanced(Snappy_INCLUDE_DIR Snappy_LIBRARIES) + + caffe_parse_header(${Snappy_INCLUDE_DIR}/snappy-stubs-public.h + SNAPPY_VERION_LINES SNAPPY_MAJOR SNAPPY_MINOR SNAPPY_PATCHLEVEL) + set(Snappy_VERSION "${SNAPPY_MAJOR}.${SNAPPY_MINOR}.${SNAPPY_PATCHLEVEL}") +endif() + diff --git a/cmake/Modules/FindvecLib.cmake b/cmake/Modules/FindvecLib.cmake new file mode 100755 index 0000000..9600da4 --- /dev/null +++ b/cmake/Modules/FindvecLib.cmake @@ -0,0 +1,34 @@ +# Find the vecLib libraries as part of Accelerate.framework or as standalon framework +# +# The following are set after configuration is done: +# VECLIB_FOUND +# vecLib_INCLUDE_DIR +# vecLib_LINKER_LIBS + + +if(NOT APPLE) + return() +endif() + +set(__veclib_include_suffix "Frameworks/vecLib.framework/Versions/Current/Headers") + +find_path(vecLib_INCLUDE_DIR vecLib.h + DOC "vecLib include directory" + PATHS /System/Library/${__veclib_include_suffix} + /System/Library/Frameworks/Accelerate.framework/Versions/Current/${__veclib_include_suffix} + /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(vecLib DEFAULT_MSG vecLib_INCLUDE_DIR) + +if(VECLIB_FOUND) + if(vecLib_INCLUDE_DIR MATCHES "^/System/Library/Frameworks/vecLib.framework.*") + set(vecLib_LINKER_LIBS -lcblas "-framework vecLib") + message(STATUS "Found standalone vecLib.framework") + else() + set(vecLib_LINKER_LIBS -lcblas "-framework Accelerate") + message(STATUS "Found vecLib as part of Accelerate.framework") + endif() + + mark_as_advanced(vecLib_INCLUDE_DIR) +endif() diff --git a/cmake/ProtoBuf.cmake b/cmake/ProtoBuf.cmake new file mode 100755 index 0000000..fc799bd --- /dev/null +++ b/cmake/ProtoBuf.cmake @@ -0,0 +1,90 @@ +# Finds Google Protocol Buffers library and compilers and extends +# the standard cmake script with version and python generation support + +find_package( Protobuf REQUIRED ) +include_directories(SYSTEM ${PROTOBUF_INCLUDE_DIR}) +list(APPEND 
Caffe_LINKER_LIBS ${PROTOBUF_LIBRARIES}) + +# As of Ubuntu 14.04 protoc is no longer a part of libprotobuf-dev package +# and should be installed separately as in: sudo apt-get install protobuf-compiler +if(EXISTS ${PROTOBUF_PROTOC_EXECUTABLE}) + message(STATUS "Found PROTOBUF Compiler: ${PROTOBUF_PROTOC_EXECUTABLE}") +else() + message(FATAL_ERROR "Could not find PROTOBUF Compiler") +endif() + +if(PROTOBUF_FOUND) + # fetches protobuf version + caffe_parse_header(${PROTOBUF_INCLUDE_DIR}/google/protobuf/stubs/common.h VERION_LINE GOOGLE_PROTOBUF_VERSION) + string(REGEX MATCH "([0-9])00([0-9])00([0-9])" PROTOBUF_VERSION ${GOOGLE_PROTOBUF_VERSION}) + set(PROTOBUF_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}") + unset(GOOGLE_PROTOBUF_VERSION) +endif() + +# place where to generate protobuf sources +set(proto_gen_folder "${PROJECT_BINARY_DIR}/include/caffe/proto") +include_directories(SYSTEM "${PROJECT_BINARY_DIR}/include") + +set(PROTOBUF_GENERATE_CPP_APPEND_PATH TRUE) + +################################################################################################ +# Modification of standard 'protobuf_generate_cpp()' with output dir parameter and python support +# Usage: +# caffe_protobuf_generate_cpp_py( ) +function(caffe_protobuf_generate_cpp_py output_dir srcs_var hdrs_var python_var) + if(NOT ARGN) + message(SEND_ERROR "Error: caffe_protobuf_generate_cpp_py() called without any proto files") + return() + endif() + + if(PROTOBUF_GENERATE_CPP_APPEND_PATH) + # Create an include path for each file specified + foreach(fil ${ARGN}) + get_filename_component(abs_fil ${fil} ABSOLUTE) + get_filename_component(abs_path ${abs_fil} PATH) + list(FIND _protoc_include ${abs_path} _contains_already) + if(${_contains_already} EQUAL -1) + list(APPEND _protoc_include -I ${abs_path}) + endif() + endforeach() + else() + set(_protoc_include -I ${CMAKE_CURRENT_SOURCE_DIR}) + endif() + + if(DEFINED PROTOBUF_IMPORT_DIRS) + foreach(dir ${PROTOBUF_IMPORT_DIRS}) + get_filename_component(abs_path ${dir} ABSOLUTE) + list(FIND _protoc_include ${abs_path} _contains_already) + if(${_contains_already} EQUAL -1) + list(APPEND _protoc_include -I ${abs_path}) + endif() + endforeach() + endif() + + set(${srcs_var}) + set(${hdrs_var}) + set(${python_var}) + foreach(fil ${ARGN}) + get_filename_component(abs_fil ${fil} ABSOLUTE) + get_filename_component(fil_we ${fil} NAME_WE) + + list(APPEND ${srcs_var} "${output_dir}/${fil_we}.pb.cc") + list(APPEND ${hdrs_var} "${output_dir}/${fil_we}.pb.h") + list(APPEND ${python_var} "${output_dir}/${fil_we}_pb2.py") + + add_custom_command( + OUTPUT "${output_dir}/${fil_we}.pb.cc" + "${output_dir}/${fil_we}.pb.h" + "${output_dir}/${fil_we}_pb2.py" + COMMAND ${CMAKE_COMMAND} -E make_directory "${output_dir}" + COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --cpp_out ${output_dir} ${_protoc_include} ${abs_fil} + COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --python_out ${output_dir} ${_protoc_include} ${abs_fil} + DEPENDS ${abs_fil} + COMMENT "Running C++/Python protocol buffer compiler on ${fil}" VERBATIM ) + endforeach() + + set_source_files_properties(${${srcs_var}} ${${hdrs_var}} ${${python_var}} PROPERTIES GENERATED TRUE) + set(${srcs_var} ${${srcs_var}} PARENT_SCOPE) + set(${hdrs_var} ${${hdrs_var}} PARENT_SCOPE) + set(${python_var} ${${python_var}} PARENT_SCOPE) +endfunction() diff --git a/cmake/Summary.cmake b/cmake/Summary.cmake new file mode 100755 index 0000000..e094ac0 --- /dev/null +++ b/cmake/Summary.cmake @@ -0,0 +1,168 @@ 
+################################################################################################ +# Caffe status report function. +# Automatically align right column and selects text based on condition. +# Usage: +# caffe_status() +# caffe_status( [ ...]) +# caffe_status( THEN ELSE ) +function(caffe_status text) + set(status_cond) + set(status_then) + set(status_else) + + set(status_current_name "cond") + foreach(arg ${ARGN}) + if(arg STREQUAL "THEN") + set(status_current_name "then") + elseif(arg STREQUAL "ELSE") + set(status_current_name "else") + else() + list(APPEND status_${status_current_name} ${arg}) + endif() + endforeach() + + if(DEFINED status_cond) + set(status_placeholder_length 23) + string(RANDOM LENGTH ${status_placeholder_length} ALPHABET " " status_placeholder) + string(LENGTH "${text}" status_text_length) + if(status_text_length LESS status_placeholder_length) + string(SUBSTRING "${text}${status_placeholder}" 0 ${status_placeholder_length} status_text) + elseif(DEFINED status_then OR DEFINED status_else) + message(STATUS "${text}") + set(status_text "${status_placeholder}") + else() + set(status_text "${text}") + endif() + + if(DEFINED status_then OR DEFINED status_else) + if(${status_cond}) + string(REPLACE ";" " " status_then "${status_then}") + string(REGEX REPLACE "^[ \t]+" "" status_then "${status_then}") + message(STATUS "${status_text} ${status_then}") + else() + string(REPLACE ";" " " status_else "${status_else}") + string(REGEX REPLACE "^[ \t]+" "" status_else "${status_else}") + message(STATUS "${status_text} ${status_else}") + endif() + else() + string(REPLACE ";" " " status_cond "${status_cond}") + string(REGEX REPLACE "^[ \t]+" "" status_cond "${status_cond}") + message(STATUS "${status_text} ${status_cond}") + endif() + else() + message(STATUS "${text}") + endif() +endfunction() + + +################################################################################################ +# Function for fetching Caffe version from git and headers +# Usage: +# caffe_extract_caffe_version() +function(caffe_extract_caffe_version) + set(Caffe_GIT_VERSION "unknown") + find_package(Git) + if(GIT_FOUND) + execute_process(COMMAND ${GIT_EXECUTABLE} describe --tags --always --dirty + ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE + WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}" + OUTPUT_VARIABLE Caffe_GIT_VERSION + RESULT_VARIABLE __git_result) + if(NOT ${__git_result} EQUAL 0) + set(Caffe_GIT_VERSION "unknown") + endif() + endif() + + set(Caffe_GIT_VERSION ${Caffe_GIT_VERSION} PARENT_SCOPE) + set(Caffe_VERSION " (Caffe doesn't declare its version in headers)" PARENT_SCOPE) + + # caffe_parse_header(${Caffe_INCLUDE_DIR}/caffe/version.hpp Caffe_VERSION_LINES CAFFE_MAJOR CAFFE_MINOR CAFFE_PATCH) + # set(Caffe_VERSION "${CAFFE_MAJOR}.${CAFFE_MINOR}.${CAFFE_PATCH}" PARENT_SCOPE) + + # or for #define Caffe_VERSION "x.x.x" + # caffe_parse_header_single_define(Caffe ${Caffe_INCLUDE_DIR}/caffe/version.hpp Caffe_VERSION) + # set(Caffe_VERSION ${Caffe_VERSION_STRING} PARENT_SCOPE) + +endfunction() + + +################################################################################################ +# Prints accumulated caffe configuration summary +# Usage: +# caffe_print_configuration_summary() + +function(caffe_print_configuration_summary) + caffe_extract_caffe_version() + set(Caffe_VERSION ${Caffe_VERSION} PARENT_SCOPE) + + caffe_merge_flag_lists(__flags_rel CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS) + caffe_merge_flag_lists(__flags_deb CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS) + + 
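+  # Each caffe_status() call below emits one aligned summary line; the THEN/ELSE form
+  # chooses which value to print based on the preceding condition (see the caffe_status()
+  # helper defined at the top of this file).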
caffe_status("") + caffe_status("******************* Caffe Configuration Summary *******************") + caffe_status("General:") + caffe_status(" Version : ${Caffe_VERSION}") + caffe_status(" Git : ${Caffe_GIT_VERSION}") + caffe_status(" System : ${CMAKE_SYSTEM_NAME}") + caffe_status(" C++ compiler : ${CMAKE_CXX_COMPILER}") + caffe_status(" Release CXX flags : ${__flags_rel}") + caffe_status(" Debug CXX flags : ${__flags_deb}") + caffe_status(" Build type : ${CMAKE_BUILD_TYPE}") + caffe_status("") + caffe_status(" BUILD_SHARED_LIBS : ${BUILD_SHARED_LIBS}") + caffe_status(" BUILD_python : ${BUILD_python}") + caffe_status(" BUILD_matlab : ${BUILD_matlab}") + caffe_status(" BUILD_docs : ${BUILD_docs}") + caffe_status(" CPU_ONLY : ${CPU_ONLY}") + caffe_status("") + caffe_status("Dependencies:") + caffe_status(" BLAS : " APPLE THEN "Yes (vecLib)" ELSE "Yes (${BLAS})") + caffe_status(" Boost : Yes (ver. ${Boost_MAJOR_VERSION}.${Boost_MINOR_VERSION})") + caffe_status(" glog : Yes") + caffe_status(" gflags : Yes") + caffe_status(" protobuf : " PROTOBUF_FOUND THEN "Yes (ver. ${PROTOBUF_VERSION})" ELSE "No" ) + caffe_status(" lmdb : " LMDB_FOUND THEN "Yes (ver. ${LMDB_VERSION})" ELSE "No") + caffe_status(" Snappy : " SNAPPY_FOUND THEN "Yes (ver. ${Snappy_VERSION})" ELSE "No" ) + caffe_status(" LevelDB : " LEVELDB_FOUND THEN "Yes (ver. ${LEVELDB_VERSION})" ELSE "No") + caffe_status(" OpenCV : Yes (ver. ${OpenCV_VERSION})") + caffe_status(" CUDA : " HAVE_CUDA THEN "Yes (ver. ${CUDA_VERSION})" ELSE "No" ) + caffe_status("") + if(HAVE_CUDA) + caffe_status("NVIDIA CUDA:") + caffe_status(" Target GPU(s) : ${CUDA_ARCH_NAME}" ) + caffe_status(" GPU arch(s) : ${NVCC_FLAGS_EXTRA_readable}") + if(USE_CUDNN) + caffe_status(" cuDNN : " HAVE_CUDNN THEN "Yes" ELSE "Not found") + else() + caffe_status(" cuDNN : Disabled") + endif() + caffe_status("") + endif() + if(HAVE_PYTHON) + caffe_status("Python:") + caffe_status(" Interpreter :" PYTHON_EXECUTABLE THEN "${PYTHON_EXECUTABLE} (ver. 
${PYTHON_VERSION_STRING})" ELSE "No") + caffe_status(" Libraries :" PYTHONLIBS_FOUND THEN "${PYTHON_LIBRARIES} (ver ${PYTHONLIBS_VERSION_STRING})" ELSE "No") + caffe_status(" NumPy :" NUMPY_FOUND THEN "${NUMPY_INCLUDE_DIR} (ver ${NUMPY_VERSION})" ELSE "No") + caffe_status("") + endif() + if(BUILD_matlab) + caffe_status("Matlab:") + caffe_status(" Matlab :" HAVE_MATLAB THEN "Yes (${Matlab_mex}, ${Matlab_mexext}" ELSE "No") + caffe_status(" Octave :" Octave_compiler THEN "Yes (${Octave_compiler})" ELSE "No") + if(HAVE_MATLAB AND Octave_compiler) + caffe_status(" Build mex using : ${Matlab_build_mex_using}") + endif() + caffe_status("") + endif() + if(BUILD_docs) + caffe_status("Documentaion:") + caffe_status(" Doxygen :" DOXYGEN_FOUND THEN "${DOXYGEN_EXECUTABLE} (${DOXYGEN_VERSION})" ELSE "No") + caffe_status(" config_file : ${DOXYGEN_config_file}") + + caffe_status("") + endif() + caffe_status("Install:") + caffe_status(" Install path : ${CMAKE_INSTALL_PREFIX}") + caffe_status("") +endfunction() + diff --git a/cmake/Targets.cmake b/cmake/Targets.cmake new file mode 100755 index 0000000..2401f25 --- /dev/null +++ b/cmake/Targets.cmake @@ -0,0 +1,173 @@ +################################################################################################ +# Defines global Caffe_LINK flag, This flag is required to prevent linker from excluding +# some objects which are not addressed directly but are registered via static constructors +if(BUILD_SHARED_LIBS) + set(Caffe_LINK caffe) +else() + if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") + set(Caffe_LINK -Wl,-force_load caffe) + elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") + set(Caffe_LINK -Wl,--whole-archive caffe -Wl,--no-whole-archive) + endif() +endif() + +################################################################################################ +# Convenient command to setup source group for IDEs that support this feature (VS, XCode) +# Usage: +# caffe_source_group( GLOB[_RECURSE] ) +function(caffe_source_group group) + cmake_parse_arguments(CAFFE_SOURCE_GROUP "" "" "GLOB;GLOB_RECURSE" ${ARGN}) + if(CAFFE_SOURCE_GROUP_GLOB) + file(GLOB srcs1 ${CAFFE_SOURCE_GROUP_GLOB}) + source_group(${group} FILES ${srcs1}) + endif() + + if(CAFFE_SOURCE_GROUP_GLOB_RECURSE) + file(GLOB_RECURSE srcs2 ${CAFFE_SOURCE_GROUP_GLOB_RECURSE}) + source_group(${group} FILES ${srcs2}) + endif() +endfunction() + +################################################################################################ +# Collecting sources from globbing and appending to output list variable +# Usage: +# caffe_collect_sources( GLOB[_RECURSE] ) +function(caffe_collect_sources variable) + cmake_parse_arguments(CAFFE_COLLECT_SOURCES "" "" "GLOB;GLOB_RECURSE" ${ARGN}) + if(CAFFE_COLLECT_SOURCES_GLOB) + file(GLOB srcs1 ${CAFFE_COLLECT_SOURCES_GLOB}) + set(${variable} ${variable} ${srcs1}) + endif() + + if(CAFFE_COLLECT_SOURCES_GLOB_RECURSE) + file(GLOB_RECURSE srcs2 ${CAFFE_COLLECT_SOURCES_GLOB_RECURSE}) + set(${variable} ${variable} ${srcs2}) + endif() +endfunction() + +################################################################################################ +# Short command getting caffe sources (assuming standard Caffe code tree) +# Usage: +# caffe_pickup_caffe_sources() +function(caffe_pickup_caffe_sources root) + # put all files in source groups (visible as subfolder in many IDEs) + caffe_source_group("Include" GLOB "${root}/include/caffe/*.h*") + caffe_source_group("Include\\Util" GLOB "${root}/include/caffe/util/*.h*") + caffe_source_group("Include" GLOB 
"${PROJECT_BINARY_DIR}/caffe_config.h*") + caffe_source_group("Source" GLOB "${root}/src/caffe/*.cpp") + caffe_source_group("Source\\Util" GLOB "${root}/src/caffe/util/*.cpp") + caffe_source_group("Source\\Layers" GLOB "${root}/src/caffe/layers/*.cpp") + caffe_source_group("Source\\Cuda" GLOB "${root}/src/caffe/layers/*.cu") + caffe_source_group("Source\\Cuda" GLOB "${root}/src/caffe/util/*.cu") + caffe_source_group("Source\\Proto" GLOB "${root}/src/caffe/proto/*.proto") + + # source groups for test target + caffe_source_group("Include" GLOB "${root}/include/caffe/test/test_*.h*") + caffe_source_group("Source" GLOB "${root}/src/caffe/test/test_*.cpp") + caffe_source_group("Source\\Cuda" GLOB "${root}/src/caffe/test/test_*.cu") + + # collect files + file(GLOB test_hdrs ${root}/include/caffe/test/test_*.h*) + file(GLOB test_srcs ${root}/src/caffe/test/test_*.cpp) + file(GLOB_RECURSE hdrs ${root}/include/caffe/*.h*) + file(GLOB_RECURSE srcs ${root}/src/caffe/*.cpp) + list(REMOVE_ITEM hdrs ${test_hdrs}) + list(REMOVE_ITEM srcs ${test_srcs}) + + # adding headers to make the visible in some IDEs (Qt, VS, Xcode) + list(APPEND srcs ${hdrs} ${PROJECT_BINARY_DIR}/caffe_config.h) + list(APPEND test_srcs ${test_hdrs}) + + # collect cuda files + file(GLOB test_cuda ${root}/src/caffe/test/test_*.cu) + file(GLOB_RECURSE cuda ${root}/src/caffe/*.cu) + list(REMOVE_ITEM cuda ${test_cuda}) + + # add proto to make them editable in IDEs too + file(GLOB_RECURSE proto_files ${root}/src/caffe/*.proto) + list(APPEND srcs ${proto_files}) + + # convet to absolute paths + caffe_convert_absolute_paths(srcs) + caffe_convert_absolute_paths(cuda) + caffe_convert_absolute_paths(test_srcs) + caffe_convert_absolute_paths(test_cuda) + + # propogate to parent scope + set(srcs ${srcs} PARENT_SCOPE) + set(cuda ${cuda} PARENT_SCOPE) + set(test_srcs ${test_srcs} PARENT_SCOPE) + set(test_cuda ${test_cuda} PARENT_SCOPE) +endfunction() + +################################################################################################ +# Short command for setting defeault target properties +# Usage: +# caffe_default_properties() +function(caffe_default_properties target) + set_target_properties(${target} PROPERTIES + DEBUG_POSTFIX ${Caffe_DEBUG_POSTFIX} + ARCHIVE_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib" + LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib" + RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/bin") + # make sure we build all external depepdencies first + if (DEFINED external_project_dependencies) + add_dependencies(${target} ${external_project_dependencies}) + endif() +endfunction() + +################################################################################################ +# Short command for setting runtime directory for build target +# Usage: +# caffe_set_runtime_directory( ) +function(caffe_set_runtime_directory target dir) + set_target_properties(${target} PROPERTIES + RUNTIME_OUTPUT_DIRECTORY "${dir}") +endfunction() + +################################################################################################ +# Short command for setting solution folder property for target +# Usage: +# caffe_set_solution_folder( ) +function(caffe_set_solution_folder target folder) + if(USE_PROJECT_FOLDERS) + set_target_properties(${target} PROPERTIES FOLDER "${folder}") + endif() +endfunction() + +################################################################################################ +# Reads lines from input file, prepends source directory to each line and writes to output file +# Usage: +# 
caffe_configure_testdatafile() +function(caffe_configure_testdatafile file) + file(STRINGS ${file} __lines) + set(result "") + foreach(line ${__lines}) + set(result "${result}${PROJECT_SOURCE_DIR}/${line}\n") + endforeach() + file(WRITE ${file}.gen.cmake ${result}) +endfunction() + +################################################################################################ +# Filter out all files that are not included in selected list +# Usage: +# caffe_leave_only_selected_tests( ) +function(caffe_leave_only_selected_tests file_list) + if(NOT ARGN) + return() # blank list means leave all + endif() + string(REPLACE "," ";" __selected ${ARGN}) + list(APPEND __selected caffe_main) + + set(result "") + foreach(f ${${file_list}}) + get_filename_component(name ${f} NAME_WE) + string(REGEX REPLACE "^test_" "" name ${name}) + list(FIND __selected ${name} __index) + if(NOT __index EQUAL -1) + list(APPEND result ${f}) + endif() + endforeach() + set(${file_list} ${result} PARENT_SCOPE) +endfunction() + diff --git a/cmake/Templates/CaffeConfig.cmake.in b/cmake/Templates/CaffeConfig.cmake.in new file mode 100755 index 0000000..8f23742 --- /dev/null +++ b/cmake/Templates/CaffeConfig.cmake.in @@ -0,0 +1,58 @@ +# Config file for the Caffe package. +# +# Note: +# Caffe and this config file depends on opencv, +# so put `find_package(OpenCV)` before searching Caffe +# via `find_package(Caffe)`. All other lib/includes +# dependencies are hard coded in the file +# +# After successful configuration the following variables +# will be defined: +# +# Caffe_INCLUDE_DIRS - Caffe include directories +# Caffe_LIBRARIES - libraries to link against +# Caffe_DEFINITIONS - a list of definitions to pass to compiler +# +# Caffe_HAVE_CUDA - signals about CUDA support +# Caffe_HAVE_CUDNN - signals about cuDNN support + + +# OpenCV dependency + +if(NOT OpenCV_FOUND) + set(Caffe_OpenCV_CONFIG_PATH "@OpenCV_CONFIG_PATH@") + if(Caffe_OpenCV_CONFIG_PATH) + get_filename_component(Caffe_OpenCV_CONFIG_PATH ${Caffe_OpenCV_CONFIG_PATH} ABSOLUTE) + + if(EXISTS ${Caffe_OpenCV_CONFIG_PATH} AND NOT TARGET opencv_core) + message(STATUS "Caffe: using OpenCV config from ${Caffe_OpenCV_CONFIG_PATH}") + include(${Caffe_OpenCV_CONFIG_PATH}/OpenCVModules.cmake) + endif() + + else() + find_package(OpenCV REQUIRED) + endif() + unset(Caffe_OpenCV_CONFIG_PATH) +endif() + +# Compute paths +get_filename_component(Caffe_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) +set(Caffe_INCLUDE_DIRS "@Caffe_INCLUDE_DIRS@") + +@Caffe_INSTALL_INCLUDE_DIR_APPEND_COMMAND@ + +# Our library dependencies +if(NOT TARGET caffe AND NOT caffe_BINARY_DIR) + include("${Caffe_CMAKE_DIR}/CaffeTargets.cmake") +endif() + +# List of IMPORTED libs created by CaffeTargets.cmake +set(Caffe_LIBRARIES caffe) + +# Definitions +set(Caffe_DEFINITIONS "@Caffe_DEFINITIONS@") + +# Cuda support variables +set(Caffe_CPU_ONLY @CPU_ONLY@) +set(Caffe_HAVE_CUDA @HAVE_CUDA@) +set(Caffe_HAVE_CUDNN @HAVE_CUDNN@) diff --git a/cmake/Templates/CaffeConfigVersion.cmake.in b/cmake/Templates/CaffeConfigVersion.cmake.in new file mode 100755 index 0000000..19f8530 --- /dev/null +++ b/cmake/Templates/CaffeConfigVersion.cmake.in @@ -0,0 +1,11 @@ +set(PACKAGE_VERSION "@Caffe_VERSION@") + +# Check whether the requested PACKAGE_FIND_VERSION is compatible +if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}") + set(PACKAGE_VERSION_COMPATIBLE FALSE) +else() + set(PACKAGE_VERSION_COMPATIBLE TRUE) + if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}") + set(PACKAGE_VERSION_EXACT TRUE) + 
endif() +endif() diff --git a/cmake/Templates/caffe_config.h.in b/cmake/Templates/caffe_config.h.in new file mode 100755 index 0000000..6039e8f --- /dev/null +++ b/cmake/Templates/caffe_config.h.in @@ -0,0 +1,32 @@ +/* Sources directory */ +#define SOURCE_FOLDER "${PROJECT_SOURCE_DIR}" + +/* Binaries directory */ +#define BINARY_FOLDER "${PROJECT_BINARY_DIR}" + +/* NVIDA Cuda */ +#cmakedefine HAVE_CUDA + +/* NVIDA cuDNN */ +#cmakedefine HAVE_CUDNN +#cmakedefine USE_CUDNN + +/* NVIDA cuDNN */ +#cmakedefine CPU_ONLY + +/* Test device */ +#define CUDA_TEST_DEVICE ${CUDA_TEST_DEVICE} + +/* Temporary (TODO: remove) */ +#if 1 + #define CMAKE_SOURCE_DIR SOURCE_FOLDER "/src/" + #define EXAMPLES_SOURCE_DIR BINARY_FOLDER "/examples/" + #define CMAKE_EXT ".gen.cmake" +#else + #define CMAKE_SOURCE_DIR "src/" + #define EXAMPLES_SOURCE_DIR "examples/" + #define CMAKE_EXT "" +#endif + +/* Matlab */ +#cmakedefine HAVE_MATLAB diff --git a/cmake/Utils.cmake b/cmake/Utils.cmake new file mode 100755 index 0000000..a1bde1a --- /dev/null +++ b/cmake/Utils.cmake @@ -0,0 +1,381 @@ +################################################################################################ +# Command alias for debugging messages +# Usage: +# dmsg() +function(dmsg) + message(STATUS ${ARGN}) +endfunction() + +################################################################################################ +# Removes duplicates from list(s) +# Usage: +# caffe_list_unique( [] [...]) +macro(caffe_list_unique) + foreach(__lst ${ARGN}) + if(${__lst}) + list(REMOVE_DUPLICATES ${__lst}) + endif() + endforeach() +endmacro() + +################################################################################################ +# Clears variables from list +# Usage: +# caffe_clear_vars() +macro(caffe_clear_vars) + foreach(_var ${ARGN}) + unset(${_var}) + endforeach() +endmacro() + +################################################################################################ +# Removes duplicates from string +# Usage: +# caffe_string_unique() +function(caffe_string_unique __string) + if(${__string}) + set(__list ${${__string}}) + separate_arguments(__list) + list(REMOVE_DUPLICATES __list) + foreach(__e ${__list}) + set(__str "${__str} ${__e}") + endforeach() + set(${__string} ${__str} PARENT_SCOPE) + endif() +endfunction() + +################################################################################################ +# Prints list element per line +# Usage: +# caffe_print_list() +function(caffe_print_list) + foreach(e ${ARGN}) + message(STATUS ${e}) + endforeach() +endfunction() + +################################################################################################ +# Function merging lists of compiler flags to single string. +# Usage: +# caffe_merge_flag_lists(out_variable [] [] ...) 
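+#   (the optional arguments are names of list variables holding compiler flags;
+#    as a purely illustrative example with hypothetical variables,
+#      caffe_merge_flag_lists(__all_flags __warn_flags __opt_flags)
+#    would leave one space-separated string of the merged flags in __all_flags)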
+function(caffe_merge_flag_lists out_var) + set(__result "") + foreach(__list ${ARGN}) + foreach(__flag ${${__list}}) + string(STRIP ${__flag} __flag) + set(__result "${__result} ${__flag}") + endforeach() + endforeach() + string(STRIP ${__result} __result) + set(${out_var} ${__result} PARENT_SCOPE) +endfunction() + +################################################################################################ +# Converts all paths in list to absolute +# Usage: +# caffe_convert_absolute_paths() +function(caffe_convert_absolute_paths variable) + set(__dlist "") + foreach(__s ${${variable}}) + get_filename_component(__abspath ${__s} ABSOLUTE) + list(APPEND __list ${__abspath}) + endforeach() + set(${variable} ${__list} PARENT_SCOPE) +endfunction() + +################################################################################################ +# Reads set of version defines from the header file +# Usage: +# caffe_parse_header( ..) +macro(caffe_parse_header FILENAME FILE_VAR) + set(vars_regex "") + set(__parnet_scope OFF) + set(__add_cache OFF) + foreach(name ${ARGN}) + if("${name}" STREQUAL "PARENT_SCOPE") + set(__parnet_scope ON) + elseif("${name}" STREQUAL "CACHE") + set(__add_cache ON) + elseif(vars_regex) + set(vars_regex "${vars_regex}|${name}") + else() + set(vars_regex "${name}") + endif() + endforeach() + if(EXISTS "${FILENAME}") + file(STRINGS "${FILENAME}" ${FILE_VAR} REGEX "#define[ \t]+(${vars_regex})[ \t]+[0-9]+" ) + else() + unset(${FILE_VAR}) + endif() + foreach(name ${ARGN}) + if(NOT "${name}" STREQUAL "PARENT_SCOPE" AND NOT "${name}" STREQUAL "CACHE") + if(${FILE_VAR}) + if(${FILE_VAR} MATCHES ".+[ \t]${name}[ \t]+([0-9]+).*") + string(REGEX REPLACE ".+[ \t]${name}[ \t]+([0-9]+).*" "\\1" ${name} "${${FILE_VAR}}") + else() + set(${name} "") + endif() + if(__add_cache) + set(${name} ${${name}} CACHE INTERNAL "${name} parsed from ${FILENAME}" FORCE) + elseif(__parnet_scope) + set(${name} "${${name}}" PARENT_SCOPE) + endif() + else() + unset(${name} CACHE) + endif() + endif() + endforeach() +endmacro() + +################################################################################################ +# Reads single version define from the header file and parses it +# Usage: +# caffe_parse_header_single_define( ) +function(caffe_parse_header_single_define LIBNAME HDR_PATH VARNAME) + set(${LIBNAME}_H "") + if(EXISTS "${HDR_PATH}") + file(STRINGS "${HDR_PATH}" ${LIBNAME}_H REGEX "^#define[ \t]+${VARNAME}[ \t]+\"[^\"]*\".*$" LIMIT_COUNT 1) + endif() + + if(${LIBNAME}_H) + string(REGEX REPLACE "^.*[ \t]${VARNAME}[ \t]+\"([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_MAJOR "${${LIBNAME}_H}") + string(REGEX REPLACE "^.*[ \t]${VARNAME}[ \t]+\"[0-9]+\\.([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_MINOR "${${LIBNAME}_H}") + string(REGEX REPLACE "^.*[ \t]${VARNAME}[ \t]+\"[0-9]+\\.[0-9]+\\.([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_PATCH "${${LIBNAME}_H}") + set(${LIBNAME}_VERSION_MAJOR ${${LIBNAME}_VERSION_MAJOR} ${ARGN} PARENT_SCOPE) + set(${LIBNAME}_VERSION_MINOR ${${LIBNAME}_VERSION_MINOR} ${ARGN} PARENT_SCOPE) + set(${LIBNAME}_VERSION_PATCH ${${LIBNAME}_VERSION_PATCH} ${ARGN} PARENT_SCOPE) + set(${LIBNAME}_VERSION_STRING "${${LIBNAME}_VERSION_MAJOR}.${${LIBNAME}_VERSION_MINOR}.${${LIBNAME}_VERSION_PATCH}" PARENT_SCOPE) + + # append a TWEAK version if it exists: + set(${LIBNAME}_VERSION_TWEAK "") + if("${${LIBNAME}_H}" MATCHES "^.*[ \t]${VARNAME}[ \t]+\"[0-9]+\\.[0-9]+\\.[0-9]+\\.([0-9]+).*$") + set(${LIBNAME}_VERSION_TWEAK "${CMAKE_MATCH_1}" ${ARGN} PARENT_SCOPE) + endif() + 
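+    # if a fourth (tweak) version component was captured above, append it to the
+    # dotted version string; otherwise the MAJOR.MINOR.PATCH string is kept as-is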
if(${LIBNAME}_VERSION_TWEAK) + set(${LIBNAME}_VERSION_STRING "${${LIBNAME}_VERSION_STRING}.${${LIBNAME}_VERSION_TWEAK}" ${ARGN} PARENT_SCOPE) + else() + set(${LIBNAME}_VERSION_STRING "${${LIBNAME}_VERSION_STRING}" ${ARGN} PARENT_SCOPE) + endif() + endif() +endfunction() + +######################################################################################################## +# An option that the user can select. Can accept condition to control when option is available for user. +# Usage: +# caffe_option( "doc string" [IF ]) +function(caffe_option variable description value) + set(__value ${value}) + set(__condition "") + set(__varname "__value") + foreach(arg ${ARGN}) + if(arg STREQUAL "IF" OR arg STREQUAL "if") + set(__varname "__condition") + else() + list(APPEND ${__varname} ${arg}) + endif() + endforeach() + unset(__varname) + if("${__condition}" STREQUAL "") + set(__condition 2 GREATER 1) + endif() + + if(${__condition}) + if("${__value}" MATCHES ";") + if(${__value}) + option(${variable} "${description}" ON) + else() + option(${variable} "${description}" OFF) + endif() + elseif(DEFINED ${__value}) + if(${__value}) + option(${variable} "${description}" ON) + else() + option(${variable} "${description}" OFF) + endif() + else() + option(${variable} "${description}" ${__value}) + endif() + else() + unset(${variable} CACHE) + endif() +endfunction() + +################################################################################################ +# Utility macro for comparing two lists. Used for CMake debugging purposes +# Usage: +# caffe_compare_lists( [description]) +function(caffe_compare_lists list1 list2 desc) + set(__list1 ${${list1}}) + set(__list2 ${${list2}}) + list(SORT __list1) + list(SORT __list2) + list(LENGTH __list1 __len1) + list(LENGTH __list2 __len2) + + if(NOT ${__len1} EQUAL ${__len2}) + message(FATAL_ERROR "Lists are not equal. ${__len1} != ${__len2}. ${desc}") + endif() + + foreach(__i RANGE 1 ${__len1}) + math(EXPR __index "${__i}- 1") + list(GET __list1 ${__index} __item1) + list(GET __list2 ${__index} __item2) + if(NOT ${__item1} STREQUAL ${__item2}) + message(FATAL_ERROR "Lists are not equal. Differ at element ${__index}. 
${desc}") + endif() + endforeach() +endfunction() + +################################################################################################ +# Command for disabling warnings for different platforms (see below for gcc and VisualStudio) +# Usage: +# caffe_warnings_disable( -Wshadow /wd4996 ..,) +macro(caffe_warnings_disable) + set(_flag_vars "") + set(_msvc_warnings "") + set(_gxx_warnings "") + + foreach(arg ${ARGN}) + if(arg MATCHES "^CMAKE_") + list(APPEND _flag_vars ${arg}) + elseif(arg MATCHES "^/wd") + list(APPEND _msvc_warnings ${arg}) + elseif(arg MATCHES "^-W") + list(APPEND _gxx_warnings ${arg}) + endif() + endforeach() + + if(NOT _flag_vars) + set(_flag_vars CMAKE_C_FLAGS CMAKE_CXX_FLAGS) + endif() + + if(MSVC AND _msvc_warnings) + foreach(var ${_flag_vars}) + foreach(warning ${_msvc_warnings}) + set(${var} "${${var}} ${warning}") + endforeach() + endforeach() + elseif((CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANGXX) AND _gxx_warnings) + foreach(var ${_flag_vars}) + foreach(warning ${_gxx_warnings}) + if(NOT warning MATCHES "^-Wno-") + string(REPLACE "${warning}" "" ${var} "${${var}}") + string(REPLACE "-W" "-Wno-" warning "${warning}") + endif() + set(${var} "${${var}} ${warning}") + endforeach() + endforeach() + endif() + caffe_clear_vars(_flag_vars _msvc_warnings _gxx_warnings) +endmacro() + +################################################################################################ +# Helper function get current definitions +# Usage: +# caffe_get_current_definitions() +function(caffe_get_current_definitions definitions_var) + get_property(current_definitions DIRECTORY PROPERTY COMPILE_DEFINITIONS) + set(result "") + + foreach(d ${current_definitions}) + list(APPEND result -D${d}) + endforeach() + + caffe_list_unique(result) + set(${definitions_var} ${result} PARENT_SCOPE) +endfunction() + +################################################################################################ +# Helper function get current includes/definitions +# Usage: +# caffe_get_current_cflags() +function(caffe_get_current_cflags cflags_var) + get_property(current_includes DIRECTORY PROPERTY INCLUDE_DIRECTORIES) + caffe_convert_absolute_paths(current_includes) + caffe_get_current_definitions(cflags) + + foreach(i ${current_includes}) + list(APPEND cflags "-I${i}") + endforeach() + + caffe_list_unique(cflags) + set(${cflags_var} ${cflags} PARENT_SCOPE) +endfunction() + +################################################################################################ +# Helper function to parse current linker libs into link directories, libflags and osx frameworks +# Usage: +# caffe_parse_linker_libs( ) +function(caffe_parse_linker_libs Caffe_LINKER_LIBS_variable folders_var flags_var frameworks_var) + + set(__unspec "") + set(__debug "") + set(__optimized "") + set(__framework "") + set(__varname "__unspec") + + # split libs into debug, optimized, unspecified and frameworks + foreach(list_elem ${${Caffe_LINKER_LIBS_variable}}) + if(list_elem STREQUAL "debug") + set(__varname "__debug") + elseif(list_elem STREQUAL "optimized") + set(__varname "__optimized") + elseif(list_elem MATCHES "^-framework[ \t]+([^ \t].*)") + list(APPEND __framework -framework ${CMAKE_MATCH_1}) + else() + list(APPEND ${__varname} ${list_elem}) + set(__varname "__unspec") + endif() + endforeach() + + # attach debug or optimized libs to unspecified according to current configuration + if(CMAKE_BUILD_TYPE MATCHES "Debug") + set(__libs ${__unspec} ${__debug}) + else() + set(__libs ${__unspec} ${__optimized}) 
+ endif() + + set(libflags "") + set(folders "") + + # convert linker libraries list to link flags + foreach(lib ${__libs}) + if(TARGET ${lib}) + list(APPEND folders $) + list(APPEND libflags -l${lib}) + elseif(lib MATCHES "^-l.*") + list(APPEND libflags ${lib}) + elseif(IS_ABSOLUTE ${lib}) + get_filename_component(name_we ${lib} NAME_WE) + get_filename_component(folder ${lib} PATH) + + string(REGEX MATCH "^lib(.*)" __match ${name_we}) + list(APPEND libflags -l${CMAKE_MATCH_1}) + list(APPEND folders ${folder}) + else() + message(FATAL_ERROR "Logic error. Need to update cmake script") + endif() + endforeach() + + caffe_list_unique(libflags folders) + + set(${folders_var} ${folders} PARENT_SCOPE) + set(${flags_var} ${libflags} PARENT_SCOPE) + set(${frameworks_var} ${__framework} PARENT_SCOPE) +endfunction() + +################################################################################################ +# Helper function to detect Darwin version, i.e. 10.8, 10.9, 10.10, .... +# Usage: +# caffe_detect_darwin_version() +function(caffe_detect_darwin_version output_var) + if(APPLE) + execute_process(COMMAND /usr/bin/sw_vers -productVersion + RESULT_VARIABLE __sw_vers OUTPUT_VARIABLE __sw_vers_out + ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) + + set(${output_var} ${__sw_vers_out} PARENT_SCOPE) + else() + set(${output_var} "" PARENT_SCOPE) + endif() +endfunction() diff --git a/cmake/lint.cmake b/cmake/lint.cmake new file mode 100755 index 0000000..70a0065 --- /dev/null +++ b/cmake/lint.cmake @@ -0,0 +1,50 @@ + +set(CMAKE_SOURCE_DIR ..) +set(LINT_COMMAND ${CMAKE_SOURCE_DIR}/scripts/cpp_lint.py) +set(SRC_FILE_EXTENSIONS h hpp hu c cpp cu cc) +set(EXCLUDE_FILE_EXTENSTIONS pb.h pb.cc) +set(LINT_DIRS include src/caffe examples tools python matlab) + +cmake_policy(SET CMP0009 NEW) # suppress cmake warning + +# find all files of interest +foreach(ext ${SRC_FILE_EXTENSIONS}) + foreach(dir ${LINT_DIRS}) + file(GLOB_RECURSE FOUND_FILES ${CMAKE_SOURCE_DIR}/${dir}/*.${ext}) + set(LINT_SOURCES ${LINT_SOURCES} ${FOUND_FILES}) + endforeach() +endforeach() + +# find all files that should be excluded +foreach(ext ${EXCLUDE_FILE_EXTENSTIONS}) + file(GLOB_RECURSE FOUND_FILES ${CMAKE_SOURCE_DIR}/*.${ext}) + set(EXCLUDED_FILES ${EXCLUDED_FILES} ${FOUND_FILES}) +endforeach() + +# exclude generated pb files +list(REMOVE_ITEM LINT_SOURCES ${EXCLUDED_FILES}) + +execute_process( + COMMAND ${LINT_COMMAND} ${LINT_SOURCES} + ERROR_VARIABLE LINT_OUTPUT + ERROR_STRIP_TRAILING_WHITESPACE +) + +string(REPLACE "\n" ";" LINT_OUTPUT ${LINT_OUTPUT}) + +list(GET LINT_OUTPUT -1 LINT_RESULT) +list(REMOVE_AT LINT_OUTPUT -1) +string(REPLACE " " ";" LINT_RESULT ${LINT_RESULT}) +list(GET LINT_RESULT -1 NUM_ERRORS) +if(NUM_ERRORS GREATER 0) + foreach(msg ${LINT_OUTPUT}) + string(FIND ${msg} "Done" result) + if(result LESS 0) + message(STATUS ${msg}) + endif() + endforeach() + message(FATAL_ERROR "Lint found ${NUM_ERRORS} errors!") +else() + message(STATUS "Lint did not find any errors!") +endif() + diff --git a/exp_caltech.py b/exp_caltech.py new file mode 100755 index 0000000..d779390 --- /dev/null +++ b/exp_caltech.py @@ -0,0 +1,229 @@ +import sys +sys.path.insert(0, './python/') +import caffe +import numpy as np +from lcg_random import lcg_rand +import ncs +from easydict import EasyDict as edict +import time + +start_time = time.time() +ncs_time = 0. +adjusting_time = 0. +retraining_time = 0. 
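+# Overall flow of this script (CaffeNet pruned on Caltech):
+#   1. load the given caffemodel and take one solver step;
+#   2. while itr < prune_stop_iter, every prune_interval iterations a random
+#      subset of layers is selected and NCS searches their pruning thresholds
+#      ("crates") under the accuracy constraint acc_constrain;
+#   3. the best thresholds found are applied and training continues, so the
+#      remaining weights can adapt (dynamic surgery);
+#   4. after prune_stop_iter only retraining is performed.
+# The three timers above accumulate the NCS search, adjusting and retraining time.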
+# model files +proto='./models/caltech_caffenet/train_val_caltech.prototxt' +weights='/home/deepModels/caffe_models/bvlc_reference_caffenet/scratch_caltech_caffenet_train_iter_10000.caffemodel' +solver_path='./models/caltech_caffenet/caltech_solver.prototxt' +es_method='ncs' +# cpu/gpu +caffe.set_mode_gpu() +caffe.set_device(0) +# init solver +solver = caffe.SGDSolver(solver_path) +# basic parameters +# accuracy constraint for pruning +acc_constrain=0.08 +# stop iteration count +niter = 15001 +# stop pruning iteration count +prune_stop_iter = 10000 +# interval for +prune_interval = 250 +# interval for std variate +std_interval = 7000 +# the list of layer names +layer_name = ['conv1','conv2','conv3','conv4','conv5','fc6','fc7','fc8*'] +# the dict of layer names to its arrary indices +layer_inds = {'conv1':0, 'conv2':1, 'conv3':2,'conv4':3,'conv5':4,'fc6':5,'fc7':6,'fc8*':7} +# the dict of crates for each layer +crates = {'conv1':0.001, 'conv2':0.001, 'conv3':0.001,'conv4':0.001,'conv5':0.001,'fc6':0.001,'fc7':0.001,'fc8*':0.001} +# the list of the crates +crates_list = 8*[0.001] +# the gamma for each layer +gamma = {'conv1':0.00002, 'conv2':0.00002, 'conv3':0.00002,'conv4':0.00002,'conv5':0.00002,'fc6':0.0002,'fc7':0.0002,'fc8*':0.0002} +gamma_star = 0.0002 +ncs_stepsize = 50 +# random see for numpy.random +seed=np.random.randint(1000000) +#seed = 217750 +np.random.seed([seed]) +# the dict to store intermedia results +es_cache = {} +#retrieval_tag=[] +r_count=0 +# load the pretrained caffe model +if weights: + solver.net.copy_from(weights) + +# definition of many axuliliary methods +# run the network on its dataset +def test_net(thenet, _start='data', _count=1): + ''' + thenet: the object of network + _start: the layer to start from + _count: the number of batches to run + ''' + scores = 0 + for i in range(_count): + thenet.forward(start=_start) + scores += thenet.blobs['accuracy'].data + return scores/_count + +# Set the crates of each layer, the pruning will happen in the next forward action +def apply_prune(thenet, _crates): + ''' + thenet: the model to be pruned + _crates: the list of crates for layers + ''' + for _id in range(len(layer_name)): + if _crates[_id] < 0: + continue + layer_id = layer_name[_id] + mask0 = thenet.params[layer_id][2].data.ravel()[0] + if mask0 == 0: + thenet.params[layer_id][2].data.ravel()[0] = -_crates[_id] + elif mask0 == 1: + thenet.params[layer_id][2].data.ravel()[0] = 1+_crates[_id] + else: + pdb.set_trace() + +# calcuate the sparsity of a network model +def get_sparsity(thenet): + ''' + thenet: the network for checking + ''' + remain = 0 + total = 0 + for layer_id in layer_name: + remain += len(np.where(thenet.params[layer_id][2].data != 0)[0]) + remain += len(np.where(thenet.params[layer_id][3].data != 0)[0]) + total += thenet.params[layer_id][0].data.size + total += thenet.params[layer_id][1].data.size + #return total*1./(100.*remain) + return remain*1./total + +# evaluate the accuracy of a network with a set of crates respect to a original accuracy +def evaluate(thenet, x_set, batchcount=1, accuracy_ontrain=0.9988): + nofit=False + fitness=[] + X=[] + for x in x_set: + x_fit = 1.1 + apply_prune(thenet,x) + acc = test_net(thenet, _start='conv1', _count=batchcount) + if acc >= accuracy_ontrain - acc_constrain: + x_fit = get_sparsity(thenet) + nofit=True + fitness.append(x_fit) + X.append(x) + return (X, fitness, nofit) +#------mian-------------- +solver.step(1) +# Adaptive dynamic surgery +for itr in range(niter): + #r = np.random.rand() + #if 
itr%500==0 and solver.test_nets[0].blobs['accuracy'].data >= 0.9909: + # retrieval_tag.append(itr) + tmp_crates=[] + tmp_ind = [] + for ii in layer_name: + #tmp_crates.append(crates[ii]*(np.power(1+gamma[ii]*itr, -1)>np.random.rand())) + tmp_tag = np.power(1+gamma[ii]*itr, -1)>np.random.rand() + if tmp_tag: + tmp_ind.append(ii) + tmp_crates.append(tmp_tag*crates[ii]) + if itr < prune_stop_iter and itr%std_interval == 0: + ncs_stepsize = ncs_stepsize/10. + if itr%500 == 0: + print "Compression:{}, Accuracy:{}".format(1./get_sparsity(solver.net), test_net(solver.net, _count=1, _start="conv1")) + if len(tmp_ind)>0 and itr < prune_stop_iter: + _tmp_c = np.array(len(crates_list)*[-1.]) + for t_name in tmp_ind: + _tmp_c[layer_inds[t_name]] = crates[t_name] + apply_prune(solver.net, _tmp_c) + #if len(tmp_ind)>1 and itr < prune_stop_iter: + if itr%prune_interval==0 and len(tmp_ind)>1 and itr < prune_stop_iter: + ncs_start_t = time.time() + accuracy_ = test_net(solver.net, _count=1, _start="conv1") + + # make sure a worable son x + es = {} + if es_method == 'ncs': + __C = edict() + __C.parameters = {'reset_xl_to_pop':False,'init_value':tmp_crates, 'stepsize':ncs_stepsize, 'bounds':[0.0, 10.], 'ftarget':0, 'tmax':1600, 'popsize':8, 'best_k':1} + es = ncs.NCS(__C.parameters) + print '***************NCS initialization***************' + tmp_x_ = np.array(crates_list) + tmp_input_x = tmp_crates + for _ii in range(len(tmp_ind)): + tmp_x_[layer_inds[tmp_ind[_ii]]] = tmp_input_x[_ii] + _,tmp_fit,_= evaluate(solver.net, [tmp_x_], 1, accuracy_) + es.set_initFitness(es.popsize*tmp_fit) + print 'fit:{}'.format(tmp_fit) + print '***************NCS initialization***************' + + # evolution loop + while not es.stop(): + x = es.ask() + X = [] + for x_ in x: + tmp_x_ = np.array(crates_list) + for _ii in range(len(tmp_ind)): + tmp_x_[layer_inds[tmp_ind[_ii]]] = x_[_ii] + X.append(tmp_x_) + + X_arrange,fit,has_fit_x = evaluate(solver.net, X, 1, accuracy_) + + X = [] + for x_ in X_arrange: + tmp_x_ = np.array(len(tmp_ind)*[0.]) + for _ii in range(len(tmp_ind)): + tmp_x_[_ii]= x_[layer_inds[tmp_ind[_ii]]] + X.append(tmp_x_) + #print X,fit + es.tell(X, fit) + #es.disp(100) + for _ii in range(len(tmp_ind)): + crates_list[layer_inds[tmp_ind[_ii]]] = es.result()[0][_ii] + for c_i in range(len(crates_list)): + crates[layer_name[c_i]] = crates_list[c_i] + es_cache[itr]={'compression':-es.result()[1], 'crates':crates_list[:]} + _tmp_c = np.array(len(crates_list)*[-1.]) + for t_name in tmp_ind: + _tmp_c[layer_inds[t_name]] = crates[t_name] + apply_prune(solver.net, crates_list) + + ncs_end_t = time.time() + ncs_time += (ncs_end_t - ncs_start_t) + + loop_start_t = time.time() + # adjusting or retraining + solver.step(1) + + loop_end_t = time.time() + if itr < prune_stop_iter: + adjusting_time += (loop_end_t - loop_start_t) + else: + retraining_time += (loop_end_t - loop_start_t) + +# record +import datetime +now = datetime.datetime.now() +time_styled = now.strftime("%Y-%m-%d %H:%M:%S") +out_ = open('record_{}.txt'.format(time_styled), 'w') +for key,value in es_cache.items(): + out_.write("Iteration[{}]:\t{}x\t{}\n".format(key,value['compression'],value['crates'])) +out_.close() +print 'random seed:{}'.format(seed) +end_time = time.time() +#print(ncs_time) +#print(adjusting_time) +#print(retraining_time) +print('NCS time: %.4f mins' % (ncs_time/60.)) +print('adjusting time: %.4f mins' % (adjusting_time/60.)) +print('retraining time: %.4f mins' % (retraining_time/60.)) +print('Total time: %.4f mins' % ((end_time - 
start_time)/60.)) +#print "Retrieval accuracy @ iteration {}".format(retrieval_tag) +# save final model +#solver.net.save('./models/letnet5/9_letnet5_iter_{}.caffemodel'.format(itr+1)) diff --git a/exp_lenet300100.py b/exp_lenet300100.py new file mode 100755 index 0000000..c2ff6f7 --- /dev/null +++ b/exp_lenet300100.py @@ -0,0 +1,201 @@ +import sys +sys.path.insert(0, './python/') +import caffe +import numpy as np +from lcg_random import lcg_rand +import ncs +from easydict import EasyDict as edict +import time +import pdb + +# model files +proto='./models/lenet300100/lenet_train_test.prototxt' +# based on the network used in DS paper, 97.72 accuracy +#weights='/home/gitProject/Dynamic-Network-Surgery/models/lenet300100/caffe_lenet300100_original.caffemodel' +# based on the network used in IPR, 97.73 accuracy +weights='./models/lenet300100/lenet300100_iter_10000.caffemodel' +solver_path='./models/lenet300100/lenet_solver.prototxt' +es_method='ncs' +# cpu/gpu +caffe.set_mode_gpu() +caffe.set_device(0) +# init solver +solver = caffe.SGDSolver(solver_path) +# basic parameters +# accuracy constraint for pruning +acc_constrain=0.08 +# stop iteration count +#niter = 20501 +niter = 30001 +# stop pruning iteration count +prune_stop_iter = 15000 +# the list of layer names +layer_name = ['ip1','ip2','ip3'] +# the dict of layer names to its arrary indices +layer_inds = {'ip1':0, 'ip2':1, 'ip3':2} +# the dict of crates for each layer +crates = {'ip1':0.001, 'ip2':0.001, 'ip3':0.001} +# the list of the crates +crates_list = [0.001, 0.001, 0.001] +# the gamma for each layer +gamma = {'ip1':0.0002, 'ip2':0.0002, 'ip3':0.0002} +gamma_star = 0.0002 +ncs_stepsize = 50 +# random see for numpy.random +#seed= 981118 # for 112x compression with acc_constrain=0.3 +seed=961449 # for 113.5x compression with acc_constrain=0.08 +#seed= np.random.randint(1000000) +np.random.seed([seed]) +# the dict to store intermedia results +es_cache = {} +#retrieval_tag=[] +r_count=0 +# load the pretrained caffe model +if weights: + solver.net.copy_from(weights) + +# definition of many axuliliary methods +# run the network on its dataset +def test_net(thenet, _start='mnist', _count=1): + ''' + thenet: the object of network + _start: the layer to start from + _count: the number of batches to run + ''' + scores = 0 + for i in range(_count): + thenet.forward(start=_start) + scores += thenet.blobs['accuracy'].data + return scores/_count + +# Set the crates of each layer, the pruning will happen in the next forward action +def apply_prune(thenet, _crates): + ''' + thenet: the model to be pruned + _crates: the list of crates for layers + ''' + for _id in range(len(layer_name)): + if _crates[_id] < 0: + continue + layer_id = layer_name[_id] + mask0 = thenet.params[layer_id][2].data.ravel()[0] + if mask0 == 0: + thenet.params[layer_id][2].data.ravel()[0] = -_crates[_id] + elif mask0 == 1: + thenet.params[layer_id][2].data.ravel()[0] = 1+_crates[_id] + else: + pdb.set_trace() + +# calcuate the sparsity of a network model +def get_sparsity(thenet): + ''' + thenet: the network for checking + ''' + remain = 0 + total = 0 + for layer_id in layer_name: + remain += len(np.where(thenet.params[layer_id][2].data != 0)[0]) + remain += len(np.where(thenet.params[layer_id][3].data != 0)[0]) + total += thenet.params[layer_id][0].data.size + total += thenet.params[layer_id][1].data.size + #return total*1./(100.*remain) + return remain*1./total + +# evaluate the accuracy of a network with a set of crates respect to a original accuracy +def 
evaluate(thenet, x_set, batchcount=1, accuracy_ontrain=0.9988): + fitness=[] + X=[] + for x in x_set: + x_fit = 1.1 + apply_prune(thenet,x) + acc = test_net(thenet, _start='ip1', _count=batchcount) + if acc >= accuracy_ontrain - acc_constrain: + x_fit = get_sparsity(thenet) + fitness.append(x_fit) + X.append(x) + return (X, fitness) +#------mian-------------- +start_time = time.time() + +solver.step(1) +# Adaptive dynamic surgery +for itr in range(niter): + #r = np.random.rand() + #if itr%500==0 and solver.test_nets[0].blobs['accuracy'].data >= 0.9909: + # retrieval_tag.append(itr) + tmp_crates=[] + tmp_ind = [] + for ii in layer_name: + #tmp_crates.append(crates[ii]*(np.power(1+gamma[ii]*itr, -1)>np.random.rand())) + tmp_tag = np.power(1+gamma[ii]*itr, -1)>np.random.rand() + if tmp_tag: + tmp_ind.append(ii) + tmp_crates.append(tmp_tag*crates[ii]) + if itr < 2000 and itr%10000 == 0: + ncs_stepsize = ncs_stepsize/10. + if itr%500 == 0: + print "Compression:{}, Accuracy:{}".format(1./get_sparsity(solver.net), test_net(solver.net, _count=1, _start="ip1")) + if len(tmp_ind)>0 and itr < prune_stop_iter:# run at window @6 + _tmp_c = np.array(len(crates_list)*[-1.]) + for t_name in tmp_ind: + _tmp_c[layer_inds[t_name]] = crates[t_name] + apply_prune(solver.net, _tmp_c) + #if len(tmp_ind)>1 and itr < prune_stop_iter: + if itr%1000==0 and len(tmp_ind)>1 and itr < prune_stop_iter:# run at window @3 + accuracy_ = test_net(solver.net, _count=1, _start="ip1") + es = {} + if es_method == 'ncs': + __C = edict() + __C.parameters = {'reset_xl_to_pop':False,'init_value':tmp_crates, 'stepsize':ncs_stepsize, 'bounds':[0.0, 10.], 'ftarget':0, 'tmax':1600, 'popsize':10, 'best_k':1} + es = ncs.NCS(__C.parameters) + print '***************NCS initialization***************' + tmp_x_ = np.array(crates_list) + tmp_input_x = tmp_crates + for _ii in range(len(tmp_ind)): + tmp_x_[layer_inds[tmp_ind[_ii]]] = tmp_input_x[_ii] + _,tmp_fit = evaluate(solver.net, [tmp_x_], 1, accuracy_) + es.set_initFitness(es.popsize*tmp_fit) + print 'fit:{}'.format(tmp_fit) + print '***************NCS initialization***************' + while not es.stop(): + x = es.ask() + X = [] + for x_ in x: + tmp_x_ = np.array(crates_list) + for _ii in range(len(tmp_ind)): + tmp_x_[layer_inds[tmp_ind[_ii]]] = x_[_ii] + X.append(tmp_x_) + + X_arrange,fit = evaluate(solver.net, X, 1, accuracy_) + + X = [] + for x_ in X_arrange: + tmp_x_ = np.array(len(tmp_ind)*[0.]) + for _ii in range(len(tmp_ind)): + tmp_x_[_ii]= x_[layer_inds[tmp_ind[_ii]]] + X.append(tmp_x_) + #print X,fit + es.tell(X, fit) + #es.disp(100) + for _ii in range(len(tmp_ind)): + crates_list[layer_inds[tmp_ind[_ii]]] = es.result()[0][_ii] + for c_i in range(len(crates_list)): + crates[layer_name[c_i]] = crates_list[c_i] + es_cache[itr]={'compression':-es.result()[1], 'crates':crates_list[:]} + _tmp_c = np.array(len(crates_list)*[-1.]) + for t_name in tmp_ind: + _tmp_c[layer_inds[t_name]] = crates[t_name] + apply_prune(solver.net, crates_list) + solver.step(1) + +end_time = time.time() +# record +import datetime +now = datetime.datetime.now() +time_styled = now.strftime("%Y-%m-%d %H:%M:%S") +out_ = open('record_{}.txt'.format(time_styled), 'w') +for key,value in es_cache.items(): + out_.write("Iteration[{}]:\t{}x\t{}\n".format(key,value['compression'],value['crates'])) +out_.close() +print 'random seed:{}'.format(seed) +print "Time:%.4f" % ((end_time - start_time)/60.) 
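+# Note on the pruning schedule used above: at iteration itr each layer is
+# re-selected for pruning with probability 1/(1 + gamma[layer]*itr), so layers
+# are touched often early in training and almost never near prune_stop_iter.
+# A minimal sketch of that selection rule (illustrative only, reusing the
+# layer_name and gamma dicts defined in this script):
+#
+#   selected = [name for name in layer_name
+#               if np.power(1 + gamma[name]*itr, -1) > np.random.rand()]
+#
+# which is the same test used to build tmp_ind inside the training loop.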
diff --git a/exp_lenet5.py b/exp_lenet5.py new file mode 100755 index 0000000..870a8bc --- /dev/null +++ b/exp_lenet5.py @@ -0,0 +1,197 @@ +import sys +sys.path.insert(0, './python/') +import caffe +import numpy as np +from lcg_random import lcg_rand +import ncs +from easydict import EasyDict as edict + +# model files +proto='./models/lenet5/lenet_train_test.prototxt' +weights='./models/lenet5/caffe_lenet5_original.caffemodel' +solver_path='./models/lenet5/lenet_solver.prototxt' +es_method='ncs' +# cpu/gpu +caffe.set_mode_gpu() +caffe.set_device(0) +# init solver +solver = caffe.SGDSolver(solver_path) +# basic parameters +# accuracy constraint for pruning +acc_constrain=0.05 +# stop iteration count +#niter = 20501 +niter = 30001 +# stop pruning iteration count +prune_stop_iter = 15000 +# the list of layer names +layer_name = ['conv1','conv2','ip1', 'ip2'] +# the dict of layer names to its arrary indices +layer_inds = {'conv1':0, 'conv2':1, 'ip1':2, 'ip2':3} +# the dict of crates for each layer +#crates = {'conv1':1.95, 'conv2':3.35, 'ip1':3.7, 'ip2':2.8} +#crates = {'conv1':-1.54, 'conv2':-1.40, 'ip1':-1.63, 'ip2':-1.37} +crates = {'conv1':0.001, 'conv2':0.001, 'ip1':0.001, 'ip2':0.001} +# the list of the crates +#crates_list = [1.95, 3.35, 3.7, 2.8] +#crates_list = [-1.54, -1.40, -1.63, -1.37] +crates_list = [0.001, 0.001, 0.001, 0.001] +# the gamma for each layer +gamma = {'conv1':0.00002, 'conv2':0.00002, 'ip1':0.0002, 'ip2':0.0002} +gamma_star = 0.0002 +ncs_stepsize = 50 +# random see for numpy.random +seed=np.random.randint(1000000) +seed=981118#seed 93306,124x,0.04;78011, 127x, 0.05,430000,150x, 515769,185x +np.random.seed([seed]) +# the dict to store intermedia results +es_cache = {} +#retrieval_tag=[] +r_count=0 +# load the pretrained caffe model +if weights: + solver.net.copy_from(weights) + +# definition of many axuliliary methods +# run the network on its dataset +def test_net(thenet, _start='mnist', _count=1): + ''' + thenet: the object of network + _start: the layer to start from + _count: the number of batches to run + ''' + scores = 0 + for i in range(_count): + thenet.forward(start=_start) + scores += thenet.blobs['accuracy'].data + return scores/_count + +# Set the crates of each layer, the pruning will happen in the next forward action +def apply_prune(thenet, _crates): + ''' + thenet: the model to be pruned + _crates: the list of crates for layers + ''' + for _id in range(len(layer_name)): + if _crates[_id] < 0: + continue + layer_id = layer_name[_id] + mask0 = thenet.params[layer_id][2].data.ravel()[0] + if mask0 == 0: + thenet.params[layer_id][2].data.ravel()[0] = -_crates[_id] + elif mask0 == 1: + thenet.params[layer_id][2].data.ravel()[0] = 1+_crates[_id] + else: + pdb.set_trace() + +# calcuate the sparsity of a network model +def get_sparsity(thenet): + ''' + thenet: the network for checking + ''' + remain = 0 + total = 0 + for layer_id in layer_name: + remain += len(np.where(thenet.params[layer_id][2].data != 0)[0]) + remain += len(np.where(thenet.params[layer_id][3].data != 0)[0]) + total += thenet.params[layer_id][0].data.size + total += thenet.params[layer_id][1].data.size + #return total*1./(100.*remain) + return remain*1./total + +# evaluate the accuracy of a network with a set of crates respect to a original accuracy +def evaluate(thenet, x_set, batchcount=1, accuracy_ontrain=0.9988): + fitness=[] + X=[] + for x in x_set: + x_fit = 1.1 + apply_prune(thenet,x) + acc = test_net(thenet, _start='conv1', _count=batchcount) + if acc >= accuracy_ontrain - 
acc_constrain: + x_fit = get_sparsity(thenet) + fitness.append(x_fit) + X.append(x) + return (X, fitness) +#------mian-------------- +solver.step(1) +# Adaptive dynamic surgery +for itr in range(niter): + #r = np.random.rand() + #if itr%500==0 and solver.test_nets[0].blobs['accuracy'].data >= 0.9909: + # retrieval_tag.append(itr) + tmp_crates=[] + tmp_ind = [] + for ii in layer_name: + #tmp_crates.append(crates[ii]*(np.power(1+gamma[ii]*itr, -1)>np.random.rand())) + tmp_tag = np.power(1+gamma[ii]*itr, -1)>np.random.rand() + if tmp_tag: + tmp_ind.append(ii) + tmp_crates.append(tmp_tag*crates[ii]) + if itr < 20000 and itr%10000 == 0: + ncs_stepsize = ncs_stepsize/10. + if itr%500 == 0: + print "Compression:{}, Accuracy:{}".format(1./get_sparsity(solver.net), test_net(solver.net, _count=1, _start="conv1")) + if len(tmp_ind)>0 and itr < prune_stop_iter:# run at window @6 + _tmp_c = np.array(len(crates_list)*[-1.]) + for t_name in tmp_ind: + _tmp_c[layer_inds[t_name]] = crates[t_name] + apply_prune(solver.net, _tmp_c) + #if len(tmp_ind)>1 and itr < prune_stop_iter: + if itr%1000==0 and len(tmp_ind)>1 and itr < prune_stop_iter:# run at window @3 + accuracy_ = test_net(solver.net, _count=1, _start="conv1") + es = {} + if es_method == 'ncs': + __C = edict() + __C.parameters = {'reset_xl_to_pop':False,'init_value':tmp_crates, 'stepsize':ncs_stepsize, 'bounds':[0.0, 20.], 'ftarget':0, 'tmax':1600, 'popsize':10, 'best_k':1} + es = ncs.NCS(__C.parameters) + print '***************NCS initialization***************' + tmp_x_ = np.array(crates_list) + tmp_input_x = tmp_crates + for _ii in range(len(tmp_ind)): + tmp_x_[layer_inds[tmp_ind[_ii]]] = tmp_input_x[_ii] + _,tmp_fit = evaluate(solver.net, [tmp_x_], 1, accuracy_) + es.set_initFitness(es.popsize*tmp_fit) + print 'fit:{}'.format(tmp_fit) + print '***************NCS initialization***************' + while not es.stop(): + x = es.ask() + X = [] + for x_ in x: + tmp_x_ = np.array(crates_list) + for _ii in range(len(tmp_ind)): + tmp_x_[layer_inds[tmp_ind[_ii]]] = x_[_ii] + X.append(tmp_x_) + + X_arrange,fit = evaluate(solver.net, X, 1, accuracy_) + + X = [] + for x_ in X_arrange: + tmp_x_ = np.array(len(tmp_ind)*[0.]) + for _ii in range(len(tmp_ind)): + tmp_x_[_ii]= x_[layer_inds[tmp_ind[_ii]]] + X.append(tmp_x_) + #print X,fit + es.tell(X, fit) + #es.disp(100) + for _ii in range(len(tmp_ind)): + crates_list[layer_inds[tmp_ind[_ii]]] = es.result()[0][_ii] + for c_i in range(len(crates_list)): + crates[layer_name[c_i]] = crates_list[c_i] + es_cache[itr]={'compression':-es.result()[1], 'crates':crates_list[:]} + _tmp_c = np.array(len(crates_list)*[-1.]) + for t_name in tmp_ind: + _tmp_c[layer_inds[t_name]] = crates[t_name] + apply_prune(solver.net, crates_list) + solver.step(1) +# record +import datetime +now = datetime.datetime.now() +time_styled = now.strftime("%Y-%m-%d %H:%M:%S") +out_ = open('record_{}.txt'.format(time_styled), 'w') +for key,value in es_cache.items(): + out_.write("Iteration[{}]:\t{}x\t{}\n".format(key,value['compression'],value['crates'])) +out_.close() +print 'random seed:{}'.format(seed) +#print "Retrieval accuracy @ iteration {}".format(retrieval_tag) +# save final model +#solver.net.save('./models/letnet5/9_letnet5_iter_{}.caffemodel'.format(itr+1)) diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp new file mode 100755 index 0000000..9b813e7 --- /dev/null +++ b/include/caffe/blob.hpp @@ -0,0 +1,280 @@ +#ifndef CAFFE_BLOB_HPP_ +#define CAFFE_BLOB_HPP_ + +#include +#include +#include + +#include "caffe/common.hpp" 
+#include "caffe/proto/caffe.pb.h" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" + +const int kMaxBlobAxes = 32; + +namespace caffe { + +/** + * @brief A wrapper around SyncedMemory holders serving as the basic + * computational unit through which Layer%s, Net%s, and Solver%s + * interact. + * + * TODO(dox): more thorough description. + */ +template +class Blob { + public: + Blob() + : data_(), diff_(), count_(0), capacity_(0) {} + + /// @brief Deprecated; use Blob(const vector& shape). + explicit Blob(const int num, const int channels, const int height, + const int width); + explicit Blob(const vector& shape); + + /// @brief Deprecated; use Reshape(const vector& shape). + void Reshape(const int num, const int channels, const int height, + const int width); + /** + * @brief Change the dimensions of the blob, allocating new memory if + * necessary. + * + * This function can be called both to create an initial allocation + * of memory, and to adjust the dimensions of a top blob during Layer::Reshape + * or Layer::Forward. When changing the size of blob, memory will only be + * reallocated if sufficient memory does not already exist, and excess memory + * will never be freed. + * + * Note that reshaping an input blob and immediately calling Net::Backward is + * an error; either Net::Forward or Net::Reshape need to be called to + * propagate the new input shape to higher layers. + */ + void Reshape(const vector& shape); + void Reshape(const BlobShape& shape); + void ReshapeLike(const Blob& other); + inline string shape_string() const { + ostringstream stream; + for (int i = 0; i < shape_.size(); ++i) { + stream << shape_[i] << " "; + } + stream << "(" << count_ << ")"; + return stream.str(); + } + inline const vector& shape() const { return shape_; } + /** + * @brief Returns the dimension of the index-th axis (or the negative index-th + * axis from the end, if index is negative). + * + * @param index the axis index, which may be negative as it will be + * "canonicalized" using CanonicalAxisIndex. + * Dies on out of range index. + */ + inline int shape(int index) const { + return shape_[CanonicalAxisIndex(index)]; + } + inline int num_axes() const { return shape_.size(); } + inline int count() const { return count_; } + + /** + * @brief Compute the volume of a slice; i.e., the product of dimensions + * among a range of axes. + * + * @param start_axis The first axis to include in the slice. + * + * @param end_axis The first axis to exclude from the slice. + */ + inline int count(int start_axis, int end_axis) const { + CHECK_LE(start_axis, end_axis); + CHECK_GE(start_axis, 0); + CHECK_GE(end_axis, 0); + CHECK_LE(start_axis, num_axes()); + CHECK_LE(end_axis, num_axes()); + int count = 1; + for (int i = start_axis; i < end_axis; ++i) { + count *= shape(i); + } + return count; + } + /** + * @brief Compute the volume of a slice spanning from a particular first + * axis to the final axis. + * + * @param start_axis The first axis to include in the slice. + */ + inline int count(int start_axis) const { + return count(start_axis, num_axes()); + } + + /** + * @brief Returns the 'canonical' version of a (usually) user-specified axis, + * allowing for negative indexing (e.g., -1 for the last axis). + * + * @param index the axis index. + * If 0 <= index < num_axes(), return index. + * If -num_axes <= index <= -1, return (num_axes() - (-index)), + * e.g., the last axis index (num_axes() - 1) if index == -1, + * the second to last if index == -2, etc. + * Dies on out of range index. 
+ */ + inline int CanonicalAxisIndex(int axis_index) const { + CHECK_GE(axis_index, -num_axes()) + << "axis " << axis_index << " out of range for " << num_axes() + << "-D Blob with shape " << shape_string(); + CHECK_LT(axis_index, num_axes()) + << "axis " << axis_index << " out of range for " << num_axes() + << "-D Blob with shape " << shape_string(); + if (axis_index < 0) { + return axis_index + num_axes(); + } + return axis_index; + } + + /// @brief Deprecated legacy shape accessor num: use shape(0) instead. + inline int num() const { return LegacyShape(0); } + /// @brief Deprecated legacy shape accessor channels: use shape(1) instead. + inline int channels() const { return LegacyShape(1); } + /// @brief Deprecated legacy shape accessor height: use shape(2) instead. + inline int height() const { return LegacyShape(2); } + /// @brief Deprecated legacy shape accessor width: use shape(3) instead. + inline int width() const { return LegacyShape(3); } + inline int LegacyShape(int index) const { + CHECK_LE(num_axes(), 4) + << "Cannot use legacy accessors on Blobs with > 4 axes."; + CHECK_LT(index, 4); + CHECK_GE(index, -4); + if (index >= num_axes() || index < -num_axes()) { + // Axis is out of range, but still in [0, 3] (or [-4, -1] for reverse + // indexing) -- this special case simulates the one-padding used to fill + // extraneous axes of legacy blobs. + return 1; + } + return shape(index); + } + + inline int offset(const int n, const int c = 0, const int h = 0, + const int w = 0) const { + CHECK_GE(n, 0); + CHECK_LE(n, num()); + CHECK_GE(channels(), 0); + CHECK_LE(c, channels()); + CHECK_GE(height(), 0); + CHECK_LE(h, height()); + CHECK_GE(width(), 0); + CHECK_LE(w, width()); + return ((n * channels() + c) * height() + h) * width() + w; + } + + inline int offset(const vector& indices) const { + CHECK_LE(indices.size(), num_axes()); + int offset = 0; + for (int i = 0; i < num_axes(); ++i) { + offset *= shape(i); + if (indices.size() > i) { + CHECK_GE(indices[i], 0); + CHECK_LT(indices[i], shape(i)); + offset += indices[i]; + } + } + return offset; + } + /** + * @brief Copy from a source Blob. 
+ * + * @param source the Blob to copy from + * @param copy_diff if false, copy the data; if true, copy the diff + * @param reshape if false, require this Blob to be pre-shaped to the shape + * of other (and die otherwise); if true, Reshape this Blob to other's + * shape if necessary + */ + void CopyFrom(const Blob& source, bool copy_diff = false, + bool reshape = false); + + inline Dtype data_at(const int n, const int c, const int h, + const int w) const { + return cpu_data()[offset(n, c, h, w)]; + } + + inline Dtype diff_at(const int n, const int c, const int h, + const int w) const { + return cpu_diff()[offset(n, c, h, w)]; + } + + inline Dtype data_at(const vector& index) const { + return cpu_data()[offset(index)]; + } + + inline Dtype diff_at(const vector& index) const { + return cpu_diff()[offset(index)]; + } + + inline const shared_ptr& data() const { + CHECK(data_); + return data_; + } + + inline const shared_ptr& diff() const { + CHECK(diff_); + return diff_; + } + + const Dtype* cpu_data() const; + void set_cpu_data(Dtype* data); + const Dtype* gpu_data() const; + const Dtype* cpu_diff() const; + const Dtype* gpu_diff() const; + Dtype* mutable_cpu_data(); + Dtype* mutable_gpu_data(); + Dtype* mutable_cpu_diff(); + Dtype* mutable_gpu_diff(); + void Update(); + void FromProto(const BlobProto& proto, bool reshape = true); + void ToProto(BlobProto* proto, bool write_diff = false) const; + + /// @brief Compute the sum of absolute values (L1 norm) of the data. + Dtype asum_data() const; + /// @brief Compute the sum of absolute values (L1 norm) of the diff. + Dtype asum_diff() const; + /// @brief Compute the sum of squares (L2 norm squared) of the data. + Dtype sumsq_data() const; + /// @brief Compute the sum of squares (L2 norm squared) of the diff. + Dtype sumsq_diff() const; + + /// @brief Scale the blob data by a constant factor. + void scale_data(Dtype scale_factor); + /// @brief Scale the blob diff by a constant factor. + void scale_diff(Dtype scale_factor); + + /** + * @brief Set the data_ shared_ptr to point to the SyncedMemory holding the + * data_ of Blob other -- useful in Layer%s which simply perform a copy + * in their Forward pass. + * + * This deallocates the SyncedMemory holding this Blob's data_, as + * shared_ptr calls its destructor when reset with the "=" operator. + */ + void ShareData(const Blob& other); + /** + * @brief Set the diff_ shared_ptr to point to the SyncedMemory holding the + * diff_ of Blob other -- useful in Layer%s which simply perform a copy + * in their Forward pass. + * + * This deallocates the SyncedMemory holding this Blob's diff_, as + * shared_ptr calls its destructor when reset with the "=" operator. + */ + void ShareDiff(const Blob& other); + + bool ShapeEquals(const BlobProto& other); + + protected: + shared_ptr data_; + shared_ptr diff_; + vector shape_; + int count_; + int capacity_; + + DISABLE_COPY_AND_ASSIGN(Blob); +}; // class Blob + +} // namespace caffe + +#endif // CAFFE_BLOB_HPP_ diff --git a/include/caffe/caffe.hpp b/include/caffe/caffe.hpp new file mode 100755 index 0000000..68a5e1d --- /dev/null +++ b/include/caffe/caffe.hpp @@ -0,0 +1,20 @@ +// caffe.hpp is the header file that you need to include in your code. It wraps +// all the internal caffe header files into one for simpler inclusion. 
+ +#ifndef CAFFE_CAFFE_HPP_ +#define CAFFE_CAFFE_HPP_ + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/layer_factory.hpp" +#include "caffe/net.hpp" +#include "caffe/parallel.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/solver.hpp" +#include "caffe/util/benchmark.hpp" +#include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" + +#endif // CAFFE_CAFFE_HPP_ diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp new file mode 100755 index 0000000..1df6b9a --- /dev/null +++ b/include/caffe/common.hpp @@ -0,0 +1,178 @@ +#ifndef CAFFE_COMMON_HPP_ +#define CAFFE_COMMON_HPP_ + +#include +#include +#include + +#include +#include +#include // NOLINT(readability/streams) +#include // NOLINT(readability/streams) +#include +#include +#include +#include +#include // pair +#include + +#include "caffe/util/device_alternate.hpp" + +// gflags 2.1 issue: namespace google was changed to gflags without warning. +// Luckily we will be able to use GFLAGS_GFLAGS_H_ to detect if it is version +// 2.1. If yes, we will add a temporary solution to redirect the namespace. +// TODO(Yangqing): Once gflags solves the problem in a more elegant way, let's +// remove the following hack. +#ifndef GFLAGS_GFLAGS_H_ +namespace gflags = google; +#endif // GFLAGS_GFLAGS_H_ + +// Disable the copy and assignment operator for a class. +#define DISABLE_COPY_AND_ASSIGN(classname) \ +private:\ + classname(const classname&);\ + classname& operator=(const classname&) + +// Instantiate a class with float and double specifications. +#define INSTANTIATE_CLASS(classname) \ + char gInstantiationGuard##classname; \ + template class classname; \ + template class classname + +#define INSTANTIATE_LAYER_GPU_FORWARD(classname) \ + template void classname::Forward_gpu( \ + const std::vector*>& bottom, \ + const std::vector*>& top); \ + template void classname::Forward_gpu( \ + const std::vector*>& bottom, \ + const std::vector*>& top); + +#define INSTANTIATE_LAYER_GPU_BACKWARD(classname) \ + template void classname::Backward_gpu( \ + const std::vector*>& top, \ + const std::vector& propagate_down, \ + const std::vector*>& bottom); \ + template void classname::Backward_gpu( \ + const std::vector*>& top, \ + const std::vector& propagate_down, \ + const std::vector*>& bottom) + +#define INSTANTIATE_LAYER_GPU_FUNCS(classname) \ + INSTANTIATE_LAYER_GPU_FORWARD(classname); \ + INSTANTIATE_LAYER_GPU_BACKWARD(classname) + +// A simple macro to mark codes that are not implemented, so that when the code +// is executed we will see a fatal log. +#define NOT_IMPLEMENTED LOG(FATAL) << "Not Implemented Yet" + +// See PR #1236 +namespace cv { class Mat; } + +namespace caffe { + +// We will use the boost shared_ptr instead of the new C++11 one mainly +// because cuda does not work (at least now) well with C++11 features. +using boost::shared_ptr; + +// Common functions and classes from std that caffe often uses. +using std::fstream; +using std::ios; +using std::isnan; +using std::isinf; +using std::iterator; +using std::make_pair; +using std::map; +using std::ostringstream; +using std::pair; +using std::set; +using std::string; +using std::stringstream; +using std::vector; + +// A global initialization function that you should call in your main function. +// Currently it initializes google flags and google logging. 
+void GlobalInit(int* pargc, char*** pargv); + +// A singleton class to hold common caffe stuff, such as the handler that +// caffe is going to use for cublas, curand, etc. +class Caffe { + public: + ~Caffe(); + + // Thread local context for Caffe. Moved to common.cpp instead of + // including boost/thread.hpp to avoid a boost/NVCC issues (#1009, #1010) + // on OSX. Also fails on Linux with CUDA 7.0.18. + static Caffe& Get(); + + enum Brew { CPU, GPU }; + + // This random number generator facade hides boost and CUDA rng + // implementation from one another (for cross-platform compatibility). + class RNG { + public: + RNG(); + explicit RNG(unsigned int seed); + explicit RNG(const RNG&); + RNG& operator=(const RNG&); + void* generator(); + private: + class Generator; + shared_ptr generator_; + }; + + // Getters for boost rng, curand, and cublas handles + inline static RNG& rng_stream() { + if (!Get().random_generator_) { + Get().random_generator_.reset(new RNG()); + } + return *(Get().random_generator_); + } +#ifndef CPU_ONLY + inline static cublasHandle_t cublas_handle() { return Get().cublas_handle_; } + inline static curandGenerator_t curand_generator() { + return Get().curand_generator_; + } +#endif + + // Returns the mode: running on CPU or GPU. + inline static Brew mode() { return Get().mode_; } + // The setters for the variables + // Sets the mode. It is recommended that you don't change the mode halfway + // into the program since that may cause allocation of pinned memory being + // freed in a non-pinned way, which may cause problems - I haven't verified + // it personally but better to note it here in the header file. + inline static void set_mode(Brew mode) { Get().mode_ = mode; } + // Sets the random seed of both boost and curand + static void set_random_seed(const unsigned int seed); + // Sets the device. Since we have cublas and curand stuff, set device also + // requires us to reset those values. + static void SetDevice(const int device_id); + // Prints the current GPU status. + static void DeviceQuery(); + // Parallel training info + inline static int solver_count() { return Get().solver_count_; } + inline static void set_solver_count(int val) { Get().solver_count_ = val; } + inline static bool root_solver() { return Get().root_solver_; } + inline static void set_root_solver(bool val) { Get().root_solver_ = val; } + + protected: +#ifndef CPU_ONLY + cublasHandle_t cublas_handle_; + curandGenerator_t curand_generator_; +#endif + shared_ptr random_generator_; + + Brew mode_; + int solver_count_; + bool root_solver_; + + private: + // The private constructor to avoid duplicate instantiation. + Caffe(); + + DISABLE_COPY_AND_ASSIGN(Caffe); +}; + +} // namespace caffe + +#endif // CAFFE_COMMON_HPP_ diff --git a/include/caffe/common_layers.hpp b/include/caffe/common_layers.hpp new file mode 100755 index 0000000..499efd5 --- /dev/null +++ b/include/caffe/common_layers.hpp @@ -0,0 +1,660 @@ +#ifndef CAFFE_COMMON_LAYERS_HPP_ +#define CAFFE_COMMON_LAYERS_HPP_ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/data_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/loss_layers.hpp" +#include "caffe/neuron_layers.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Compute the index of the @f$ K @f$ max values for each datum across + * all dimensions @f$ (C \times H \times W) @f$. + * + * Intended for use after a classification layer to produce a prediction. 
+ * If parameter out_max_val is set to true, output is a vector of pairs + * (max_ind, max_val) for each image. + * + * NOTE: does not implement Backwards operation. + */ +template +class ArgMaxLayer : public Layer { + public: + /** + * @param param provides ArgMaxParameter argmax_param, + * with ArgMaxLayer options: + * - top_k (\b optional uint, default 1). + * the number @f$ K @f$ of maximal items to output. + * - out_max_val (\b optional bool, default false). + * if set, output a vector of pairs (max_ind, max_val) for each image. + */ + explicit ArgMaxLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "ArgMax"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times 1 \times K \times 1) @f$ or, if out_max_val + * @f$ (N \times 2 \times K \times 1) @f$ + * the computed outputs @f$ + * y_n = \arg\max\limits_i x_{ni} + * @f$ (for @f$ K = 1 @f$). + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + /// @brief Not implemented (non-differentiable function) + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + NOT_IMPLEMENTED; + } + bool out_max_val_; + size_t top_k_; +}; + +/** + * @brief Takes at least two Blob%s and concatenates them along either the num + * or channel dimension, outputting the result. + */ +template +class ConcatLayer : public Layer { + public: + explicit ConcatLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Concat"; } + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + /** + * @param bottom input Blob vector (length 2+) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x_1 @f$ + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x_2 @f$ + * -# ... + * - K @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x_K @f$ + * @param top output Blob vector (length 1) + * -# @f$ (KN \times C \times H \times W) @f$ if axis == 0, or + * @f$ (N \times KC \times H \times W) @f$ if axis == 1: + * the concatenated output @f$ + * y = [\begin{array}{cccc} x_1 & x_2 & ... & x_K \end{array}] + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the concatenate inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (KN \times C \times H \times W) @f$ if axis == 0, or + * @f$ (N \times KC \times H \times W) @f$ if axis == 1: + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to concatenated outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length K), into which the top gradient + * @f$ \frac{\partial E}{\partial y} @f$ is deconcatenated back to the + * inputs @f$ + * \left[ \begin{array}{cccc} + * \frac{\partial E}{\partial x_1} & + * \frac{\partial E}{\partial x_2} & + * ... & + * \frac{\partial E}{\partial x_K} + * \end{array} \right] = + * \frac{\partial E}{\partial y} + * @f$ + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int count_; + int num_concats_; + int concat_input_size_; + int concat_axis_; +}; + +/** + * @brief Compute elementwise operations, such as product and sum, + * along multiple input Blobs. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class EltwiseLayer : public Layer { + public: + explicit EltwiseLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Eltwise"; } + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + EltwiseParameter_EltwiseOp op_; + vector coeffs_; + Blob max_idx_; + + bool stable_prod_grad_; +}; + +/** + * @brief Takes two+ Blobs, interprets last Blob as a selector and + * filter remaining Blobs accordingly with selector data (0 means that + * the corresponding item has to be filtered, non-zero means that corresponding + * item needs to stay). + */ +template +class FilterLayer : public Layer { + public: + explicit FilterLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Filter"; } + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + /** + * @param bottom input Blob vector (length 2+) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs to be filtered @f$ x_1 @f$ + * -# ... + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs to be filtered @f$ x_K @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the selector blob + * @param top output Blob vector (length 1+) + * -# @f$ (S \times C \times H \times W) @f$ () + * the filtered output @f$ x_1 @f$ + * where S is the number of items + * that haven't been filtered + * @f$ (S \times C \times H \times W) @f$ + * the filtered output @f$ x_K @f$ + * where S is the number of items + * that haven't been filtered + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the forwarded inputs. 
+ * + * @param top output Blob vector (length 1+), providing the error gradient with + * respect to the outputs + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 2+), into which the top error + * gradient is copied + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool first_reshape_; + vector indices_to_forward_; +}; + +/** + * @brief Reshapes the input Blob into flat vectors. + * + * Note: because this layer does not change the input values -- merely the + * dimensions -- it can simply copy the input. The copy happens "virtually" + * (thus taking effectively 0 real time) by setting, in Forward, the data + * pointer of the top Blob to that of the bottom Blob (see Blob::ShareData), + * and in Backward, the diff pointer of the bottom Blob to that of the top Blob + * (see Blob::ShareDiff). + */ +template +class FlattenLayer : public Layer { + public: + explicit FlattenLayer(const LayerParameter& param) + : Layer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Flatten"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + /** + * @param bottom input Blob vector (length 2+) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs + * @param top output Blob vector (length 1) + * -# @f$ (N \times CHW \times 1 \times 1) @f$ + * the outputs -- i.e., the (virtually) copied, flattened inputs + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the concatenate inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length K), into which the top error + * gradient is (virtually) copied + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +/** + * @brief Also known as a "fully-connected" layer, computes an inner product + * with a set of learned weights, and (optionally) adds biases. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. 
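+ *
+ * As a rough sketch (names and sizes are hypothetical), a fully-connected
+ * layer producing 1000 outputs could be declared as:
+ * \code
+ * layer {
+ *   name: "fc8" type: "InnerProduct"
+ *   bottom: "fc7" top: "fc8"
+ *   inner_product_param {
+ *     num_output: 1000
+ *     weight_filler { type: "xavier" }
+ *     bias_filler { type: "constant" value: 0 }
+ *   }
+ * }
+ * \endcode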
+ */ +template +class InnerProductLayer : public Layer { + public: + explicit InnerProductLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "InnerProduct"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int M_; + int K_; + int N_; + bool bias_term_; + Blob bias_multiplier_; +}; + + +/** + * @brief The compressed InnerProduct layer, also known as a compressed + * "fully-connected" layer + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class CInnerProductLayer : public Layer { + public: + explicit CInnerProductLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "CInnerProduct"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int M_; + int K_; + int N_; + bool bias_term_; + Blob bias_multiplier_; + + private: + Blob weight_tmp_; + Blob bias_tmp_; + Blob rand_weight_m_; + Blob rand_bias_m_; + Dtype gamma,power; + Dtype crate; + Dtype mu,std; + int iter_stop_; +}; + + +/** + * @brief Normalizes the input to have 0-mean and/or unit (1) variance. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class MVNLayer : public Layer { + public: + explicit MVNLayer(const LayerParameter& param) + : Layer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "MVN"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob mean_, variance_, temp_; + + /// sum_multiplier is used to carry out sum using BLAS + Blob sum_multiplier_; + Dtype eps_; +}; + +/* + * @brief Reshapes the input Blob into an arbitrary-sized output Blob. + * + * Note: similarly to FlattenLayer, this layer does not change the input values + * (see FlattenLayer, Blob::ShareData and Blob::ShareDiff). 
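+ *
+ * A sketch of the usual prototxt form (blob names are made up): a dim of 0
+ * copies the corresponding bottom dimension, and a single -1 is inferred from
+ * the remaining element count, e.g. flattening all trailing axes:
+ * \code
+ * layer {
+ *   name: "reshape" type: "Reshape"
+ *   bottom: "data" top: "reshaped"
+ *   reshape_param { shape { dim: 0 dim: -1 } }
+ * }
+ * \endcode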
+ */ +template +class ReshapeLayer : public Layer { + public: + explicit ReshapeLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Reshape"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top) {} + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + + /// @brief vector of axes indices whose dimensions we'll copy from the bottom + vector copy_axes_; + /// @brief the index of the axis whose dimension we infer, or -1 if none + int inferred_axis_; + /// @brief the product of the "constant" output dimensions + int constant_count_; +}; + +/** + * @brief Compute "reductions" -- operations that return a scalar output Blob + * for an input Blob of arbitrary size, such as the sum, absolute sum, + * and sum of squares. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class ReductionLayer : public Layer { + public: + explicit ReductionLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Reduction"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + /// @brief the reduction operation performed by the layer + ReductionParameter_ReductionOp op_; + /// @brief a scalar coefficient applied to all outputs + Dtype coeff_; + /// @brief the index of the first input axis to reduce + int axis_; + /// @brief the number of reductions performed + int num_; + /// @brief the input size of each reduction + int dim_; + /// @brief a helper Blob used for summation (op_ == SUM) + Blob sum_multiplier_; +}; + +/** + * @brief Ignores bottom blobs while producing no top blobs. (This is useful + * to suppress outputs during testing.) + */ +template +class SilenceLayer : public Layer { + public: + explicit SilenceLayer(const LayerParameter& param) + : Layer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "Silence"; } + virtual inline int MinBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 0; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top) {} + // We can't define Forward_gpu here, since STUB_GPU will provide + // its own definition for CPU_ONLY mode. 
+ virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +/** + * @brief Computes the softmax function. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class SoftmaxLayer : public Layer { + public: + explicit SoftmaxLayer(const LayerParameter& param) + : Layer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Softmax"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int outer_num_; + int inner_num_; + int softmax_axis_; + /// sum_multiplier is used to carry out sum using BLAS + Blob sum_multiplier_; + /// scale is an intermediate Blob to hold temporary results. + Blob scale_; +}; + +#ifdef USE_CUDNN +/** + * @brief cuDNN implementation of SoftmaxLayer. + * Fallback to SoftmaxLayer for CPU mode. + */ +template +class CuDNNSoftmaxLayer : public SoftmaxLayer { + public: + explicit CuDNNSoftmaxLayer(const LayerParameter& param) + : SoftmaxLayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNSoftmaxLayer(); + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t handle_; + cudnnTensorDescriptor_t bottom_desc_; + cudnnTensorDescriptor_t top_desc_; +}; +#endif + +/** + * @brief Creates a "split" path in the network by copying the bottom Blob + * into multiple top Blob%s to be used by multiple consuming layers. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class SplitLayer : public Layer { + public: + explicit SplitLayer(const LayerParameter& param) + : Layer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Split"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int count_; +}; + +/** + * @brief Takes a Blob and slices it along either the num or channel dimension, + * outputting multiple sliced Blob results. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. 
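+ *
+ * A minimal sketch (blob names are hypothetical) splitting a 6-channel blob
+ * into 2 + 4 channels along axis 1:
+ * \code
+ * layer {
+ *   name: "slice" type: "Slice"
+ *   bottom: "data" top: "part1" top: "part2"
+ *   slice_param { axis: 1 slice_point: 2 }
+ * }
+ * \endcode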
+ */ +template +class SliceLayer : public Layer { + public: + explicit SliceLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Slice"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 2; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int count_; + int num_slices_; + int slice_size_; + int slice_axis_; + vector slice_point_; +}; + +} // namespace caffe + +#endif // CAFFE_COMMON_LAYERS_HPP_ diff --git a/include/caffe/data_layers.hpp b/include/caffe/data_layers.hpp new file mode 100755 index 0000000..552d814 --- /dev/null +++ b/include/caffe/data_layers.hpp @@ -0,0 +1,343 @@ +#ifndef CAFFE_DATA_LAYERS_HPP_ +#define CAFFE_DATA_LAYERS_HPP_ + +#include +#include +#include + +#include "hdf5.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/data_reader.hpp" +#include "caffe/data_transformer.hpp" +#include "caffe/filler.hpp" +#include "caffe/internal_thread.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/blocking_queue.hpp" +#include "caffe/util/db.hpp" + +namespace caffe { + +/** + * @brief Provides base for data layers that feed blobs to the Net. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class BaseDataLayer : public Layer { + public: + explicit BaseDataLayer(const LayerParameter& param); + // LayerSetUp: implements common data layer setup functionality, and calls + // DataLayerSetUp to do special data layer setup for individual layer types. + // This method may not be overridden except by the BasePrefetchingDataLayer. + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top) {} + // Data layers have no bottoms, so reshaping is trivial. + virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + + protected: + TransformationParameter transform_param_; + shared_ptr > data_transformer_; + bool output_labels_; +}; + +template +class Batch { + public: + Blob data_, label_; +}; + +template +class BasePrefetchingDataLayer : + public BaseDataLayer, public InternalThread { + public: + explicit BasePrefetchingDataLayer(const LayerParameter& param); + // LayerSetUp: implements common data layer setup functionality, and calls + // DataLayerSetUp to do special data layer setup for individual layer types. + // This method may not be overridden. 
+ void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + // Prefetches batches (asynchronously if to GPU memory) + static const int PREFETCH_COUNT = 3; + + protected: + virtual void InternalThreadEntry(); + virtual void load_batch(Batch* batch) = 0; + + Batch prefetch_[PREFETCH_COUNT]; + BlockingQueue*> prefetch_free_; + BlockingQueue*> prefetch_full_; + + Blob transformed_data_; +}; + +template +class DataLayer : public BasePrefetchingDataLayer { + public: + explicit DataLayer(const LayerParameter& param); + virtual ~DataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + // DataLayer uses DataReader instead for sharing for parallelism + virtual inline bool ShareInParallel() const { return false; } + virtual inline const char* type() const { return "Data"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MaxTopBlobs() const { return 2; } + + protected: + virtual void load_batch(Batch* batch); + + DataReader reader_; +}; + +/** + * @brief Provides data to the Net generated by a Filler. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class DummyDataLayer : public Layer { + public: + explicit DummyDataLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } + // Data layers have no bottoms, so reshaping is trivial. + virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "DummyData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + + vector > > fillers_; + vector refill_; +}; + +/** + * @brief Provides data to the Net from HDF5 files. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class HDF5DataLayer : public Layer { + public: + explicit HDF5DataLayer(const LayerParameter& param) + : Layer(param) {} + virtual ~HDF5DataLayer(); + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } + // Data layers have no bottoms, so reshaping is trivial. 
+ virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "HDF5Data"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void LoadHDF5FileData(const char* filename); + + std::vector hdf_filenames_; + unsigned int num_files_; + unsigned int current_file_; + hsize_t current_row_; + std::vector > > hdf_blobs_; + std::vector data_permutation_; + std::vector file_permutation_; +}; + +/** + * @brief Write blobs to disk as HDF5 files. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class HDF5OutputLayer : public Layer { + public: + explicit HDF5OutputLayer(const LayerParameter& param) + : Layer(param), file_opened_(false) {} + virtual ~HDF5OutputLayer(); + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } + // Data layers have no bottoms, so reshaping is trivial. + virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "HDF5Output"; } + // TODO: no limit on the number of blobs + virtual inline int ExactNumBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 0; } + + inline std::string file_name() const { return file_name_; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void SaveBlobs(); + + bool file_opened_; + std::string file_name_; + hid_t file_id_; + Blob data_blob_; + Blob label_blob_; +}; + +/** + * @brief Provides data to the Net from image files. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class ImageDataLayer : public BasePrefetchingDataLayer { + public: + explicit ImageDataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param) {} + virtual ~ImageDataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "ImageData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + protected: + shared_ptr prefetch_rng_; + virtual void ShuffleImages(); + virtual void load_batch(Batch* batch); + + vector > lines_; + int lines_id_; +}; + +/** + * @brief Provides data to the Net from memory. + * + * TODO(dox): thorough documentation for Forward and proto params. 
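+ *
+ * A hedged usage sketch (assumes the net's first layer is a MemoryData layer
+ * and that data/labels point to caller-owned buffers holding n items; names
+ * are illustrative only):
+ * \code
+ * MemoryDataLayer<float>* input =
+ *     static_cast<MemoryDataLayer<float>*>(net.layers()[0].get());
+ * input->Reset(data, labels, n);  // buffers must outlive their use by the net
+ * // each subsequent forward pass now draws one batch from these buffers
+ * \endcode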
+ */ +template +class MemoryDataLayer : public BaseDataLayer { + public: + explicit MemoryDataLayer(const LayerParameter& param) + : BaseDataLayer(param), has_new_data_(false) {} + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "MemoryData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + virtual void AddDatumVector(const vector& datum_vector); + virtual void AddMatVector(const vector& mat_vector, + const vector& labels); + + // Reset should accept const pointers, but can't, because the memory + // will be given to Blob, which is mutable + void Reset(Dtype* data, Dtype* label, int n); + void set_batch_size(int new_size); + + int batch_size() { return batch_size_; } + int channels() { return channels_; } + int height() { return height_; } + int width() { return width_; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + int batch_size_, channels_, height_, width_, size_; + Dtype* data_; + Dtype* labels_; + int n_; + size_t pos_; + Blob added_data_; + Blob added_label_; + bool has_new_data_; +}; + +/** + * @brief Provides data to the Net from windows of images files, specified + * by a window data file. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class WindowDataLayer : public BasePrefetchingDataLayer { + public: + explicit WindowDataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param) {} + virtual ~WindowDataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "WindowData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + protected: + virtual unsigned int PrefetchRand(); + virtual void load_batch(Batch* batch); + + shared_ptr prefetch_rng_; + vector > > image_database_; + enum WindowField { IMAGE_INDEX, LABEL, OVERLAP, X1, Y1, X2, Y2, NUM }; + vector > fg_windows_; + vector > bg_windows_; + Blob data_mean_; + vector mean_values_; + bool has_mean_file_; + bool has_mean_values_; + bool cache_images_; + vector > image_database_cache_; +}; + +} // namespace caffe + +#endif // CAFFE_DATA_LAYERS_HPP_ diff --git a/include/caffe/data_reader.hpp b/include/caffe/data_reader.hpp new file mode 100755 index 0000000..8ed5542 --- /dev/null +++ b/include/caffe/data_reader.hpp @@ -0,0 +1,82 @@ +#ifndef CAFFE_DATA_READER_HPP_ +#define CAFFE_DATA_READER_HPP_ + +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/internal_thread.hpp" +#include "caffe/util/blocking_queue.hpp" +#include "caffe/util/db.hpp" + +namespace caffe { + +/** + * @brief Reads data from a source to queues available to data layers. + * A single reading thread is created per source, even if multiple solvers + * are running in parallel, e.g. for multi-GPU training. This makes sure + * databases are read sequentially, and that each solver accesses a different + * subset of the database. Data is distributed to solvers in a round-robin + * way to keep parallel training deterministic. 
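+ *
+ * Consumer-side sketch (roughly what a data layer's load_batch does; the
+ * surrounding layer code is assumed and not shown):
+ * \code
+ * DataReader reader(layer_param);
+ * Datum& datum = *(reader.full().pop());  // blocks until the body has read one
+ * // ... transform/copy datum into the current batch ...
+ * reader.free().push(&datum);             // recycle the slot for the reader
+ * \endcode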
+ */ +class DataReader { + public: + explicit DataReader(const LayerParameter& param); + ~DataReader(); + + inline BlockingQueue& free() const { + return queue_pair_->free_; + } + inline BlockingQueue& full() const { + return queue_pair_->full_; + } + + protected: + // Queue pairs are shared between a body and its readers + class QueuePair { + public: + explicit QueuePair(int size); + ~QueuePair(); + + BlockingQueue free_; + BlockingQueue full_; + + DISABLE_COPY_AND_ASSIGN(QueuePair); + }; + + // A single body is created per source + class Body : public InternalThread { + public: + explicit Body(const LayerParameter& param); + virtual ~Body(); + + protected: + void InternalThreadEntry(); + void read_one(db::Cursor* cursor, QueuePair* qp); + + const LayerParameter param_; + BlockingQueue > new_queue_pairs_; + + friend class DataReader; + + DISABLE_COPY_AND_ASSIGN(Body); + }; + + // A source is uniquely identified by its layer name + path, in case + // the same database is read from two different locations in the net. + static inline string source_key(const LayerParameter& param) { + return param.name() + ":" + param.data_param().source(); + } + + const shared_ptr queue_pair_; + shared_ptr body_; + + static map > bodies_; + +DISABLE_COPY_AND_ASSIGN(DataReader); +}; + +} // namespace caffe + +#endif // CAFFE_DATA_READER_HPP_ diff --git a/include/caffe/data_transformer.hpp b/include/caffe/data_transformer.hpp new file mode 100755 index 0000000..0ad68c8 --- /dev/null +++ b/include/caffe/data_transformer.hpp @@ -0,0 +1,151 @@ +#ifndef CAFFE_DATA_TRANSFORMER_HPP +#define CAFFE_DATA_TRANSFORMER_HPP + +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Applies common transformations to the input data, such as + * scaling, mirroring, substracting the image mean... + */ +template +class DataTransformer { + public: + explicit DataTransformer(const TransformationParameter& param, Phase phase); + virtual ~DataTransformer() {} + + /** + * @brief Initialize the Random number generations if needed by the + * transformation. + */ + void InitRand(); + + /** + * @brief Applies the transformation defined in the data layer's + * transform_param block to the data. + * + * @param datum + * Datum containing the data to be transformed. + * @param transformed_blob + * This is destination blob. It can be part of top blob's data if + * set_cpu_data() is used. See data_layer.cpp for an example. + */ + void Transform(const Datum& datum, Blob* transformed_blob); + + /** + * @brief Applies the transformation defined in the data layer's + * transform_param block to a vector of Datum. + * + * @param datum_vector + * A vector of Datum containing the data to be transformed. + * @param transformed_blob + * This is destination blob. It can be part of top blob's data if + * set_cpu_data() is used. See memory_layer.cpp for an example. + */ + void Transform(const vector & datum_vector, + Blob* transformed_blob); + + /** + * @brief Applies the transformation defined in the data layer's + * transform_param block to a vector of Mat. + * + * @param mat_vector + * A vector of Mat containing the data to be transformed. + * @param transformed_blob + * This is destination blob. It can be part of top blob's data if + * set_cpu_data() is used. See memory_layer.cpp for an example. 
+ */ + void Transform(const vector & mat_vector, + Blob* transformed_blob); + + /** + * @brief Applies the transformation defined in the data layer's + * transform_param block to a cv::Mat + * + * @param cv_img + * cv::Mat containing the data to be transformed. + * @param transformed_blob + * This is destination blob. It can be part of top blob's data if + * set_cpu_data() is used. See image_data_layer.cpp for an example. + */ + void Transform(const cv::Mat& cv_img, Blob* transformed_blob); + + /** + * @brief Applies the same transformation defined in the data layer's + * transform_param block to all the num images in a input_blob. + * + * @param input_blob + * A Blob containing the data to be transformed. It applies the same + * transformation to all the num images in the blob. + * @param transformed_blob + * This is destination blob, it will contain as many images as the + * input blob. It can be part of top blob's data. + */ + void Transform(Blob* input_blob, Blob* transformed_blob); + + /** + * @brief Infers the shape of transformed_blob will have when + * the transformation is applied to the data. + * + * @param datum + * Datum containing the data to be transformed. + */ + vector InferBlobShape(const Datum& datum); + /** + * @brief Infers the shape of transformed_blob will have when + * the transformation is applied to the data. + * It uses the first element to infer the shape of the blob. + * + * @param datum_vector + * A vector of Datum containing the data to be transformed. + */ + vector InferBlobShape(const vector & datum_vector); + /** + * @brief Infers the shape of transformed_blob will have when + * the transformation is applied to the data. + * It uses the first element to infer the shape of the blob. + * + * @param mat_vector + * A vector of Mat containing the data to be transformed. + */ + vector InferBlobShape(const vector & mat_vector); + /** + * @brief Infers the shape of transformed_blob will have when + * the transformation is applied to the data. + * + * @param cv_img + * cv::Mat containing the data to be transformed. + */ + vector InferBlobShape(const cv::Mat& cv_img); + + protected: + /** + * @brief Generates a random integer from Uniform({0, 1, ..., n-1}). + * + * @param n + * The upperbound (exclusive) value of the random number. + * @return + * A uniformly random integer value from ({0, 1, ..., n-1}). + */ + virtual int Rand(int n); + + void Transform(const Datum& datum, Dtype* transformed_data); + // Tranformation parameters + TransformationParameter param_; + + + shared_ptr rng_; + Phase phase_; + Blob data_mean_; + vector mean_values_; +}; + +} // namespace caffe + +#endif // CAFFE_DATA_TRANSFORMER_HPP_ + diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp new file mode 100755 index 0000000..888f4a4 --- /dev/null +++ b/include/caffe/filler.hpp @@ -0,0 +1,296 @@ +// Fillers are random number generators that fills a blob using the specified +// algorithm. The expectation is that they are only going to be used during +// initialization time and will not involve any GPUs. + +#ifndef CAFFE_FILLER_HPP +#define CAFFE_FILLER_HPP + +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +/// @brief Fills a Blob with constant or randomly-generated data. 
+template +class Filler { + public: + explicit Filler(const FillerParameter& param) : filler_param_(param) {} + virtual ~Filler() {} + virtual void Fill(Blob* blob) = 0; + protected: + FillerParameter filler_param_; +}; // class Filler + + +/// @brief Fills a Blob with constant values @f$ x = 0 @f$. +template +class ConstantFiller : public Filler { + public: + explicit ConstantFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + Dtype* data = blob->mutable_cpu_data(); + const int count = blob->count(); + const Dtype value = this->filler_param_.value(); + CHECK(count); + for (int i = 0; i < count; ++i) { + data[i] = value; + } + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +/// @brief Fills a Blob with uniformly distributed values @f$ x\sim U(a, b) @f$. +template +class UniformFiller : public Filler { + public: + explicit UniformFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK(blob->count()); + caffe_rng_uniform(blob->count(), Dtype(this->filler_param_.min()), + Dtype(this->filler_param_.max()), blob->mutable_cpu_data()); + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +/// @brief Fills a Blob with Gaussian-distributed values @f$ x = a @f$. +template +class GaussianFiller : public Filler { + public: + explicit GaussianFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + Dtype* data = blob->mutable_cpu_data(); + CHECK(blob->count()); + caffe_rng_gaussian(blob->count(), Dtype(this->filler_param_.mean()), + Dtype(this->filler_param_.std()), blob->mutable_cpu_data()); + int sparse = this->filler_param_.sparse(); + CHECK_GE(sparse, -1); + if (sparse >= 0) { + // Sparse initialization is implemented for "weight" blobs; i.e. matrices. + // These have num == channels == 1; width is number of inputs; height is + // number of outputs. The 'sparse' variable specifies the mean number + // of non-zero input weights for a given output. + CHECK_GE(blob->num_axes(), 1); + const int num_outputs = blob->shape(0); + Dtype non_zero_probability = Dtype(sparse) / Dtype(num_outputs); + rand_vec_.reset(new SyncedMemory(blob->count() * sizeof(int))); + int* mask = reinterpret_cast(rand_vec_->mutable_cpu_data()); + caffe_rng_bernoulli(blob->count(), non_zero_probability, mask); + for (int i = 0; i < blob->count(); ++i) { + data[i] *= mask[i]; + } + } + } + + protected: + shared_ptr rand_vec_; +}; + +/** @brief Fills a Blob with values @f$ x \in [0, 1] @f$ + * such that @f$ \forall i \sum_j x_{ij} = 1 @f$. 
+ */ +template +class PositiveUnitballFiller : public Filler { + public: + explicit PositiveUnitballFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + Dtype* data = blob->mutable_cpu_data(); + DCHECK(blob->count()); + caffe_rng_uniform(blob->count(), 0, 1, blob->mutable_cpu_data()); + // We expect the filler to not be called very frequently, so we will + // just use a simple implementation + int dim = blob->count() / blob->num(); + CHECK(dim); + for (int i = 0; i < blob->num(); ++i) { + Dtype sum = 0; + for (int j = 0; j < dim; ++j) { + sum += data[i * dim + j]; + } + for (int j = 0; j < dim; ++j) { + data[i * dim + j] /= sum; + } + } + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +/** + * @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ is + * set inversely proportional to number of incoming nodes, outgoing + * nodes, or their average. + * + * A Filler based on the paper [Bengio and Glorot 2010]: Understanding + * the difficulty of training deep feedforward neuralnetworks. + * + * It fills the incoming matrix by randomly sampling uniform data from [-scale, + * scale] where scale = sqrt(3 / n) where n is the fan_in, fan_out, or their + * average, depending on the variance_norm option. You should make sure the + * input blob has shape (num, a, b, c) where a * b * c = fan_in and num * b * c + * = fan_out. Note that this is currently not the case for inner product layers. + * + * TODO(dox): make notation in above comment consistent with rest & use LaTeX. + */ +template +class XavierFiller : public Filler { + public: + explicit XavierFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK(blob->count()); + int fan_in = blob->count() / blob->num(); + int fan_out = blob->count() / blob->channels(); + Dtype n = fan_in; // default to fan_in + if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_AVERAGE) { + n = (fan_in + fan_out) / Dtype(2); + } else if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_FAN_OUT) { + n = fan_out; + } + Dtype scale = sqrt(Dtype(3) / n); + caffe_rng_uniform(blob->count(), -scale, scale, + blob->mutable_cpu_data()); + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +/** + * @brief Fills a Blob with values @f$ x \sim N(0, \sigma^2) @f$ where + * @f$ \sigma^2 @f$ is set inversely proportional to number of incoming + * nodes, outgoing nodes, or their average. + * + * A Filler based on the paper [He, Zhang, Ren and Sun 2015]: Specifically + * accounts for ReLU nonlinearities. + * + * Aside: for another perspective on the scaling factor, see the derivation of + * [Saxe, McClelland, and Ganguli 2013 (v3)]. + * + * It fills the incoming matrix by randomly sampling Gaussian data with std = + * sqrt(2 / n) where n is the fan_in, fan_out, or their average, depending on + * the variance_norm option. You should make sure the input blob has shape (num, + * a, b, c) where a * b * c = fan_in and num * b * c = fan_out. Note that this + * is currently not the case for inner product layers. 
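+ *
+ * In a prototxt this filler is requested per layer, e.g. (a sketch; the
+ * variance_norm value is chosen only for illustration):
+ * \code
+ * weight_filler { type: "msra" variance_norm: FAN_IN }
+ * \endcode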
+ */ +template +class MSRAFiller : public Filler { + public: + explicit MSRAFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK(blob->count()); + int fan_in = blob->count() / blob->num(); + int fan_out = blob->count() / blob->channels(); + Dtype n = fan_in; // default to fan_in + if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_AVERAGE) { + n = (fan_in + fan_out) / Dtype(2); + } else if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_FAN_OUT) { + n = fan_out; + } + Dtype std = sqrt(Dtype(2) / n); + caffe_rng_gaussian(blob->count(), Dtype(0), std, + blob->mutable_cpu_data()); + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +/*! +@brief Fills a Blob with coefficients for bilinear interpolation. + +A common use case is with the DeconvolutionLayer acting as upsampling. +You can upsample a feature map with shape of (B, C, H, W) by any integer factor +using the following proto. +\code +layer { + name: "upsample", type: "Deconvolution" + bottom: "{{bottom_name}}" top: "{{top_name}}" + convolution_param { + kernel_size: {{2 * factor - factor % 2}} stride: {{factor}} + num_output: {{C}} group: {{C}} + pad: {{ceil((factor - 1) / 2.)}} + weight_filler: { type: "bilinear" } bias_term: false + } + param { lr_mult: 0 decay_mult: 0 } +} +\endcode +Please use this by replacing `{{}}` with your values. By specifying +`num_output: {{C}} group: {{C}}`, it behaves as +channel-wise convolution. The filter shape of this deconvolution layer will be +(C, 1, K, K) where K is `kernel_size`, and this filler will set a (K, K) +interpolation kernel for every channel of the filter identically. The resulting +shape of the top feature map will be (B, C, factor * H, factor * W). +Note that the learning rate and the +weight decay are set to 0 in order to keep coefficient values of bilinear +interpolation unchanged during training. If you apply this to an image, this +operation is equivalent to the following call in Python with Scikit.Image. +\code{.py} +out = skimage.transform.rescale(img, factor, mode='constant', cval=0) +\endcode + */ +template +class BilinearFiller : public Filler { + public: + explicit BilinearFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK_EQ(blob->num_axes(), 4) << "Blob must be 4 dim."; + CHECK_EQ(blob->width(), blob->height()) << "Filter must be square"; + Dtype* data = blob->mutable_cpu_data(); + int f = ceil(blob->width() / 2.); + float c = (2 * f - 1 - f % 2) / (2. * f); + for (int i = 0; i < blob->count(); ++i) { + float x = i % blob->width(); + float y = (i / blob->width()) % blob->height(); + data[i] = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c)); + } + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +/** + * @brief Get a specific filler from the specification given in FillerParameter. + * + * Ideally this would be replaced by a factory pattern, but we will leave it + * this way for now. 
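+ *
+ * Typical use (a sketch; the blob shape is arbitrary and chosen only for
+ * illustration):
+ * \code
+ * FillerParameter filler_param;
+ * filler_param.set_type("xavier");
+ * shared_ptr<Filler<float> > filler(GetFiller<float>(filler_param));
+ * Blob<float> weights(1, 1, 10, 20);
+ * filler->Fill(&weights);
+ * \endcode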
+ */ +template +Filler* GetFiller(const FillerParameter& param) { + const std::string& type = param.type(); + if (type == "constant") { + return new ConstantFiller(param); + } else if (type == "gaussian") { + return new GaussianFiller(param); + } else if (type == "positive_unitball") { + return new PositiveUnitballFiller(param); + } else if (type == "uniform") { + return new UniformFiller(param); + } else if (type == "xavier") { + return new XavierFiller(param); + } else if (type == "msra") { + return new MSRAFiller(param); + } else if (type == "bilinear") { + return new BilinearFiller(param); + } else { + CHECK(false) << "Unknown filler name: " << param.type(); + } + return (Filler*)(NULL); +} + +} // namespace caffe + +#endif // CAFFE_FILLER_HPP_ diff --git a/include/caffe/internal_thread.hpp b/include/caffe/internal_thread.hpp new file mode 100755 index 0000000..6a8c5a0 --- /dev/null +++ b/include/caffe/internal_thread.hpp @@ -0,0 +1,53 @@ +#ifndef CAFFE_INTERNAL_THREAD_HPP_ +#define CAFFE_INTERNAL_THREAD_HPP_ + +#include "caffe/common.hpp" + +/** + Forward declare boost::thread instead of including boost/thread.hpp + to avoid a boost/NVCC issues (#1009, #1010) on OSX. + */ +namespace boost { class thread; } + +namespace caffe { + +/** + * Virtual class encapsulate boost::thread for use in base class + * The child class will acquire the ability to run a single thread, + * by reimplementing the virtual function InternalThreadEntry. + */ +class InternalThread { + public: + InternalThread() : thread_() {} + virtual ~InternalThread(); + + /** + * Caffe's thread local state will be initialized using the current + * thread values, e.g. device id, solver index etc. The random seed + * is initialized using caffe_rng_rand. + */ + void StartInternalThread(); + + /** Will not return until the internal thread has exited. */ + void StopInternalThread(); + + bool is_started() const; + + protected: + /* Implement this method in your subclass + with the code you want your thread to run. */ + virtual void InternalThreadEntry() {} + + /* Should be tested when running loops to exit when requested. */ + bool must_stop(); + + private: + void entry(int device, Caffe::Brew mode, int rand_seed, int solver_count, + bool root_solver); + + shared_ptr thread_; +}; + +} // namespace caffe + +#endif // CAFFE_INTERNAL_THREAD_HPP_ diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp new file mode 100755 index 0000000..f4dba1d --- /dev/null +++ b/include/caffe/layer.hpp @@ -0,0 +1,523 @@ +#ifndef CAFFE_LAYER_H_ +#define CAFFE_LAYER_H_ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer_factory.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/device_alternate.hpp" + +/** + Forward declare boost::thread instead of including boost/thread.hpp + to avoid a boost/NVCC issues (#1009, #1010) on OSX. + */ +namespace boost { class mutex; } + +namespace caffe { + +/** + * @brief An interface for the units of computation which can be composed into a + * Net. + * + * Layer%s must implement a Forward function, in which they take their input + * (bottom) Blob%s (if any) and compute their output Blob%s (if any). + * They may also implement a Backward function, in which they compute the error + * gradients with respect to their input Blob%s, given the error gradients with + * their output Blob%s. + */ +template +class Layer { + public: + /** + * You should not implement your own constructor. 
Any set up code should go + * to SetUp(), where the dimensions of the bottom blobs are provided to the + * layer. + */ + explicit Layer(const LayerParameter& param) + : layer_param_(param), is_shared_(false) { + // Set phase and copy blobs (if there are any). + phase_ = param.phase(); + if (layer_param_.blobs_size() > 0) { + blobs_.resize(layer_param_.blobs_size()); + for (int i = 0; i < layer_param_.blobs_size(); ++i) { + blobs_[i].reset(new Blob()); + blobs_[i]->FromProto(layer_param_.blobs(i)); + } + } + } + virtual ~Layer() {} + + /** + * @brief Implements common layer setup functionality. + * + * @param bottom the preshaped input blobs + * @param top + * the allocated but unshaped output blobs, to be shaped by Reshape + * + * Checks that the number of bottom and top blobs is correct. + * Calls LayerSetUp to do special layer setup for individual layer types, + * followed by Reshape to set up sizes of top blobs and internal buffers. + * Sets up the loss weight multiplier blobs for any non-zero loss weights. + * This method may not be overridden. + */ + void SetUp(const vector*>& bottom, + const vector*>& top) { + InitMutex(); + CheckBlobCounts(bottom, top); + LayerSetUp(bottom, top); + Reshape(bottom, top); + SetLossWeights(top); + } + + /** + * @brief Does layer-specific setup: your layer should implement this function + * as well as Reshape. + * + * @param bottom + * the preshaped input blobs, whose data fields store the input data for + * this layer + * @param top + * the allocated but unshaped output blobs + * + * This method should do one-time layer specific setup. This includes reading + * and processing relevent parameters from the layer_param_. + * Setting up the shapes of top blobs and internal buffers should be done in + * Reshape, which will be called before the forward pass to + * adjust the top blob sizes. + */ + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top) {} + + /** + * @brief Whether a layer should be shared by multiple nets during data + * parallelism. By default, all layers except for data layers should + * not be shared. data layers should be shared to ensure each worker + * solver access data sequentially during data parallelism. + */ + virtual inline bool ShareInParallel() const { return false; } + + /** @brief Return whether this layer is actually shared by other nets. + * If ShareInParallel() is true and using more than one GPU and the + * net has TRAIN phase, then this function is expected return true. + */ + inline bool IsShared() const { return is_shared_; } + + /** @brief Set whether this layer is actually shared by other nets + * If ShareInParallel() is true and using more than one GPU and the + * net has TRAIN phase, then is_shared should be set true. + */ + inline void SetShared(bool is_shared) { + CHECK(ShareInParallel() || !is_shared) + << type() << "Layer does not support sharing."; + is_shared_ = is_shared; + } + + /** + * @brief Adjust the shapes of top blobs and internal buffers to accommodate + * the shapes of the bottom blobs. + * + * @param bottom the input blobs, with the requested input shapes + * @param top the top blobs, which should be reshaped as needed + * + * This method should reshape top blobs as needed according to the shapes + * of the bottom (input) blobs, as well as reshaping any internal buffers + * and making any other necessary adjustments so that the layer can + * accommodate the bottom blobs. 
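+   *
+   * For an elementwise layer whose top simply mirrors its bottom, a minimal
+   * implementation (shown only as a sketch) is:
+   * \code
+   * virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
+   *     const vector<Blob<Dtype>*>& top) {
+   *   top[0]->ReshapeLike(*bottom[0]);
+   * }
+   * \endcode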
+ */ + virtual void Reshape(const vector*>& bottom, + const vector*>& top) = 0; + + /** + * @brief Given the bottom blobs, compute the top blobs and the loss. + * + * @param bottom + * the input blobs, whose data fields store the input data for this layer + * @param top + * the preshaped output blobs, whose data fields will store this layers' + * outputs + * \return The total loss from the layer. + * + * The Forward wrapper calls the relevant device wrapper function + * (Forward_cpu or Forward_gpu) to compute the top blob values given the + * bottom blobs. If the layer has any non-zero loss_weights, the wrapper + * then computes and returns the loss. + * + * Your layer should implement Forward_cpu and (optionally) Forward_gpu. + */ + inline Dtype Forward(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Given the top blob error gradients, compute the bottom blob error + * gradients. + * + * @param top + * the output blobs, whose diff fields store the gradient of the error + * with respect to themselves + * @param propagate_down + * a vector with equal length to bottom, with each index indicating + * whether to propagate the error gradients down to the bottom blob at + * the corresponding index + * @param bottom + * the input blobs, whose diff fields will store the gradient of the error + * with respect to themselves after Backward is run + * + * The Backward wrapper calls the relevant device wrapper function + * (Backward_cpu or Backward_gpu) to compute the bottom blob diffs given the + * top blob diffs. + * + * Your layer should implement Backward_cpu and (optionally) Backward_gpu. + */ + inline void Backward(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + + /** + * @brief Returns the vector of learnable parameter blobs. + */ + vector > >& blobs() { + return blobs_; + } + + /** + * @brief Returns the layer parameter. + */ + const LayerParameter& layer_param() const { return layer_param_; } + + /** + * @brief Writes the layer parameter to a protocol buffer + */ + virtual void ToProto(LayerParameter* param, bool write_diff = false); + + /** + * @brief Returns the scalar loss associated with a top blob at a given index. + */ + inline Dtype loss(const int top_index) const { + return (loss_.size() > top_index) ? loss_[top_index] : Dtype(0); + } + + /** + * @brief Sets the loss associated with a top blob at a given index. + */ + inline void set_loss(const int top_index, const Dtype value) { + if (loss_.size() <= top_index) { + loss_.resize(top_index + 1, Dtype(0)); + } + loss_[top_index] = value; + } + + /** + * @brief Returns the layer type. + */ + virtual inline const char* type() const { return ""; } + + /** + * @brief Returns the exact number of bottom blobs required by the layer, + * or -1 if no exact number is required. + * + * This method should be overridden to return a non-negative value if your + * layer expects some exact number of bottom blobs. + */ + virtual inline int ExactNumBottomBlobs() const { return -1; } + /** + * @brief Returns the minimum number of bottom blobs required by the layer, + * or -1 if no minimum number is required. + * + * This method should be overridden to return a non-negative value if your + * layer expects some minimum number of bottom blobs. + */ + virtual inline int MinBottomBlobs() const { return -1; } + /** + * @brief Returns the maximum number of bottom blobs required by the layer, + * or -1 if no maximum number is required. 
+ * + * This method should be overridden to return a non-negative value if your + * layer expects some maximum number of bottom blobs. + */ + virtual inline int MaxBottomBlobs() const { return -1; } + /** + * @brief Returns the exact number of top blobs required by the layer, + * or -1 if no exact number is required. + * + * This method should be overridden to return a non-negative value if your + * layer expects some exact number of top blobs. + */ + virtual inline int ExactNumTopBlobs() const { return -1; } + /** + * @brief Returns the minimum number of top blobs required by the layer, + * or -1 if no minimum number is required. + * + * This method should be overridden to return a non-negative value if your + * layer expects some minimum number of top blobs. + */ + virtual inline int MinTopBlobs() const { return -1; } + /** + * @brief Returns the maximum number of top blobs required by the layer, + * or -1 if no maximum number is required. + * + * This method should be overridden to return a non-negative value if your + * layer expects some maximum number of top blobs. + */ + virtual inline int MaxTopBlobs() const { return -1; } + /** + * @brief Returns true if the layer requires an equal number of bottom and + * top blobs. + * + * This method should be overridden to return true if your layer expects an + * equal number of bottom and top blobs. + */ + virtual inline bool EqualNumBottomTopBlobs() const { return false; } + + /** + * @brief Return whether "anonymous" top blobs are created automatically + * by the layer. + * + * If this method returns true, Net::Init will create enough "anonymous" top + * blobs to fulfill the requirement specified by ExactNumTopBlobs() or + * MinTopBlobs(). + */ + virtual inline bool AutoTopBlobs() const { return false; } + + /** + * @brief Return whether to allow force_backward for a given bottom blob + * index. + * + * If AllowForceBackward(i) == false, we will ignore the force_backward + * setting and backpropagate to blob i only if it needs gradient information + * (as is done when force_backward == false). + */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return true; + } + + /** + * @brief Specifies whether the layer should compute gradients w.r.t. a + * parameter at a particular index given by param_id. + * + * You can safely ignore false values and always compute gradients + * for all parameters, but possibly with wasteful computation. + */ + inline bool param_propagate_down(const int param_id) { + return (param_propagate_down_.size() > param_id) ? + param_propagate_down_[param_id] : false; + } + /** + * @brief Sets whether the layer should compute gradients w.r.t. a + * parameter at a particular index given by param_id. + */ + inline void set_param_propagate_down(const int param_id, const bool value) { + if (param_propagate_down_.size() <= param_id) { + param_propagate_down_.resize(param_id + 1, true); + } + param_propagate_down_[param_id] = value; + } + /************ For dynamic network surgery ***************/ + inline void set_current_iter_num(const int iter_num) { + iter_ = iter_num; + } + /********************************************************/ + protected: + /** The protobuf that stores the layer parameters */ + LayerParameter layer_param_; + /** The phase: TRAIN or TEST */ + Phase phase_; + /** The current iteration number */ + int iter_; + /** The vector that stores the learnable parameters as a set of blobs. */ + vector > > blobs_; + /** Vector indicating whether to compute the diff of each param blob. 
*/ + vector param_propagate_down_; + + /** The vector that indicates whether each top blob has a non-zero weight in + * the objective function. */ + vector loss_; + + /** @brief Using the CPU device, compute the layer output. */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top) = 0; + /** + * @brief Using the GPU device, compute the layer output. + * Fall back to Forward_cpu() if unavailable. + */ + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top) { + // LOG(WARNING) << "Using CPU code as backup."; + return Forward_cpu(bottom, top); + } + + /** + * @brief Using the CPU device, compute the gradients for any parameters and + * for the bottom blobs if propagate_down is true. + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) = 0; + /** + * @brief Using the GPU device, compute the gradients for any parameters and + * for the bottom blobs if propagate_down is true. + * Fall back to Backward_cpu() if unavailable. + */ + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + // LOG(WARNING) << "Using CPU code as backup."; + Backward_cpu(top, propagate_down, bottom); + } + + /** + * Called by the parent Layer's SetUp to check that the number of bottom + * and top Blobs provided as input match the expected numbers specified by + * the {ExactNum,Min,Max}{Bottom,Top}Blobs() functions. + */ + virtual void CheckBlobCounts(const vector*>& bottom, + const vector*>& top) { + if (ExactNumBottomBlobs() >= 0) { + CHECK_EQ(ExactNumBottomBlobs(), bottom.size()) + << type() << " Layer takes " << ExactNumBottomBlobs() + << " bottom blob(s) as input."; + } + if (MinBottomBlobs() >= 0) { + CHECK_LE(MinBottomBlobs(), bottom.size()) + << type() << " Layer takes at least " << MinBottomBlobs() + << " bottom blob(s) as input."; + } + if (MaxBottomBlobs() >= 0) { + CHECK_GE(MaxBottomBlobs(), bottom.size()) + << type() << " Layer takes at most " << MaxBottomBlobs() + << " bottom blob(s) as input."; + } + if (ExactNumTopBlobs() >= 0) { + CHECK_EQ(ExactNumTopBlobs(), top.size()) + << type() << " Layer produces " << ExactNumTopBlobs() + << " top blob(s) as output."; + } + if (MinTopBlobs() >= 0) { + CHECK_LE(MinTopBlobs(), top.size()) + << type() << " Layer produces at least " << MinTopBlobs() + << " top blob(s) as output."; + } + if (MaxTopBlobs() >= 0) { + CHECK_GE(MaxTopBlobs(), top.size()) + << type() << " Layer produces at most " << MaxTopBlobs() + << " top blob(s) as output."; + } + if (EqualNumBottomTopBlobs()) { + CHECK_EQ(bottom.size(), top.size()) + << type() << " Layer produces one top blob as output for each " + << "bottom blob input."; + } + } + + /** + * Called by SetUp to initialize the weights associated with any top blobs in + * the loss function. Store non-zero loss weights in the diff blob. 
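 *
 * Illustrative note (not part of the original comment): after SetLossWeights
 * runs, top[i]->cpu_diff() holds the constant loss_weight in every element,
 * so the Forward wrapper defined further down can fold that blob into the
 * objective with
 *
 *   loss += caffe_cpu_dot(count, top[i]->cpu_data(), top[i]->cpu_diff());
 *
 * which equals loss_weight * sum(top data). Declaring loss_weight: 1 on a
 * top blob in the net prototxt is therefore enough to make any layer's
 * output contribute to the loss.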
+ */ + inline void SetLossWeights(const vector*>& top) { + const int num_loss_weights = layer_param_.loss_weight_size(); + if (num_loss_weights) { + CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be " + "unspecified or specified once per top blob."; + for (int top_id = 0; top_id < top.size(); ++top_id) { + const Dtype loss_weight = layer_param_.loss_weight(top_id); + if (loss_weight == Dtype(0)) { continue; } + this->set_loss(top_id, loss_weight); + const int count = top[top_id]->count(); + Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff(); + caffe_set(count, loss_weight, loss_multiplier); + } + } + } + + private: + /** Whether this layer is actually shared by other nets*/ + bool is_shared_; + + /** The mutex for sequential forward if this layer is shared */ + shared_ptr forward_mutex_; + + /** Initialize forward_mutex_ */ + void InitMutex(); + /** Lock forward_mutex_ if this layer is shared */ + void Lock(); + /** Unlock forward_mutex_ if this layer is shared */ + void Unlock(); + + DISABLE_COPY_AND_ASSIGN(Layer); +}; // class Layer + +// Forward and backward wrappers. You should implement the cpu and +// gpu specific implementations instead, and should not change these +// functions. +template +inline Dtype Layer::Forward(const vector*>& bottom, + const vector*>& top) { + // Lock during forward to ensure sequential forward + Lock(); + Dtype loss = 0; + Reshape(bottom, top); + switch (Caffe::mode()) { + case Caffe::CPU: + Forward_cpu(bottom, top); + for (int top_id = 0; top_id < top.size(); ++top_id) { + if (!this->loss(top_id)) { continue; } + const int count = top[top_id]->count(); + const Dtype* data = top[top_id]->cpu_data(); + const Dtype* loss_weights = top[top_id]->cpu_diff(); + loss += caffe_cpu_dot(count, data, loss_weights); + } + break; + case Caffe::GPU: + Forward_gpu(bottom, top); +#ifndef CPU_ONLY + for (int top_id = 0; top_id < top.size(); ++top_id) { + if (!this->loss(top_id)) { continue; } + const int count = top[top_id]->count(); + const Dtype* data = top[top_id]->gpu_data(); + const Dtype* loss_weights = top[top_id]->gpu_diff(); + Dtype blob_loss = 0; + caffe_gpu_dot(count, data, loss_weights, &blob_loss); + loss += blob_loss; + } +#endif + break; + default: + LOG(FATAL) << "Unknown caffe mode."; + } + Unlock(); + return loss; +} + +template +inline void Layer::Backward(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + switch (Caffe::mode()) { + case Caffe::CPU: + Backward_cpu(top, propagate_down, bottom); + break; + case Caffe::GPU: + Backward_gpu(top, propagate_down, bottom); + break; + default: + LOG(FATAL) << "Unknown caffe mode."; + } +} + +// Serialize LayerParameter to protocol buffer +template +void Layer::ToProto(LayerParameter* param, bool write_diff) { + param->Clear(); + param->CopyFrom(layer_param_); + param->clear_blobs(); + for (int i = 0; i < blobs_.size(); ++i) { + blobs_[i]->ToProto(param->add_blobs(), write_diff); + } +} + +} // namespace caffe + +#endif // CAFFE_LAYER_H_ diff --git a/include/caffe/layer_factory.hpp b/include/caffe/layer_factory.hpp new file mode 100755 index 0000000..2c2fde4 --- /dev/null +++ b/include/caffe/layer_factory.hpp @@ -0,0 +1,140 @@ +/** + * @brief A layer factory that allows one to register layers. + * During runtime, registered layers could be called by passing a LayerParameter + * protobuffer to the CreateLayer function: + * + * LayerRegistry::CreateLayer(param); + * + * There are two ways to register a layer. 
Assuming that we have a layer like: + * + * template + * class MyAwesomeLayer : public Layer { + * // your implementations + * }; + * + * and its type is its C++ class name, but without the "Layer" at the end + * ("MyAwesomeLayer" -> "MyAwesome"). + * + * If the layer is going to be created simply by its constructor, in your c++ + * file, add the following line: + * + * REGISTER_LAYER_CLASS(MyAwesome); + * + * Or, if the layer is going to be created by another creator function, in the + * format of: + * + * template + * Layer GetMyAwesomeLayer(const LayerParameter& param) { + * // your implementation + * } + * + * (for example, when your layer has multiple backends, see GetConvolutionLayer + * for a use case), then you can register the creator function instead, like + * + * REGISTER_LAYER_CREATOR(MyAwesome, GetMyAwesomeLayer) + * + * Note that each layer type should only be registered once. + */ + +#ifndef CAFFE_LAYER_FACTORY_H_ +#define CAFFE_LAYER_FACTORY_H_ + +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +template +class Layer; + +template +class LayerRegistry { + public: + typedef shared_ptr > (*Creator)(const LayerParameter&); + typedef std::map CreatorRegistry; + + static CreatorRegistry& Registry() { + static CreatorRegistry* g_registry_ = new CreatorRegistry(); + return *g_registry_; + } + + // Adds a creator. + static void AddCreator(const string& type, Creator creator) { + CreatorRegistry& registry = Registry(); + CHECK_EQ(registry.count(type), 0) + << "Layer type " << type << " already registered."; + registry[type] = creator; + } + + // Get a layer using a LayerParameter. + static shared_ptr > CreateLayer(const LayerParameter& param) { + if (Caffe::root_solver()) { + LOG(INFO) << "Creating layer " << param.name(); + } + const string& type = param.type(); + CreatorRegistry& registry = Registry(); + CHECK_EQ(registry.count(type), 1) << "Unknown layer type: " << type + << " (known types: " << LayerTypeListString() << ")"; + return registry[type](param); + } + + static vector LayerTypeList() { + CreatorRegistry& registry = Registry(); + vector layer_types; + for (typename CreatorRegistry::iterator iter = registry.begin(); + iter != registry.end(); ++iter) { + layer_types.push_back(iter->first); + } + return layer_types; + } + + private: + // Layer registry should never be instantiated - everything is done with its + // static variables. 
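  // --------------------------------------------------------------------
  // Illustrative sketch (not part of the original header): the two
  // registration paths described in the file comment above, written out
  // for a hypothetical MyAwesomeLayer. Note that template arguments are
  // dropped in this diff rendering; the actual declarations read
  // LayerRegistry<Dtype>, shared_ptr<Layer<Dtype> >, and so on.
  //
  //   // (1) Register by constructor, in my_awesome_layer.cpp:
  //   REGISTER_LAYER_CLASS(MyAwesome);
  //
  //   // (2) Register through a creator function, e.g. to select a backend:
  //   template <typename Dtype>
  //   shared_ptr<Layer<Dtype> > GetMyAwesomeLayer(const LayerParameter& param) {
  //     return shared_ptr<Layer<Dtype> >(new MyAwesomeLayer<Dtype>(param));
  //   }
  //   REGISTER_LAYER_CREATOR(MyAwesome, GetMyAwesomeLayer);
  //
  //   // Net::Init then instantiates the layer from a LayerParameter whose
  //   // type field is "MyAwesome" via LayerRegistry<Dtype>::CreateLayer(param).
  // --------------------------------------------------------------------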
+ LayerRegistry() {} + + static string LayerTypeListString() { + vector layer_types = LayerTypeList(); + string layer_types_str; + for (vector::iterator iter = layer_types.begin(); + iter != layer_types.end(); ++iter) { + if (iter != layer_types.begin()) { + layer_types_str += ", "; + } + layer_types_str += *iter; + } + return layer_types_str; + } +}; + + +template +class LayerRegisterer { + public: + LayerRegisterer(const string& type, + shared_ptr > (*creator)(const LayerParameter&)) { + // LOG(INFO) << "Registering layer type: " << type; + LayerRegistry::AddCreator(type, creator); + } +}; + + +#define REGISTER_LAYER_CREATOR(type, creator) \ + static LayerRegisterer g_creator_f_##type(#type, creator); \ + static LayerRegisterer g_creator_d_##type(#type, creator) \ + +#define REGISTER_LAYER_CLASS(type) \ + template \ + shared_ptr > Creator_##type##Layer(const LayerParameter& param) \ + { \ + return shared_ptr >(new type##Layer(param)); \ + } \ + REGISTER_LAYER_CREATOR(type, Creator_##type##Layer) + +} // namespace caffe + +#endif // CAFFE_LAYER_FACTORY_H_ diff --git a/include/caffe/layers/absval_layer.hpp b/include/caffe/layers/absval_layer.hpp new file mode 100755 index 0000000..9b5305d --- /dev/null +++ b/include/caffe/layers/absval_layer.hpp @@ -0,0 +1,68 @@ +#ifndef CAFFE_ABSVAL_LAYER_HPP_ +#define CAFFE_ABSVAL_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/neuron_layer.hpp" + +namespace caffe { + +/** + * @brief Computes @f$ y = |x| @f$ + * + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ y = |x| @f$ + */ +template +class AbsValLayer : public NeuronLayer { + public: + explicit AbsValLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "AbsVal"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + /// @copydoc AbsValLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the absolute value inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} = + * \mathrm{sign}(x) \frac{\partial E}{\partial y} + * @f$ if propagate_down[0] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +} // namespace caffe + +#endif // CAFFE_ABSVAL_LAYER_HPP_ diff --git a/include/caffe/layers/accuracy_layer.hpp b/include/caffe/layers/accuracy_layer.hpp new file mode 100755 index 0000000..fe2adb9 --- /dev/null +++ b/include/caffe/layers/accuracy_layer.hpp @@ -0,0 +1,95 @@ +#ifndef CAFFE_ACCURACY_LAYER_HPP_ +#define CAFFE_ACCURACY_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/loss_layer.hpp" + +namespace caffe { + +/** + * @brief Computes the classification accuracy for a one-of-many + * classification task. + */ +template +class AccuracyLayer : public Layer { + public: + /** + * @param param provides AccuracyParameter accuracy_param, + * with AccuracyLayer options: + * - top_k (\b optional, default 1). + * Sets the maximum rank @f$ k @f$ at which a prediction is considered + * correct. For example, if @f$ k = 5 @f$, a prediction is counted + * correct if the correct label is among the top 5 predicted labels. + */ + explicit AccuracyLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Accuracy"; } + virtual inline int ExactNumBottomBlobs() const { return 2; } + + // If there are two top blobs, then the second blob will contain + // accuracies per class. + virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MaxTopBlos() const { return 2; } + + protected: + /** + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ x @f$, a Blob with values in + * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of + * the @f$ K = CHW @f$ classes. Each @f$ x_n @f$ is mapped to a predicted + * label @f$ \hat{l}_n @f$ given by its maximal index: + * @f$ \hat{l}_n = \arg\max\limits_k x_{nk} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed accuracy: @f$ + * \frac{1}{N} \sum\limits_{n=1}^N \delta\{ \hat{l}_n = l_n \} + * @f$, where @f$ + * \delta\{\mathrm{condition}\} = \left\{ + * \begin{array}{lr} + * 1 & \mbox{if condition} \\ + * 0 & \mbox{otherwise} + * \end{array} \right. + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + + /// @brief Not implemented -- AccuracyLayer cannot be used as a loss. 
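  // --------------------------------------------------------------------
  // Illustrative sketch (not part of the original header) of the top_k rule
  // documented above: a prediction counts as correct when the true label is
  // among the top_k_ highest scores. Variable names here are hypothetical.
  //
  //   std::vector<std::pair<Dtype, int> > scored;  // (score, class index)
  //   for (int c = 0; c < num_classes; ++c) {
  //     scored.push_back(std::make_pair(bottom_data[n * num_classes + c], c));
  //   }
  //   std::partial_sort(scored.begin(), scored.begin() + top_k_, scored.end(),
  //                     std::greater<std::pair<Dtype, int> >());
  //   bool correct = false;
  //   for (int k = 0; k < top_k_; ++k) {
  //     correct = correct || (scored[k].second == static_cast<int>(label[n]));
  //   }
  // --------------------------------------------------------------------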
+ virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + for (int i = 0; i < propagate_down.size(); ++i) { + if (propagate_down[i]) { NOT_IMPLEMENTED; } + } + } + + int label_axis_, outer_num_, inner_num_; + + int top_k_; + + /// Whether to ignore instances with a certain label. + bool has_ignore_label_; + /// The label indicating that an instance should be ignored. + int ignore_label_; + /// Keeps counts of the number of samples per class. + Blob nums_buffer_; +}; + +} // namespace caffe + +#endif // CAFFE_ACCURACY_LAYER_HPP_ diff --git a/include/caffe/layers/argmax_layer.hpp b/include/caffe/layers/argmax_layer.hpp new file mode 100755 index 0000000..4fef363 --- /dev/null +++ b/include/caffe/layers/argmax_layer.hpp @@ -0,0 +1,77 @@ +#ifndef CAFFE_ARGMAX_LAYER_HPP_ +#define CAFFE_ARGMAX_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Compute the index of the @f$ K @f$ max values for each datum across + * all dimensions @f$ (C \times H \times W) @f$. + * + * Intended for use after a classification layer to produce a prediction. + * If parameter out_max_val is set to true, output is a vector of pairs + * (max_ind, max_val) for each image. The axis parameter specifies an axis + * along which to maximise. + * + * NOTE: does not implement Backwards operation. + */ +template +class ArgMaxLayer : public Layer { + public: + /** + * @param param provides ArgMaxParameter argmax_param, + * with ArgMaxLayer options: + * - top_k (\b optional uint, default 1). + * the number @f$ K @f$ of maximal items to output. + * - out_max_val (\b optional bool, default false). + * if set, output a vector of pairs (max_ind, max_val) unless axis is set then + * output max_val along the specified axis. + * - axis (\b optional int). + * if set, maximise along the specified axis else maximise the flattened + * trailing dimensions for each index of the first / num dimension. + */ + explicit ArgMaxLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "ArgMax"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times 1 \times K) @f$ or, if out_max_val + * @f$ (N \times 2 \times K) @f$ unless axis set than e.g. + * @f$ (N \times K \times H \times W) @f$ if axis == 1 + * the computed outputs @f$ + * y_n = \arg\max\limits_i x_{ni} + * @f$ (for @f$ K = 1 @f$). 
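 *
 * Illustrative note (not part of the original comment): for a single datum
 * with flattened scores x = (0.1, 0.5, 0.2, 0.9) and top_k = 2, the output
 * indices are (3, 1); with out_max_val set, the index/value pairs (3, 0.9)
 * and (1, 0.5) are emitted instead.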
+ */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + /// @brief Not implemented (non-differentiable function) + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + NOT_IMPLEMENTED; + } + bool out_max_val_; + size_t top_k_; + bool has_axis_; + int axis_; +}; + +} // namespace caffe + +#endif // CAFFE_ARGMAX_LAYER_HPP_ diff --git a/include/caffe/layers/base_conv_layer.hpp b/include/caffe/layers/base_conv_layer.hpp new file mode 100755 index 0000000..0160a83 --- /dev/null +++ b/include/caffe/layers/base_conv_layer.hpp @@ -0,0 +1,174 @@ +#ifndef CAFFE_BASE_CONVOLUTION_LAYER_HPP_ +#define CAFFE_BASE_CONVOLUTION_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/im2col.hpp" + +namespace caffe { + +/** + * @brief Abstract base class that factors out the BLAS code common to + * ConvolutionLayer and DeconvolutionLayer. + */ +template +class BaseConvolutionLayer : public Layer { + public: + explicit BaseConvolutionLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline int MinBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline bool EqualNumBottomTopBlobs() const { return true; } + + protected: + // Helper functions that abstract away the column buffer and gemm arguments. + // The last argument in forward_cpu_gemm is so that we can skip the im2col if + // we just called weight_cpu_gemm with the same input. + void forward_cpu_gemm(const Dtype* input, const Dtype* weights, + Dtype* output, bool skip_im2col = false); + void forward_cpu_bias(Dtype* output, const Dtype* bias); + void backward_cpu_gemm(const Dtype* input, const Dtype* weights, + Dtype* output); + void weight_cpu_gemm(const Dtype* input, const Dtype* output, Dtype* + weights); + void backward_cpu_bias(Dtype* bias, const Dtype* input); + +#ifndef CPU_ONLY + void forward_gpu_gemm(const Dtype* col_input, const Dtype* weights, + Dtype* output, bool skip_im2col = false); + void forward_gpu_bias(Dtype* output, const Dtype* bias); + void backward_gpu_gemm(const Dtype* input, const Dtype* weights, + Dtype* col_output); + void weight_gpu_gemm(const Dtype* col_input, const Dtype* output, Dtype* + weights); + void backward_gpu_bias(Dtype* bias, const Dtype* input); +#endif + + /// @brief The spatial dimensions of the input. + inline int input_shape(int i) { + return (*bottom_shape_)[channel_axis_ + i]; + } + // reverse_dimensions should return true iff we are implementing deconv, so + // that conv helpers know which dimensions are which. + virtual bool reverse_dimensions() = 0; + // Compute height_out_ and width_out_ from other parameters. + virtual void compute_output_shape() = 0; + + /// @brief The spatial dimensions of a filter kernel. + Blob kernel_shape_; + /// @brief The spatial dimensions of the stride. + Blob stride_; + /// @brief The spatial dimensions of the padding. + Blob pad_; + /// @brief The spatial dimensions of the dilation. + Blob dilation_; + /// @brief The spatial dimensions of the convolution input. + Blob conv_input_shape_; + /// @brief The spatial dimensions of the col_buffer. + vector col_buffer_shape_; + /// @brief The spatial dimensions of the output. 
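  // --------------------------------------------------------------------
  // Illustrative note (not part of the original header): compute_output_shape()
  // in the concrete ConvolutionLayer fills the member below using the usual
  // convolution arithmetic. A minimal sketch for one spatial axis, with
  // hypothetical local variable names:
  //
  //   const int kernel_extent = dilation * (kernel - 1) + 1;
  //   const int output_dim = (input_dim + 2 * pad - kernel_extent) / stride + 1;
  //
  // When reverse_dimensions() returns true (deconvolution), the im2col/gemm
  // helpers above treat the top blob as the "convolution input" instead.
  // --------------------------------------------------------------------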
+ vector output_shape_; + const vector* bottom_shape_; + + int num_spatial_axes_; + int bottom_dim_; + int top_dim_; + + int channel_axis_; + int num_; + int channels_; + int group_; + int out_spatial_dim_; + int weight_offset_; + int num_output_; + bool bias_term_; + bool is_1x1_; + bool force_nd_im2col_; + + private: + // wrap im2col/col2im so we don't have to remember the (long) argument lists + inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) { + if (!force_nd_im2col_ && num_spatial_axes_ == 2) { + im2col_cpu(data, conv_in_channels_, + conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], + kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], + pad_.cpu_data()[0], pad_.cpu_data()[1], + stride_.cpu_data()[0], stride_.cpu_data()[1], + dilation_.cpu_data()[0], dilation_.cpu_data()[1], col_buff); + } else { + im2col_nd_cpu(data, num_spatial_axes_, conv_input_shape_.cpu_data(), + col_buffer_shape_.data(), kernel_shape_.cpu_data(), + pad_.cpu_data(), stride_.cpu_data(), dilation_.cpu_data(), col_buff); + } + } + inline void conv_col2im_cpu(const Dtype* col_buff, Dtype* data) { + if (!force_nd_im2col_ && num_spatial_axes_ == 2) { + col2im_cpu(col_buff, conv_in_channels_, + conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], + kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], + pad_.cpu_data()[0], pad_.cpu_data()[1], + stride_.cpu_data()[0], stride_.cpu_data()[1], + dilation_.cpu_data()[0], dilation_.cpu_data()[1], data); + } else { + col2im_nd_cpu(col_buff, num_spatial_axes_, conv_input_shape_.cpu_data(), + col_buffer_shape_.data(), kernel_shape_.cpu_data(), + pad_.cpu_data(), stride_.cpu_data(), dilation_.cpu_data(), data); + } + } +#ifndef CPU_ONLY + inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) { + if (!force_nd_im2col_ && num_spatial_axes_ == 2) { + im2col_gpu(data, conv_in_channels_, + conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], + kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], + pad_.cpu_data()[0], pad_.cpu_data()[1], + stride_.cpu_data()[0], stride_.cpu_data()[1], + dilation_.cpu_data()[0], dilation_.cpu_data()[1], col_buff); + } else { + im2col_nd_gpu(data, num_spatial_axes_, num_kernels_im2col_, + conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), + kernel_shape_.gpu_data(), pad_.gpu_data(), + stride_.gpu_data(), dilation_.gpu_data(), col_buff); + } + } + inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) { + if (!force_nd_im2col_ && num_spatial_axes_ == 2) { + col2im_gpu(col_buff, conv_in_channels_, + conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], + kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], + pad_.cpu_data()[0], pad_.cpu_data()[1], + stride_.cpu_data()[0], stride_.cpu_data()[1], + dilation_.cpu_data()[0], dilation_.cpu_data()[1], data); + } else { + col2im_nd_gpu(col_buff, num_spatial_axes_, num_kernels_col2im_, + conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), + kernel_shape_.gpu_data(), pad_.gpu_data(), stride_.gpu_data(), + dilation_.gpu_data(), data); + } + } +#endif + + int num_kernels_im2col_; + int num_kernels_col2im_; + int conv_out_channels_; + int conv_in_channels_; + int conv_out_spatial_dim_; + int kernel_dim_; + int col_offset_; + int output_offset_; + + Blob col_buffer_; + Blob bias_multiplier_; +}; + +} // namespace caffe + +#endif // CAFFE_BASE_CONVOLUTION_LAYER_HPP_ diff --git a/include/caffe/layers/base_data_layer.hpp b/include/caffe/layers/base_data_layer.hpp new file mode 100755 index 0000000..2c49b73 
--- /dev/null +++ b/include/caffe/layers/base_data_layer.hpp @@ -0,0 +1,86 @@ +#ifndef CAFFE_DATA_LAYERS_HPP_ +#define CAFFE_DATA_LAYERS_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/data_transformer.hpp" +#include "caffe/internal_thread.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/blocking_queue.hpp" + +namespace caffe { + +/** + * @brief Provides base for data layers that feed blobs to the Net. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class BaseDataLayer : public Layer { + public: + explicit BaseDataLayer(const LayerParameter& param); + // LayerSetUp: implements common data layer setup functionality, and calls + // DataLayerSetUp to do special data layer setup for individual layer types. + // This method may not be overridden except by the BasePrefetchingDataLayer. + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top) {} + // Data layers have no bottoms, so reshaping is trivial. + virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + + protected: + TransformationParameter transform_param_; + shared_ptr > data_transformer_; + bool output_labels_; +}; + +template +class Batch { + public: + Blob data_, label_; +}; + +template +class BasePrefetchingDataLayer : + public BaseDataLayer, public InternalThread { + public: + explicit BasePrefetchingDataLayer(const LayerParameter& param); + // LayerSetUp: implements common data layer setup functionality, and calls + // DataLayerSetUp to do special data layer setup for individual layer types. + // This method may not be overridden. + void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + // Prefetches batches (asynchronously if to GPU memory) + static const int PREFETCH_COUNT = 3; + + protected: + virtual void InternalThreadEntry(); + virtual void load_batch(Batch* batch) = 0; + + Batch prefetch_[PREFETCH_COUNT]; + BlockingQueue*> prefetch_free_; + BlockingQueue*> prefetch_full_; + + Blob transformed_data_; +}; + +} // namespace caffe + +#endif // CAFFE_DATA_LAYERS_HPP_ diff --git a/include/caffe/layers/batch_norm_layer.hpp b/include/caffe/layers/batch_norm_layer.hpp new file mode 100755 index 0000000..43f7b28 --- /dev/null +++ b/include/caffe/layers/batch_norm_layer.hpp @@ -0,0 +1,78 @@ +#ifndef CAFFE_BATCHNORM_LAYER_HPP_ +#define CAFFE_BATCHNORM_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Normalizes the input to have 0-mean and/or unit (1) variance across + * the batch. + * + * This layer computes Batch Normalization as described in [1]. For each channel + * in the data (i.e. axis 1), it subtracts the mean and divides by the variance, + * where both statistics are computed across both spatial dimensions and across + * the different examples in the batch. 
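 *
 * Illustrative note (not part of the original comment): per channel c the
 * layer computes, elementwise,
 *
 *   y = (x - mean_c) / sqrt(var_c + eps)
 *
 * where mean_c and var_c come from the current minibatch during training and
 * from the stored running averages when use_global_stats is in effect.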
+ * + * By default, during training time, the network is computing global + * mean/variance statistics via a running average, which is then used at test + * time to allow deterministic outputs for each input. You can manually toggle + * whether the network is accumulating or using the statistics via the + * use_global_stats option. For reference, these statistics are kept in the + * layer's three blobs: (0) mean, (1) variance, and (2) moving average factor. + * + * Note that the original paper also included a per-channel learned bias and + * scaling factor. To implement this in Caffe, define a `ScaleLayer` configured + * with `bias_term: true` after each `BatchNormLayer` to handle both the bias + * and scaling factor. + * + * [1] S. Ioffe and C. Szegedy, "Batch Normalization: Accelerating Deep Network + * Training by Reducing Internal Covariate Shift." arXiv preprint + * arXiv:1502.03167 (2015). + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class BatchNormLayer : public Layer { + public: + explicit BatchNormLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "BatchNorm"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob mean_, variance_, temp_, x_norm_; + bool use_global_stats_; + Dtype moving_average_fraction_; + int channels_; + Dtype eps_; + + // extra temporarary variables is used to carry out sums/broadcasting + // using BLAS + Blob batch_sum_multiplier_; + Blob num_by_chans_; + Blob spatial_sum_multiplier_; +}; + +} // namespace caffe + +#endif // CAFFE_BATCHNORM_LAYER_HPP_ diff --git a/include/caffe/layers/batch_reindex_layer.hpp b/include/caffe/layers/batch_reindex_layer.hpp new file mode 100755 index 0000000..ebb3a56 --- /dev/null +++ b/include/caffe/layers/batch_reindex_layer.hpp @@ -0,0 +1,83 @@ +#ifndef CAFFE_BATCHREINDEX_LAYER_HPP_ +#define CAFFE_BATCHREINDEX_LAYER_HPP_ + +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Index into the input blob along its first axis. + * + * This layer can be used to select, reorder, and even replicate examples in a + * batch. The second blob is cast to int and treated as an index into the + * first axis of the first blob. + */ +template +class BatchReindexLayer : public Layer { + public: + explicit BatchReindexLayer(const LayerParameter& param) + : Layer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "BatchReindex"; } + virtual inline int ExactNumBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + /** + * @param bottom input Blob vector (length 2+) + * -# @f$ (N \times ...) 
@f$ + * the inputs @f$ x_1 @f$ + * -# @f$ (M) @f$ + * the inputs @f$ x_2 @f$ + * @param top output Blob vector (length 1) + * -# @f$ (M \times ...) @f$: + * the reindexed array @f$ + * y = x_1[x_2] + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the reordered input. + * + * @param top output Blob vector (length 1), providing the error gradient + * with respect to the outputs + * -# @f$ (M \times ...) @f$: + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to concatenated outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 2): + * - @f$ \frac{\partial E}{\partial y} @f$ is de-indexed (summing where + * required) back to the input x_1 + * - This layer cannot backprop to x_2, i.e. propagate_down[1] must be + * false. + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + private: + struct pair_sort_first { + bool operator()(const std::pair &left, + const std::pair &right) { + return left.first < right.first; + } + }; + void check_batch_reindex(int initial_num, int final_num, + const Dtype* ridx_data); +}; + +} // namespace caffe + +#endif // CAFFE_BATCHREINDEX_LAYER_HPP_ diff --git a/include/caffe/layers/bias_layer.hpp b/include/caffe/layers/bias_layer.hpp new file mode 100755 index 0000000..9639c9c --- /dev/null +++ b/include/caffe/layers/bias_layer.hpp @@ -0,0 +1,54 @@ +#ifndef CAFFE_BIAS_LAYER_HPP_ +#define CAFFE_BIAS_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Computes a sum of two input Blobs, with the shape of the latter Blob + * "broadcast" to match the shape of the former. Equivalent to tiling + * the latter Blob, then computing the elementwise sum. + * + * The second input may be omitted, in which case it's learned as a parameter + * of the layer. Note: in case bias and scaling are desired, both operations can + * be handled by `ScaleLayer` configured with `bias_term: true`. 
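 *
 * Illustrative note (not part of the original comment): with a first input of
 * shape (N x C x H x W) and a second (or learned) bias of shape (C),
 * broadcast along the channel axis, the output is
 *
 *   y[n, c, h, w] = x[n, c, h, w] + b[c]
 *
 * i.e. one offset per channel, tiled over N, H and W.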
+ */ +template +class BiasLayer : public Layer { + public: + explicit BiasLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Bias"; } + virtual inline int MinBottomBlobs() const { return 1; } + virtual inline int MaxBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + private: + Blob bias_multiplier_; + int outer_dim_, bias_dim_, inner_dim_, dim_; +}; + + + +} // namespace caffe + +#endif // CAFFE_BIAS_LAYER_HPP_ diff --git a/include/caffe/layers/bnll_layer.hpp b/include/caffe/layers/bnll_layer.hpp new file mode 100755 index 0000000..be07c74 --- /dev/null +++ b/include/caffe/layers/bnll_layer.hpp @@ -0,0 +1,70 @@ +#ifndef CAFFE_BNLL_LAYER_HPP_ +#define CAFFE_BNLL_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/neuron_layer.hpp" + +namespace caffe { + +/** + * @brief Computes @f$ y = x + \log(1 + \exp(-x)) @f$ if @f$ x > 0 @f$; + * @f$ y = \log(1 + \exp(x)) @f$ otherwise. + * + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = \left\{ + * \begin{array}{ll} + * x + \log(1 + \exp(-x)) & \mbox{if } x > 0 \\ + * \log(1 + \exp(x)) & \mbox{otherwise} + * \end{array} \right. + * @f$ + */ +template +class BNLLLayer : public NeuronLayer { + public: + explicit BNLLLayer(const LayerParameter& param) + : NeuronLayer(param) {} + + virtual inline const char* type() const { return "BNLL"; } + + protected: + /// @copydoc BNLLLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the BNLL inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} + * @f$ if propagate_down[0] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +} // namespace caffe + +#endif // CAFFE_BNLL_LAYER_HPP_ diff --git a/include/caffe/layers/concat_layer.hpp b/include/caffe/layers/concat_layer.hpp new file mode 100755 index 0000000..a157024 --- /dev/null +++ b/include/caffe/layers/concat_layer.hpp @@ -0,0 +1,87 @@ +#ifndef CAFFE_CONCAT_LAYER_HPP_ +#define CAFFE_CONCAT_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Takes at least two Blob%s and concatenates them along either the num + * or channel dimension, outputting the result. + */ +template +class ConcatLayer : public Layer { + public: + explicit ConcatLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Concat"; } + virtual inline int MinBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + /** + * @param bottom input Blob vector (length 2+) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x_1 @f$ + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x_2 @f$ + * -# ... + * - K @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x_K @f$ + * @param top output Blob vector (length 1) + * -# @f$ (KN \times C \times H \times W) @f$ if axis == 0, or + * @f$ (N \times KC \times H \times W) @f$ if axis == 1: + * the concatenated output @f$ + * y = [\begin{array}{cccc} x_1 & x_2 & ... & x_K \end{array}] + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the concatenate inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (KN \times C \times H \times W) @f$ if axis == 0, or + * @f$ (N \times KC \times H \times W) @f$ if axis == 1: + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to concatenated outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length K), into which the top gradient + * @f$ \frac{\partial E}{\partial y} @f$ is deconcatenated back to the + * inputs @f$ + * \left[ \begin{array}{cccc} + * \frac{\partial E}{\partial x_1} & + * \frac{\partial E}{\partial x_2} & + * ... 
& + * \frac{\partial E}{\partial x_K} + * \end{array} \right] = + * \frac{\partial E}{\partial y} + * @f$ + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int count_; + int num_concats_; + int concat_input_size_; + int concat_axis_; +}; + +} // namespace caffe + +#endif // CAFFE_CONCAT_LAYER_HPP_ diff --git a/include/caffe/layers/contrastive_loss_layer.hpp b/include/caffe/layers/contrastive_loss_layer.hpp new file mode 100755 index 0000000..e890afb --- /dev/null +++ b/include/caffe/layers/contrastive_loss_layer.hpp @@ -0,0 +1,101 @@ +#ifndef CAFFE_CONTRASTIVE_LOSS_LAYER_HPP_ +#define CAFFE_CONTRASTIVE_LOSS_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/loss_layer.hpp" + +namespace caffe { + +/** + * @brief Computes the contrastive loss @f$ + * E = \frac{1}{2N} \sum\limits_{n=1}^N \left(y\right) d^2 + + * \left(1-y\right) \max \left(margin-d, 0\right)^2 + * @f$ where @f$ + * d = \left| \left| a_n - b_n \right| \right|_2 @f$. This can be + * used to train siamese networks. + * + * @param bottom input Blob vector (length 3) + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$ a \in [-\infty, +\infty]@f$ + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$ b \in [-\infty, +\infty]@f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the binary similarity @f$ s \in [0, 1]@f$ + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed contrastive loss: @f$ E = + * \frac{1}{2N} \sum\limits_{n=1}^N \left(y\right) d^2 + + * \left(1-y\right) \max \left(margin-d, 0\right)^2 + * @f$ where @f$ + * d = \left| \left| a_n - b_n \right| \right|_2 @f$. + * This can be used to train siamese networks. + */ +template +class ContrastiveLossLayer : public LossLayer { + public: + explicit ContrastiveLossLayer(const LayerParameter& param) + : LossLayer(param), diff_() {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline int ExactNumBottomBlobs() const { return 3; } + virtual inline const char* type() const { return "ContrastiveLoss"; } + /** + * Unlike most loss layers, in the ContrastiveLossLayer we can backpropagate + * to the first two inputs. + */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return bottom_index != 2; + } + + protected: + /// @copydoc ContrastiveLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the Contrastive error gradient w.r.t. the inputs. + * + * Computes the gradients with respect to the two input vectors (bottom[0] and + * bottom[1]), but not the similarity label (bottom[2]). + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) 
+ * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$a@f$; Backward fills their diff with + * gradients if propagate_down[0] + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$b@f$; Backward fills their diff with gradients if + * propagate_down[1] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob diff_; // cached for backward pass + Blob dist_sq_; // cached for backward pass + Blob diff_sq_; // tmp storage for gpu forward pass + Blob summer_vec_; // tmp storage for gpu forward pass +}; + +} // namespace caffe + +#endif // CAFFE_CONTRASTIVE_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/conv_layer.hpp b/include/caffe/layers/conv_layer.hpp new file mode 100755 index 0000000..93a618d --- /dev/null +++ b/include/caffe/layers/conv_layer.hpp @@ -0,0 +1,84 @@ +#ifndef CAFFE_CONV_LAYER_HPP_ +#define CAFFE_CONV_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/base_conv_layer.hpp" + +namespace caffe { + +/** + * @brief Convolves the input image with a bank of learned filters, + * and (optionally) adds biases. + * + * Caffe convolves by reduction to matrix multiplication. This achieves + * high-throughput and generality of input and filter dimensions but comes at + * the cost of memory for matrices. This makes use of efficiency in BLAS. + * + * The input is "im2col" transformed to a channel K' x H x W data matrix + * for multiplication with the N x K' x H x W filter matrix to yield a + * N' x H x W output matrix that is then "col2im" restored. K' is the + * input channel * kernel height * kernel width dimension of the unrolled + * inputs so that the im2col matrix has a column for each input region to + * be filtered. col2im restores the output spatial structure by rolling up + * the output channel N' columns of the output matrix. + */ +template +class ConvolutionLayer : public BaseConvolutionLayer { + public: + /** + * @param param provides ConvolutionParameter convolution_param, + * with ConvolutionLayer options: + * - num_output. The number of filters. + * - kernel_size / kernel_h / kernel_w. The filter dimensions, given by + * kernel_size for square filters or kernel_h and kernel_w for rectangular + * filters. + * - stride / stride_h / stride_w (\b optional, default 1). The filter + * stride, given by stride_size for equal dimensions or stride_h and stride_w + * for different strides. By default the convolution is dense with stride 1. + * - pad / pad_h / pad_w (\b optional, default 0). The zero-padding for + * convolution, given by pad for equal dimensions or pad_h and pad_w for + * different padding. Input padding is computed implicitly instead of + * actually padding. + * - dilation (\b optional, default 1). The filter + * dilation, given by dilation_size for equal dimensions for different + * dilation. By default the convolution has dilation 1. + * - group (\b optional, default 1). The number of filter groups. Group + * convolution is a method for reducing parameterization by selectively + * connecting input and output channels. The input and output channel dimensions must be divisible + * by the number of groups. 
For group @f$ \geq 1 @f$, the + * convolutional filters' input and output channels are separated s.t. each + * group takes 1 / group of the input channels and makes 1 / group of the + * output channels. Concretely 4 input channels, 8 output channels, and + * 2 groups separate input channels 1-2 and output channels 1-4 into the + * first group and input channels 3-4 and output channels 5-8 into the second + * group. + * - bias_term (\b optional, default true). Whether to have a bias. + * - engine: convolution has CAFFE (matrix multiplication) and CUDNN (library + * kernels + stream parallelism) engines. + */ + explicit ConvolutionLayer(const LayerParameter& param) + : BaseConvolutionLayer(param) {} + + virtual inline const char* type() const { return "Convolution"; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual inline bool reverse_dimensions() { return false; } + virtual void compute_output_shape(); +}; + +} // namespace caffe + +#endif // CAFFE_CONV_LAYER_HPP_ diff --git a/include/caffe/layers/crop_layer.hpp b/include/caffe/layers/crop_layer.hpp new file mode 100755 index 0000000..c4fda12 --- /dev/null +++ b/include/caffe/layers/crop_layer.hpp @@ -0,0 +1,76 @@ +#ifndef CAFFE_CROP_LAYER_HPP_ +#define CAFFE_CROP_LAYER_HPP_ + +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Takes a Blob and crop it, to the shape specified by the second input + * Blob, across all dimensions after the specified axis. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ + +template +class CropLayer : public Layer { + public: + explicit CropLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Crop"; } + virtual inline int ExactNumBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + vector offsets; + + private: + // Recursive copy function. + void crop_copy(const vector*>& bottom, + const vector*>& top, + const vector& offsets, + vector indices, + int cur_dim, + const Dtype* src_data, + Dtype* dest_data, + bool is_forward); + + // Recursive copy function: this is similar to crop_copy() but loops over all + // but the last two dimensions to allow for ND cropping while still relying on + // a CUDA kernel for the innermost two dimensions for performance reasons. An + // alterantive implementation could rely on the kernel more by passing + // offsets, but this is problematic because of its variable length. 
+ // Since in the standard (N,C,W,H) case N,C are usually not cropped a speedup + // could be achieved by not looping the application of the copy_kernel around + // these dimensions. + void crop_copy_gpu(const vector*>& bottom, + const vector*>& top, + const vector& offsets, + vector indices, + int cur_dim, + const Dtype* src_data, + Dtype* dest_data, + bool is_forward); +}; +} // namespace caffe + +#endif // CAFFE_CROP_LAYER_HPP_ diff --git a/include/caffe/layers/cudnn_conv_layer.hpp b/include/caffe/layers/cudnn_conv_layer.hpp new file mode 100755 index 0000000..31fe49a --- /dev/null +++ b/include/caffe/layers/cudnn_conv_layer.hpp @@ -0,0 +1,72 @@ +#ifndef CAFFE_CUDNN_CONV_LAYER_HPP_ +#define CAFFE_CUDNN_CONV_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/conv_layer.hpp" + +namespace caffe { + +#ifdef USE_CUDNN +/* + * @brief cuDNN implementation of ConvolutionLayer. + * Fallback to ConvolutionLayer for CPU mode. + * + * cuDNN accelerates convolution through forward kernels for filtering and bias + * plus backward kernels for the gradient w.r.t. the filters, biases, and + * inputs. Caffe + cuDNN further speeds up the computation through forward + * parallelism across groups and backward parallelism across gradients. + * + * The CUDNN engine does not have memory overhead for matrix buffers. For many + * input and filter regimes the CUDNN engine is faster than the CAFFE engine, + * but for fully-convolutional models and large inputs the CAFFE engine can be + * faster as long as it fits in memory. +*/ +template +class CuDNNConvolutionLayer : public ConvolutionLayer { + public: + explicit CuDNNConvolutionLayer(const LayerParameter& param) + : ConvolutionLayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNConvolutionLayer(); + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t* handle_; + cudaStream_t* stream_; + + // algorithms for forward and backwards convolutions + cudnnConvolutionFwdAlgo_t *fwd_algo_; + cudnnConvolutionBwdFilterAlgo_t *bwd_filter_algo_; + cudnnConvolutionBwdDataAlgo_t *bwd_data_algo_; + + vector bottom_descs_, top_descs_; + cudnnTensorDescriptor_t bias_desc_; + cudnnFilterDescriptor_t filter_desc_; + vector conv_descs_; + int bottom_offset_, top_offset_, bias_offset_; + + size_t *workspace_fwd_sizes_; + size_t *workspace_bwd_data_sizes_; + size_t *workspace_bwd_filter_sizes_; + size_t workspaceSizeInBytes; // size of underlying storage + void *workspaceData; // underlying storage + void **workspace; // aliases into workspaceData +}; +#endif + +} // namespace caffe + +#endif // CAFFE_CUDNN_CONV_LAYER_HPP_ diff --git a/include/caffe/layers/cudnn_lcn_layer.hpp b/include/caffe/layers/cudnn_lcn_layer.hpp new file mode 100755 index 0000000..74cf477 --- /dev/null +++ b/include/caffe/layers/cudnn_lcn_layer.hpp @@ -0,0 +1,49 @@ +#ifndef CAFFE_CUDNN_LCN_LAYER_HPP_ +#define CAFFE_CUDNN_LCN_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/lrn_layer.hpp" +#include "caffe/layers/power_layer.hpp" + +namespace caffe { + +#ifdef USE_CUDNN +template +class CuDNNLCNLayer : public 
LRNLayer { + public: + explicit CuDNNLCNLayer(const LayerParameter& param) + : LRNLayer(param), handles_setup_(false), tempDataSize(0), + tempData1(NULL), tempData2(NULL) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNLCNLayer(); + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t handle_; + cudnnLRNDescriptor_t norm_desc_; + cudnnTensorDescriptor_t bottom_desc_, top_desc_; + + int size_, pre_pad_; + Dtype alpha_, beta_, k_; + + size_t tempDataSize; + void *tempData1, *tempData2; +}; +#endif + +} // namespace caffe + +#endif // CAFFE_CUDNN_LCN_LAYER_HPP_ diff --git a/include/caffe/layers/cudnn_lrn_layer.hpp b/include/caffe/layers/cudnn_lrn_layer.hpp new file mode 100755 index 0000000..000ccc3 --- /dev/null +++ b/include/caffe/layers/cudnn_lrn_layer.hpp @@ -0,0 +1,44 @@ +#ifndef CAFFE_CUDNN_LRN_LAYER_HPP_ +#define CAFFE_CUDNN_LRN_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/lrn_layer.hpp" + +namespace caffe { + +#ifdef USE_CUDNN +template +class CuDNNLRNLayer : public LRNLayer { + public: + explicit CuDNNLRNLayer(const LayerParameter& param) + : LRNLayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNLRNLayer(); + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t handle_; + cudnnLRNDescriptor_t norm_desc_; + cudnnTensorDescriptor_t bottom_desc_, top_desc_; + + int size_; + Dtype alpha_, beta_, k_; +}; +#endif + +} // namespace caffe + +#endif // CAFFE_CUDNN_LRN_LAYER_HPP_ diff --git a/include/caffe/layers/cudnn_pooling_layer.hpp b/include/caffe/layers/cudnn_pooling_layer.hpp new file mode 100755 index 0000000..6d0db47 --- /dev/null +++ b/include/caffe/layers/cudnn_pooling_layer.hpp @@ -0,0 +1,49 @@ +#ifndef CAFFE_CUDNN_POOLING_LAYER_HPP_ +#define CAFFE_CUDNN_POOLING_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/pooling_layer.hpp" + +namespace caffe { + +#ifdef USE_CUDNN +/* + * @brief cuDNN implementation of PoolingLayer. + * Fallback to PoolingLayer for CPU mode. +*/ +template +class CuDNNPoolingLayer : public PoolingLayer { + public: + explicit CuDNNPoolingLayer(const LayerParameter& param) + : PoolingLayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNPoolingLayer(); + // Currently, cuDNN does not support the extra top blob. 
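  // (Annotation, not in the original header: the base PoolingLayer allows an
  //  optional second top blob carrying the pooling mask; returning -1 here
  //  simply disables the inherited minimum check, while ExactNumTopBlobs()
  //  == 1 is what pins the top count to one in CheckBlobCounts().)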
+ virtual inline int MinTopBlobs() const { return -1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t handle_; + cudnnTensorDescriptor_t bottom_desc_, top_desc_; + cudnnPoolingDescriptor_t pooling_desc_; + cudnnPoolingMode_t mode_; +}; +#endif + +} // namespace caffe + +#endif // CAFFE_CUDNN_POOLING_LAYER_HPP_ diff --git a/include/caffe/layers/cudnn_relu_layer.hpp b/include/caffe/layers/cudnn_relu_layer.hpp new file mode 100755 index 0000000..a1cb29e --- /dev/null +++ b/include/caffe/layers/cudnn_relu_layer.hpp @@ -0,0 +1,46 @@ +#ifndef CAFFE_CUDNN_RELU_LAYER_HPP_ +#define CAFFE_CUDNN_RELU_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/neuron_layer.hpp" +#include "caffe/layers/relu_layer.hpp" + +namespace caffe { + +#ifdef USE_CUDNN +/** + * @brief CuDNN acceleration of ReLULayer. + */ +template +class CuDNNReLULayer : public ReLULayer { + public: + explicit CuDNNReLULayer(const LayerParameter& param) + : ReLULayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNReLULayer(); + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t handle_; + cudnnTensorDescriptor_t bottom_desc_; + cudnnTensorDescriptor_t top_desc_; + cudnnActivationDescriptor_t activ_desc_; +}; +#endif + +} // namespace caffe + +#endif // CAFFE_CUDNN_RELU_LAYER_HPP_ diff --git a/include/caffe/layers/cudnn_sigmoid_layer.hpp b/include/caffe/layers/cudnn_sigmoid_layer.hpp new file mode 100755 index 0000000..7b3486f --- /dev/null +++ b/include/caffe/layers/cudnn_sigmoid_layer.hpp @@ -0,0 +1,46 @@ +#ifndef CAFFE_CUDNN_SIGMOID_LAYER_HPP_ +#define CAFFE_CUDNN_SIGMOID_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/neuron_layer.hpp" +#include "caffe/layers/sigmoid_layer.hpp" + +namespace caffe { + +#ifdef USE_CUDNN +/** + * @brief CuDNN acceleration of SigmoidLayer. 
+ */ +template +class CuDNNSigmoidLayer : public SigmoidLayer { + public: + explicit CuDNNSigmoidLayer(const LayerParameter& param) + : SigmoidLayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNSigmoidLayer(); + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t handle_; + cudnnTensorDescriptor_t bottom_desc_; + cudnnTensorDescriptor_t top_desc_; + cudnnActivationDescriptor_t activ_desc_; +}; +#endif + +} // namespace caffe + +#endif // CAFFE_CUDNN_SIGMOID_LAYER_HPP_ diff --git a/include/caffe/layers/cudnn_softmax_layer.hpp b/include/caffe/layers/cudnn_softmax_layer.hpp new file mode 100755 index 0000000..174368e --- /dev/null +++ b/include/caffe/layers/cudnn_softmax_layer.hpp @@ -0,0 +1,45 @@ +#ifndef CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ +#define CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/softmax_layer.hpp" + +namespace caffe { + +#ifdef USE_CUDNN +/** + * @brief cuDNN implementation of SoftmaxLayer. + * Fallback to SoftmaxLayer for CPU mode. + */ +template +class CuDNNSoftmaxLayer : public SoftmaxLayer { + public: + explicit CuDNNSoftmaxLayer(const LayerParameter& param) + : SoftmaxLayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNSoftmaxLayer(); + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t handle_; + cudnnTensorDescriptor_t bottom_desc_; + cudnnTensorDescriptor_t top_desc_; +}; +#endif + +} // namespace caffe + +#endif // CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ diff --git a/include/caffe/layers/cudnn_tanh_layer.hpp b/include/caffe/layers/cudnn_tanh_layer.hpp new file mode 100755 index 0000000..59e758d --- /dev/null +++ b/include/caffe/layers/cudnn_tanh_layer.hpp @@ -0,0 +1,46 @@ +#ifndef CAFFE_CUDNN_TANH_LAYER_HPP_ +#define CAFFE_CUDNN_TANH_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/neuron_layer.hpp" +#include "caffe/layers/tanh_layer.hpp" + +namespace caffe { + +#ifdef USE_CUDNN +/** + * @brief CuDNN acceleration of TanHLayer. 
+ */ +template +class CuDNNTanHLayer : public TanHLayer { + public: + explicit CuDNNTanHLayer(const LayerParameter& param) + : TanHLayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNTanHLayer(); + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t handle_; + cudnnTensorDescriptor_t bottom_desc_; + cudnnTensorDescriptor_t top_desc_; + cudnnActivationDescriptor_t activ_desc_; +}; +#endif + +} // namespace caffe + +#endif // CAFFE_CUDNN_TANH_LAYER_HPP_ diff --git a/include/caffe/layers/data_layer.hpp b/include/caffe/layers/data_layer.hpp new file mode 100755 index 0000000..6c36179 --- /dev/null +++ b/include/caffe/layers/data_layer.hpp @@ -0,0 +1,39 @@ +#ifndef CAFFE_DATA_LAYER_HPP_ +#define CAFFE_DATA_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/data_reader.hpp" +#include "caffe/data_transformer.hpp" +#include "caffe/internal_thread.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/base_data_layer.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/db.hpp" + +namespace caffe { + +template +class DataLayer : public BasePrefetchingDataLayer { + public: + explicit DataLayer(const LayerParameter& param); + virtual ~DataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + // DataLayer uses DataReader instead for sharing for parallelism + virtual inline bool ShareInParallel() const { return false; } + virtual inline const char* type() const { return "Data"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MaxTopBlobs() const { return 2; } + + protected: + virtual void load_batch(Batch* batch); + + DataReader reader_; +}; + +} // namespace caffe + +#endif // CAFFE_DATA_LAYER_HPP_ diff --git a/include/caffe/layers/deconv_layer.hpp b/include/caffe/layers/deconv_layer.hpp new file mode 100755 index 0000000..23ae887 --- /dev/null +++ b/include/caffe/layers/deconv_layer.hpp @@ -0,0 +1,51 @@ +#ifndef CAFFE_DECONV_LAYER_HPP_ +#define CAFFE_DECONV_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/base_conv_layer.hpp" + +namespace caffe { + +/** + * @brief Convolve the input with a bank of learned filters, and (optionally) + * add biases, treating filters and convolution parameters in the + * opposite sense as ConvolutionLayer. + * + * ConvolutionLayer computes each output value by dotting an input window with + * a filter; DeconvolutionLayer multiplies each input value by a filter + * elementwise, and sums over the resulting output windows. In other words, + * DeconvolutionLayer is ConvolutionLayer with the forward and backward passes + * reversed. DeconvolutionLayer reuses ConvolutionParameter for its + * parameters, but they take the opposite sense as in ConvolutionLayer (so + * padding is removed from the output rather than added to the input, and + * stride results in upsampling rather than downsampling). 
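+ *
+ * As a concrete example of the shape bookkeeping this implies (an
+ * illustrative calculation, ignoring dilation): for one spatial axis with
+ * input size @f$ i @f$, kernel size @f$ k @f$, stride @f$ s @f$, and pad
+ * @f$ p @f$, ConvolutionLayer produces an output of size
+ * @f$ (i + 2p - k) / s + 1 @f$, whereas DeconvolutionLayer produces
+ * @f$ s (i - 1) + k - 2p @f$; e.g., with @f$ k = 4, s = 2, p = 1 @f$ a
+ * 16-wide input is upsampled to a 32-wide output.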
+ */
+template <typename Dtype>
+class DeconvolutionLayer : public BaseConvolutionLayer<Dtype> {
+ public:
+  explicit DeconvolutionLayer(const LayerParameter& param)
+      : BaseConvolutionLayer<Dtype>(param) {}
+
+  virtual inline const char* type() const { return "Deconvolution"; }
+
+ protected:
+  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
+  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
+  virtual inline bool reverse_dimensions() { return true; }
+  virtual void compute_output_shape();
+};
+
+}  // namespace caffe
+
+#endif  // CAFFE_DECONV_LAYER_HPP_
diff --git a/include/caffe/layers/dropout_layer.hpp b/include/caffe/layers/dropout_layer.hpp
new file mode 100755
index 0000000..e83143b
--- /dev/null
+++ b/include/caffe/layers/dropout_layer.hpp
@@ -0,0 +1,80 @@
+#ifndef CAFFE_DROPOUT_LAYER_HPP_
+#define CAFFE_DROPOUT_LAYER_HPP_
+
+#include <vector>
+
+#include "caffe/blob.hpp"
+#include "caffe/layer.hpp"
+#include "caffe/proto/caffe.pb.h"
+
+#include "caffe/layers/neuron_layer.hpp"
+
+namespace caffe {
+
+/**
+ * @brief During training only, sets a random portion of @f$x@f$ to 0, adjusting
+ *        the rest of the vector magnitude accordingly.
+ *
+ * @param bottom input Blob vector (length 1)
+ *   -# @f$ (N \times C \times H \times W) @f$
+ *      the inputs @f$ x @f$
+ * @param top output Blob vector (length 1)
+ *   -# @f$ (N \times C \times H \times W) @f$
+ *      the computed outputs @f$ y @f$ (defined below for train and test time)
+ */
+template <typename Dtype>
+class DropoutLayer : public NeuronLayer<Dtype> {
+ public:
+  /**
+   * @param param provides DropoutParameter dropout_param,
+   *     with DropoutLayer options:
+   *   - dropout_ratio (\b optional, default 0.5).
+   *     Sets the probability @f$ p @f$ that any given unit is dropped.
+   */
+  explicit DropoutLayer(const LayerParameter& param)
+      : NeuronLayer<Dtype>(param) {}
+  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+
+  virtual inline const char* type() const { return "Dropout"; }
+
+ protected:
+  /**
+   * @param bottom input Blob vector (length 1)
+   *   -# @f$ (N \times C \times H \times W) @f$
+   *      the inputs @f$ x @f$
+   * @param top output Blob vector (length 1)
+   *   -# @f$ (N \times C \times H \times W) @f$
+   *      the computed outputs. At training time, we have @f$
+   *      y_{\mbox{train}} = \left\{
+   *         \begin{array}{ll}
+   *            \frac{x}{1 - p} & \mbox{if } u > p \\
+   *            0 & \mbox{otherwise}
+   *         \end{array} \right.
+   *      @f$, where @f$ u \sim U(0, 1)@f$ is generated independently for each
+   *      input at each iteration. At test time, we simply have
+   *      @f$ y_{\mbox{test}} = \mathbb{E}[y_{\mbox{train}}] = x @f$.
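+   *
+   *      A minimal sketch of this train-time computation, with hypothetical
+   *      variable names (an illustration rather than the layer's actual
+   *      implementation):
+   *      @code
+   *      // mask[i] is 1 with probability (1 - p) and 0 otherwise;
+   *      // scale = 1 / (1 - p) keeps the expected output equal to the input.
+   *      for (int i = 0; i < count; ++i) {
+   *        top_data[i] = bottom_data[i] * mask[i] * scale;
+   *      }
+   *      @endcode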
+ */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + /// when divided by UINT_MAX, the randomly generated values @f$u\sim U(0,1)@f$ + Blob rand_vec_; + /// the probability @f$ p @f$ of dropping any input + Dtype threshold_; + /// the scale for undropped inputs at train time @f$ 1 / (1 - p) @f$ + Dtype scale_; + unsigned int uint_thres_; +}; + +} // namespace caffe + +#endif // CAFFE_DROPOUT_LAYER_HPP_ diff --git a/include/caffe/layers/dummy_data_layer.hpp b/include/caffe/layers/dummy_data_layer.hpp new file mode 100755 index 0000000..4180f1d --- /dev/null +++ b/include/caffe/layers/dummy_data_layer.hpp @@ -0,0 +1,49 @@ +#ifndef CAFFE_DUMMY_DATA_LAYER_HPP_ +#define CAFFE_DUMMY_DATA_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Provides data to the Net generated by a Filler. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class DummyDataLayer : public Layer { + public: + explicit DummyDataLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } + // Data layers have no bottoms, so reshaping is trivial. + virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "DummyData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + + vector > > fillers_; + vector refill_; +}; + +} // namespace caffe + +#endif // CAFFE_DUMMY_DATA_LAYER_HPP_ diff --git a/include/caffe/layers/eltwise_layer.hpp b/include/caffe/layers/eltwise_layer.hpp new file mode 100755 index 0000000..091de83 --- /dev/null +++ b/include/caffe/layers/eltwise_layer.hpp @@ -0,0 +1,51 @@ +#ifndef CAFFE_ELTWISE_LAYER_HPP_ +#define CAFFE_ELTWISE_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Compute elementwise operations, such as product and sum, + * along multiple input Blobs. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. 
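+ *
+ * As a brief sketch in the meantime (operation names as defined in
+ * EltwiseParameter): for inputs @f$ x_1, \ldots, x_K @f$,
+ *   - PROD computes @f$ y = \prod_i x_i @f$ elementwise,
+ *   - SUM computes @f$ y = \sum_i c_i x_i @f$ elementwise, with optional
+ *     per-blob coefficients @f$ c_i @f$ defaulting to 1,
+ *   - MAX computes @f$ y = \max_i x_i @f$ elementwise.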
+ */ +template +class EltwiseLayer : public Layer { + public: + explicit EltwiseLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Eltwise"; } + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + EltwiseParameter_EltwiseOp op_; + vector coeffs_; + Blob max_idx_; + + bool stable_prod_grad_; +}; + +} // namespace caffe + +#endif // CAFFE_ELTWISE_LAYER_HPP_ diff --git a/include/caffe/layers/elu_layer.hpp b/include/caffe/layers/elu_layer.hpp new file mode 100755 index 0000000..0796e89 --- /dev/null +++ b/include/caffe/layers/elu_layer.hpp @@ -0,0 +1,86 @@ +#ifndef CAFFE_ELU_LAYER_HPP_ +#define CAFFE_ELU_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/neuron_layer.hpp" + +namespace caffe { + +/** + * @brief Exponential Linear Unit non-linearity @f$ + * y = \left\{ + * \begin{array}{lr} + * x & \mathrm{if} \; x > 0 \\ + * \alpha (\exp(x)-1) & \mathrm{if} \; x \le 0 + * \end{array} \right. + * @f$. + */ +template +class ELULayer : public NeuronLayer { + public: + /** + * @param param provides ELUParameter elu_param, + * with ELULayer options: + * - alpha (\b optional, default 1). + * the value @f$ \alpha @f$ by which controls saturation for negative inputs. + */ + explicit ELULayer(const LayerParameter& param) + : NeuronLayer(param) {} + + virtual inline const char* type() const { return "ELU"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = \left\{ + * \begin{array}{lr} + * x & \mathrm{if} \; x > 0 \\ + * \alpha (\exp(x)-1) & \mathrm{if} \; x \le 0 + * \end{array} \right. + * @f$. + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the ELU inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} = \left\{ + * \begin{array}{lr} + * 1 & \mathrm{if} \; x > 0 \\ + * y + \alpha & \mathrm{if} \; x \le 0 + * \end{array} \right. + * @f$ if propagate_down[0]. 
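+ *
+ * A minimal elementwise sketch of this backward pass, using hypothetical
+ * variable names (the @f$ \frac{\partial E}{\partial y} @f$ factor enters
+ * through top_diff):
+ * @code
+ * for (int i = 0; i < count; ++i) {
+ *   bottom_diff[i] = top_diff[i] * (bottom_data[i] > 0 ?
+ *       Dtype(1) : (top_data[i] + alpha));
+ * }
+ * @endcode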
+ */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + + +} // namespace caffe + +#endif // CAFFE_ELU_LAYER_HPP_ diff --git a/include/caffe/layers/embed_layer.hpp b/include/caffe/layers/embed_layer.hpp new file mode 100755 index 0000000..36137a6 --- /dev/null +++ b/include/caffe/layers/embed_layer.hpp @@ -0,0 +1,52 @@ +#ifndef CAFFE_EMBED_LAYER_HPP_ +#define CAFFE_EMBED_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief A layer for learning "embeddings" of one-hot vector input. + * Equivalent to an InnerProductLayer with one-hot vectors as input, but + * for efficiency the input is the "hot" index of each column itself. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class EmbedLayer : public Layer { + public: + explicit EmbedLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Embed"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int M_; + int K_; + int N_; + bool bias_term_; + Blob bias_multiplier_; +}; + +} // namespace caffe + +#endif // CAFFE_EMBED_LAYER_HPP_ diff --git a/include/caffe/layers/euclidean_loss_layer.hpp b/include/caffe/layers/euclidean_loss_layer.hpp new file mode 100755 index 0000000..f564569 --- /dev/null +++ b/include/caffe/layers/euclidean_loss_layer.hpp @@ -0,0 +1,107 @@ +#ifndef CAFFE_EUCLIDEAN_LOSS_LAYER_HPP_ +#define CAFFE_EUCLIDEAN_LOSS_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/loss_layer.hpp" + +namespace caffe { + +/** + * @brief Computes the Euclidean (L2) loss @f$ + * E = \frac{1}{2N} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n + * \right| \right|_2^2 @f$ for real-valued regression tasks. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{y} \in [-\infty, +\infty]@f$ + * -# @f$ (N \times C \times H \times W) @f$ + * the targets @f$ y \in [-\infty, +\infty]@f$ + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed Euclidean loss: @f$ E = + * \frac{1}{2n} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n + * \right| \right|_2^2 @f$ + * + * This can be used for least-squares regression tasks. An InnerProductLayer + * input to a EuclideanLossLayer exactly formulates a linear least squares + * regression problem. 
With non-zero weight decay the problem becomes one of + * ridge regression -- see src/caffe/test/test_sgd_solver.cpp for a concrete + * example wherein we check that the gradients computed for a Net with exactly + * this structure match hand-computed gradient formulas for ridge regression. + * + * (Note: Caffe, and SGD in general, is certainly \b not the best way to solve + * linear least squares problems! We use it only as an instructive example.) + */ +template +class EuclideanLossLayer : public LossLayer { + public: + explicit EuclideanLossLayer(const LayerParameter& param) + : LossLayer(param), diff_() {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "EuclideanLoss"; } + /** + * Unlike most loss layers, in the EuclideanLossLayer we can backpropagate + * to both inputs -- override to return true and always allow force_backward. + */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return true; + } + + protected: + /// @copydoc EuclideanLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the Euclidean error gradient w.r.t. the inputs. + * + * Unlike other children of LossLayer, EuclideanLossLayer \b can compute + * gradients with respect to the label inputs bottom[1] (but still only will + * if propagate_down[1] is set, due to being produced by learnable parameters + * or if force_backward is set). In fact, this layer is "commutative" -- the + * result is the same regardless of the order of the two bottoms. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$\hat{y}@f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial \hat{y}} = + * \frac{1}{n} \sum\limits_{n=1}^N (\hat{y}_n - y_n) + * @f$ if propagate_down[0] + * -# @f$ (N \times C \times H \times W) @f$ + * the targets @f$y@f$; Backward fills their diff with gradients + * @f$ \frac{\partial E}{\partial y} = + * \frac{1}{n} \sum\limits_{n=1}^N (y_n - \hat{y}_n) + * @f$ if propagate_down[1] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob diff_; +}; + +} // namespace caffe + +#endif // CAFFE_EUCLIDEAN_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/exp_layer.hpp b/include/caffe/layers/exp_layer.hpp new file mode 100755 index 0000000..9fc8c39 --- /dev/null +++ b/include/caffe/layers/exp_layer.hpp @@ -0,0 +1,80 @@ +#ifndef CAFFE_EXP_LAYER_HPP_ +#define CAFFE_EXP_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/neuron_layer.hpp" + +namespace caffe { + +/** + * @brief Computes @f$ y = \gamma ^ {\alpha x + \beta} @f$, + * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, + * and base @f$ \gamma @f$. + */ +template +class ExpLayer : public NeuronLayer { + public: + /** + * @param param provides ExpParameter exp_param, + * with ExpLayer options: + * - scale (\b optional, default 1) the scale @f$ \alpha @f$ + * - shift (\b optional, default 0) the shift @f$ \beta @f$ + * - base (\b optional, default -1 for a value of @f$ e \approx 2.718 @f$) + * the base @f$ \gamma @f$ + */ + explicit ExpLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Exp"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = \gamma ^ {\alpha x + \beta} + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the exp inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length 1)
+ *   -# @f$ (N \times C \times H \times W) @f$
+ *      the inputs @f$ x @f$; Backward fills their diff with
+ *      gradients @f$
+ *        \frac{\partial E}{\partial x} =
+ *            \frac{\partial E}{\partial y} y \alpha \log_e(\gamma)
+ *      @f$ if propagate_down[0]
+ */
+  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
+  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
+
+  Dtype inner_scale_, outer_scale_;
+};
+
+}  // namespace caffe
+
+#endif  // CAFFE_EXP_LAYER_HPP_
diff --git a/include/caffe/layers/filter_layer.hpp b/include/caffe/layers/filter_layer.hpp
new file mode 100755
index 0000000..e040e66
--- /dev/null
+++ b/include/caffe/layers/filter_layer.hpp
@@ -0,0 +1,77 @@
+#ifndef CAFFE_FILTER_LAYER_HPP_
+#define CAFFE_FILTER_LAYER_HPP_
+
+#include <vector>
+
+#include "caffe/blob.hpp"
+#include "caffe/layer.hpp"
+#include "caffe/proto/caffe.pb.h"
+
+namespace caffe {
+
+/**
+ * @brief Takes two or more Blobs, interprets the last Blob as a selector, and
+ *        filters the remaining Blobs according to the selector data (0 means
+ *        the corresponding item is filtered out, non-zero means it is kept).
+ *        For example, a selector of @f$ (1, 0, 1, 0) @f$ keeps items 0 and 2
+ *        of every other bottom Blob.
+ */
+template <typename Dtype>
+class FilterLayer : public Layer<Dtype> {
+ public:
+  explicit FilterLayer(const LayerParameter& param)
+      : Layer<Dtype>(param) {}
+  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+
+  virtual inline const char* type() const { return "Filter"; }
+  virtual inline int MinBottomBlobs() const { return 2; }
+  virtual inline int MinTopBlobs() const { return 1; }
+
+ protected:
+  /**
+   * @param bottom input Blob vector (length 2+)
+   *   -# @f$ (N \times C \times H \times W) @f$
+   *      the inputs to be filtered @f$ x_1 @f$
+   *   -# ...
+   *   -# @f$ (N \times C \times H \times W) @f$
+   *      the inputs to be filtered @f$ x_K @f$
+   *   -# @f$ (N \times 1 \times 1 \times 1) @f$
+   *      the selector blob
+   * @param top output Blob vector (length 1+)
+   *   -# @f$ (S \times C \times H \times W) @f$
+   *      the filtered output @f$ x_1 @f$,
+   *      where S is the number of items
+   *      that haven't been filtered
+   *   -# @f$ (S \times C \times H \times W) @f$
+   *      the filtered output @f$ x_K @f$,
+   *      where S is the number of items
+   *      that haven't been filtered
+   */
+  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+
+  /**
+   * @brief Computes the error gradient w.r.t. the forwarded inputs.
+   *
+   * @param top output Blob vector (length 1+), providing the error gradient with
+   *      respect to the outputs
+   * @param propagate_down see Layer::Backward.
+ * @param bottom input Blob vector (length 2+), into which the top error + * gradient is copied + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool first_reshape_; + vector indices_to_forward_; +}; + +} // namespace caffe + +#endif // CAFFE_FILTER_LAYER_HPP_ diff --git a/include/caffe/layers/flatten_layer.hpp b/include/caffe/layers/flatten_layer.hpp new file mode 100755 index 0000000..e494bbb --- /dev/null +++ b/include/caffe/layers/flatten_layer.hpp @@ -0,0 +1,61 @@ +#ifndef CAFFE_FLATTEN_LAYER_HPP_ +#define CAFFE_FLATTEN_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Reshapes the input Blob into flat vectors. + * + * Note: because this layer does not change the input values -- merely the + * dimensions -- it can simply copy the input. The copy happens "virtually" + * (thus taking effectively 0 real time) by setting, in Forward, the data + * pointer of the top Blob to that of the bottom Blob (see Blob::ShareData), + * and in Backward, the diff pointer of the bottom Blob to that of the top Blob + * (see Blob::ShareDiff). + */ +template +class FlattenLayer : public Layer { + public: + explicit FlattenLayer(const LayerParameter& param) + : Layer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Flatten"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + /** + * @param bottom input Blob vector (length 2+) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs + * @param top output Blob vector (length 1) + * -# @f$ (N \times CHW \times 1 \times 1) @f$ + * the outputs -- i.e., the (virtually) copied, flattened inputs + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the concatenate inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length K), into which the top error + * gradient is (virtually) copied + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +} // namespace caffe + +#endif // CAFFE_FLATTEN_LAYER_HPP_ diff --git a/include/caffe/layers/hdf5_data_layer.hpp b/include/caffe/layers/hdf5_data_layer.hpp new file mode 100755 index 0000000..b04cf8e --- /dev/null +++ b/include/caffe/layers/hdf5_data_layer.hpp @@ -0,0 +1,62 @@ +#ifndef CAFFE_HDF5_DATA_LAYER_HPP_ +#define CAFFE_HDF5_DATA_LAYER_HPP_ + +#include "hdf5.h" + +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/base_data_layer.hpp" + +namespace caffe { + +/** + * @brief Provides data to the Net from HDF5 files. + * + * TODO(dox): thorough documentation for Forward and proto params. 
+ */ +template +class HDF5DataLayer : public Layer { + public: + explicit HDF5DataLayer(const LayerParameter& param) + : Layer(param) {} + virtual ~HDF5DataLayer(); + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } + // Data layers have no bottoms, so reshaping is trivial. + virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "HDF5Data"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void LoadHDF5FileData(const char* filename); + + std::vector hdf_filenames_; + unsigned int num_files_; + unsigned int current_file_; + hsize_t current_row_; + std::vector > > hdf_blobs_; + std::vector data_permutation_; + std::vector file_permutation_; +}; + +} // namespace caffe + +#endif // CAFFE_HDF5_DATA_LAYER_HPP_ diff --git a/include/caffe/layers/hdf5_output_layer.hpp b/include/caffe/layers/hdf5_output_layer.hpp new file mode 100755 index 0000000..487d08f --- /dev/null +++ b/include/caffe/layers/hdf5_output_layer.hpp @@ -0,0 +1,64 @@ +#ifndef CAFFE_HDF5_OUTPUT_LAYER_HPP_ +#define CAFFE_HDF5_OUTPUT_LAYER_HPP_ + +#include "hdf5.h" + +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +#define HDF5_DATA_DATASET_NAME "data" +#define HDF5_DATA_LABEL_NAME "label" + +/** + * @brief Write blobs to disk as HDF5 files. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class HDF5OutputLayer : public Layer { + public: + explicit HDF5OutputLayer(const LayerParameter& param) + : Layer(param), file_opened_(false) {} + virtual ~HDF5OutputLayer(); + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } + // Data layers have no bottoms, so reshaping is trivial. 
+ virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "HDF5Output"; } + // TODO: no limit on the number of blobs + virtual inline int ExactNumBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 0; } + + inline std::string file_name() const { return file_name_; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void SaveBlobs(); + + bool file_opened_; + std::string file_name_; + hid_t file_id_; + Blob data_blob_; + Blob label_blob_; +}; + +} // namespace caffe + +#endif // CAFFE_HDF5_OUTPUT_LAYER_HPP_ diff --git a/include/caffe/layers/hinge_loss_layer.hpp b/include/caffe/layers/hinge_loss_layer.hpp new file mode 100755 index 0000000..54e42bd --- /dev/null +++ b/include/caffe/layers/hinge_loss_layer.hpp @@ -0,0 +1,104 @@ +#ifndef CAFFE_HINGE_LOSS_LAYER_HPP_ +#define CAFFE_HINGE_LOSS_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/loss_layer.hpp" + +namespace caffe { + +/** + * @brief Computes the hinge loss for a one-of-many classification task. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ t @f$, a Blob with values in + * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of + * the @f$ K = CHW @f$ classes. In an SVM, @f$ t @f$ is the result of + * taking the inner product @f$ X^T W @f$ of the D-dimensional features + * @f$ X \in \mathcal{R}^{D \times N} @f$ and the learned hyperplane + * parameters @f$ W \in \mathcal{R}^{D \times K} @f$, so a Net with just + * an InnerProductLayer (with num_output = D) providing predictions to a + * HingeLossLayer and no other learnable parameters or losses is + * equivalent to an SVM. + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed hinge loss: @f$ E = + * \frac{1}{N} \sum\limits_{n=1}^N \sum\limits_{k=1}^K + * [\max(0, 1 - \delta\{l_n = k\} t_{nk})] ^ p + * @f$, for the @f$ L^p @f$ norm + * (defaults to @f$ p = 1 @f$, the L1 norm; L2 norm, as in L2-SVM, + * is also available), and @f$ + * \delta\{\mathrm{condition}\} = \left\{ + * \begin{array}{lr} + * 1 & \mbox{if condition} \\ + * -1 & \mbox{otherwise} + * \end{array} \right. + * @f$ + * + * In an SVM, @f$ t \in \mathcal{R}^{N \times K} @f$ is the result of taking + * the inner product @f$ X^T W @f$ of the features + * @f$ X \in \mathcal{R}^{D \times N} @f$ + * and the learned hyperplane parameters + * @f$ W \in \mathcal{R}^{D \times K} @f$. So, a Net with just an + * InnerProductLayer (with num_output = @f$k@f$) providing predictions to a + * HingeLossLayer is equivalent to an SVM (assuming it has no other learned + * outside the InnerProductLayer and no other losses outside the + * HingeLossLayer). 
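+ *
+ * As a small worked example of the loss above (values chosen purely for
+ * illustration): with @f$ K = 3 @f$ classes, a single sample with label
+ * @f$ l = 1 @f$, predictions @f$ t = (0.2, 1.3, -0.5) @f$, and the default
+ * L1 norm contributes
+ * @f$ \max(0, 1 + 0.2) + \max(0, 1 - 1.3) + \max(0, 1 + (-0.5))
+ *     = 1.2 + 0 + 0.5 = 1.7 @f$.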
+ */ +template +class HingeLossLayer : public LossLayer { + public: + explicit HingeLossLayer(const LayerParameter& param) + : LossLayer(param) {} + + virtual inline const char* type() const { return "HingeLoss"; } + + protected: + /// @copydoc HingeLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the hinge loss error gradient w.r.t. the predictions. + * + * Gradients cannot be computed with respect to the label inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as we can't compute gradients with + * respect to the labels. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$t@f$; Backward computes diff + * @f$ \frac{\partial E}{\partial t} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + + +} // namespace caffe + +#endif // CAFFE_HINGE_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/im2col_layer.hpp b/include/caffe/layers/im2col_layer.hpp new file mode 100755 index 0000000..71e32f7 --- /dev/null +++ b/include/caffe/layers/im2col_layer.hpp @@ -0,0 +1,65 @@ +#ifndef CAFFE_IM2COL_LAYER_HPP_ +#define CAFFE_IM2COL_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief A helper for image operations that rearranges image regions into + * column vectors. Used by ConvolutionLayer to perform convolution + * by matrix multiplication. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class Im2colLayer : public Layer { + public: + explicit Im2colLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Im2col"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + /// @brief The spatial dimensions of a filter kernel. + Blob kernel_shape_; + /// @brief The spatial dimensions of the stride. + Blob stride_; + /// @brief The spatial dimensions of the padding. 
+ Blob pad_; + /// @brief The spatial dimensions of the dilation. + Blob dilation_; + + int num_spatial_axes_; + int bottom_dim_; + int top_dim_; + + int channel_axis_; + int num_; + int channels_; + + bool force_nd_im2col_; +}; + +} // namespace caffe + +#endif // CAFFE_IM2COL_LAYER_HPP_ diff --git a/include/caffe/layers/image_data_layer.hpp b/include/caffe/layers/image_data_layer.hpp new file mode 100755 index 0000000..a0d3384 --- /dev/null +++ b/include/caffe/layers/image_data_layer.hpp @@ -0,0 +1,47 @@ +#ifndef CAFFE_IMAGE_DATA_LAYER_HPP_ +#define CAFFE_IMAGE_DATA_LAYER_HPP_ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/data_transformer.hpp" +#include "caffe/internal_thread.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/base_data_layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Provides data to the Net from image files. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class ImageDataLayer : public BasePrefetchingDataLayer { + public: + explicit ImageDataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param) {} + virtual ~ImageDataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "ImageData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + protected: + shared_ptr prefetch_rng_; + virtual void ShuffleImages(); + virtual void load_batch(Batch* batch); + + vector > lines_; + int lines_id_; +}; + + +} // namespace caffe + +#endif // CAFFE_IMAGE_DATA_LAYER_HPP_ diff --git a/include/caffe/layers/infogain_loss_layer.hpp b/include/caffe/layers/infogain_loss_layer.hpp new file mode 100755 index 0000000..633f339 --- /dev/null +++ b/include/caffe/layers/infogain_loss_layer.hpp @@ -0,0 +1,110 @@ +#ifndef CAFFE_INFOGAIN_LOSS_LAYER_HPP_ +#define CAFFE_INFOGAIN_LOSS_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/loss_layer.hpp" + +namespace caffe { + +/** + * @brief A generalization of MultinomialLogisticLossLayer that takes an + * "information gain" (infogain) matrix specifying the "value" of all label + * pairs. + * + * Equivalent to the MultinomialLogisticLossLayer if the infogain matrix is the + * identity. + * + * @param bottom input Blob vector (length 2-3) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{p} @f$, a Blob with values in + * @f$ [0, 1] @f$ indicating the predicted probability of each of the + * @f$ K = CHW @f$ classes. Each prediction vector @f$ \hat{p}_n @f$ + * should sum to 1 as in a probability distribution: @f$ + * \forall n \sum\limits_{k=1}^K \hat{p}_{nk} = 1 @f$. + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * -# @f$ (1 \times 1 \times K \times K) @f$ + * (\b optional) the infogain matrix @f$ H @f$. This must be provided as + * the third bottom blob input if not provided as the infogain_mat in the + * InfogainLossParameter. If @f$ H = I @f$, this layer is equivalent to the + * MultinomialLogisticLossLayer. 
+ * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed infogain multinomial logistic loss: @f$ E = + * \frac{-1}{N} \sum\limits_{n=1}^N H_{l_n} \log(\hat{p}_n) = + * \frac{-1}{N} \sum\limits_{n=1}^N \sum\limits_{k=1}^{K} H_{l_n,k} + * \log(\hat{p}_{n,k}) + * @f$, where @f$ H_{l_n} @f$ denotes row @f$l_n@f$ of @f$H@f$. + */ +template +class InfogainLossLayer : public LossLayer { + public: + explicit InfogainLossLayer(const LayerParameter& param) + : LossLayer(param), infogain_() {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + // InfogainLossLayer takes 2-3 bottom Blobs; if there are 3 the third should + // be the infogain matrix. (Otherwise the infogain matrix is loaded from a + // file specified by LayerParameter.) + virtual inline int ExactNumBottomBlobs() const { return -1; } + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int MaxBottomBlobs() const { return 3; } + + virtual inline const char* type() const { return "InfogainLoss"; } + + protected: + /// @copydoc InfogainLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the infogain loss error gradient w.r.t. the predictions. + * + * Gradients cannot be computed with respect to the label inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. (The same applies to the infogain matrix, if + * provided as bottom[2] rather than in the layer_param.) + * + * @param top output Blob vector (length 1), providing the error gradient + * with respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as we can't compute gradients with + * respect to the labels (similarly for propagate_down[2] and the + * infogain matrix, if provided as bottom[2]) + * @param bottom input Blob vector (length 2-3) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{p} @f$; Backward computes diff + * @f$ \frac{\partial E}{\partial \hat{p}} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + * -# @f$ (1 \times 1 \times K \times K) @f$ + * (\b optional) the information gain matrix -- ignored as its error + * gradient computation is not implemented. 
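+ *
+ * For reference, the gradient implied by the loss formula above is
+ * @f$ \frac{\partial E}{\partial \hat{p}_{n,k}} =
+ *     \frac{-1}{N} \frac{H_{l_n,k}}{\hat{p}_{n,k}} @f$,
+ * up to the loss weight @f$ \lambda @f$ carried in the top diff.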
+ */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob infogain_; +}; + +} // namespace caffe + +#endif // CAFFE_INFOGAIN_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/inner_product_layer.hpp b/include/caffe/layers/inner_product_layer.hpp new file mode 100755 index 0000000..18d0d61 --- /dev/null +++ b/include/caffe/layers/inner_product_layer.hpp @@ -0,0 +1,52 @@ +#ifndef CAFFE_INNER_PRODUCT_LAYER_HPP_ +#define CAFFE_INNER_PRODUCT_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Also known as a "fully-connected" layer, computes an inner product + * with a set of learned weights, and (optionally) adds biases. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class InnerProductLayer : public Layer { + public: + explicit InnerProductLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "InnerProduct"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int M_; + int K_; + int N_; + bool bias_term_; + Blob bias_multiplier_; + bool transpose_; ///< if true, assume transposed weights +}; + +} // namespace caffe + +#endif // CAFFE_INNER_PRODUCT_LAYER_HPP_ diff --git a/include/caffe/layers/input_layer.hpp b/include/caffe/layers/input_layer.hpp new file mode 100755 index 0000000..f447267 --- /dev/null +++ b/include/caffe/layers/input_layer.hpp @@ -0,0 +1,44 @@ +#ifndef CAFFE_INPUT_LAYER_HPP_ +#define CAFFE_INPUT_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Provides data to the Net by assigning tops directly. + * + * This data layer is a container that merely holds the data assigned to it; + * forward, backward, and reshape are all no-ops. + */ +template +class InputLayer : public Layer { + public: + explicit InputLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } + // Data layers have no bottoms, so reshaping is trivial. 
+ virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "Input"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top) {} + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} +}; + +} // namespace caffe + +#endif // CAFFE_INPUT_LAYER_HPP_ diff --git a/include/caffe/layers/log_layer.hpp b/include/caffe/layers/log_layer.hpp new file mode 100755 index 0000000..7d037d2 --- /dev/null +++ b/include/caffe/layers/log_layer.hpp @@ -0,0 +1,82 @@ +#ifndef CAFFE_LOG_LAYER_HPP_ +#define CAFFE_LOG_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/neuron_layer.hpp" + +namespace caffe { + +/** + * @brief Computes @f$ y = log_{\gamma}(\alpha x + \beta) @f$, + * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, + * and base @f$ \gamma @f$. + */ +template +class LogLayer : public NeuronLayer { + public: + /** + * @param param provides LogParameter log_param, + * with LogLayer options: + * - scale (\b optional, default 1) the scale @f$ \alpha @f$ + * - shift (\b optional, default 0) the shift @f$ \beta @f$ + * - base (\b optional, default -1 for a value of @f$ e \approx 2.718 @f$) + * the base @f$ \gamma @f$ + */ + explicit LogLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Log"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = log_{\gamma}(\alpha x + \beta) + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the exp inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} = + * \frac{\partial E}{\partial y} y \alpha \log_e(gamma) + * @f$ if propagate_down[0] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Dtype base_scale_; + Dtype input_scale_, input_shift_; + Dtype backward_num_scale_; +}; + +} // namespace caffe + +#endif // CAFFE_LOG_LAYER_HPP_ diff --git a/include/caffe/layers/loss_layer.hpp b/include/caffe/layers/loss_layer.hpp new file mode 100755 index 0000000..dbdf612 --- /dev/null +++ b/include/caffe/layers/loss_layer.hpp @@ -0,0 +1,53 @@ +#ifndef CAFFE_LOSS_LAYER_HPP_ +#define CAFFE_LOSS_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +const float kLOG_THRESHOLD = 1e-20; + +/** + * @brief An interface for Layer%s that take two Blob%s as input -- usually + * (1) predictions and (2) ground-truth labels -- and output a + * singleton Blob representing the loss. + * + * LossLayers are typically only capable of backpropagating to their first input + * -- the predictions. + */ +template +class LossLayer : public Layer { + public: + explicit LossLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp( + const vector*>& bottom, const vector*>& top); + virtual void Reshape( + const vector*>& bottom, const vector*>& top); + + virtual inline int ExactNumBottomBlobs() const { return 2; } + + /** + * @brief For convenience and backwards compatibility, instruct the Net to + * automatically allocate a single top Blob for LossLayers, into which + * they output their singleton loss, (even if the user didn't specify + * one in the prototxt, etc.). + */ + virtual inline bool AutoTopBlobs() const { return true; } + virtual inline int ExactNumTopBlobs() const { return 1; } + /** + * We usually cannot backpropagate to the labels; ignore force_backward for + * these inputs. + */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return bottom_index != 1; + } +}; + +} // namespace caffe + +#endif // CAFFE_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/lrn_layer.hpp b/include/caffe/layers/lrn_layer.hpp new file mode 100755 index 0000000..06cf71a --- /dev/null +++ b/include/caffe/layers/lrn_layer.hpp @@ -0,0 +1,94 @@ +#ifndef CAFFE_LRN_LAYER_HPP_ +#define CAFFE_LRN_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/eltwise_layer.hpp" +#include "caffe/layers/pooling_layer.hpp" +#include "caffe/layers/power_layer.hpp" +#include "caffe/layers/split_layer.hpp" + +namespace caffe { + +/** + * @brief Normalize the input in a local region across or within feature maps. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. 
+ */ +template +class LRNLayer : public Layer { + public: + explicit LRNLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "LRN"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + virtual void CrossChannelForward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void CrossChannelForward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void WithinChannelForward(const vector*>& bottom, + const vector*>& top); + virtual void CrossChannelBackward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void CrossChannelBackward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void WithinChannelBackward(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int size_; + int pre_pad_; + Dtype alpha_; + Dtype beta_; + Dtype k_; + int num_; + int channels_; + int height_; + int width_; + + // Fields used for normalization ACROSS_CHANNELS + // scale_ stores the intermediate summing results + Blob scale_; + + // Fields used for normalization WITHIN_CHANNEL + shared_ptr > split_layer_; + vector*> split_top_vec_; + shared_ptr > square_layer_; + Blob square_input_; + Blob square_output_; + vector*> square_bottom_vec_; + vector*> square_top_vec_; + shared_ptr > pool_layer_; + Blob pool_output_; + vector*> pool_top_vec_; + shared_ptr > power_layer_; + Blob power_output_; + vector*> power_top_vec_; + shared_ptr > product_layer_; + Blob product_input_; + vector*> product_bottom_vec_; +}; + +} // namespace caffe + +#endif // CAFFE_LRN_LAYER_HPP_ diff --git a/include/caffe/layers/lstm_layer.hpp b/include/caffe/layers/lstm_layer.hpp new file mode 100755 index 0000000..a0e67c9 --- /dev/null +++ b/include/caffe/layers/lstm_layer.hpp @@ -0,0 +1,154 @@ +#ifndef CAFFE_LSTM_LAYER_HPP_ +#define CAFFE_LSTM_LAYER_HPP_ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/recurrent_layer.hpp" +#include "caffe/net.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +template class RecurrentLayer; + +/** + * @brief Processes sequential inputs using a "Long Short-Term Memory" (LSTM) + * [1] style recurrent neural network (RNN). Implemented by unrolling + * the LSTM computation through time. + * + * The specific architecture used in this implementation is as described in + * "Learning to Execute" [2], reproduced below: + * i_t := \sigmoid[ W_{hi} * h_{t-1} + W_{xi} * x_t + b_i ] + * f_t := \sigmoid[ W_{hf} * h_{t-1} + W_{xf} * x_t + b_f ] + * o_t := \sigmoid[ W_{ho} * h_{t-1} + W_{xo} * x_t + b_o ] + * g_t := \tanh[ W_{hg} * h_{t-1} + W_{xg} * x_t + b_g ] + * c_t := (f_t .* c_{t-1}) + (i_t .* g_t) + * h_t := o_t .* \tanh[c_t] + * In the implementation, the i, f, o, and g computations are performed as a + * single inner product. 
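+ * Concretely, writing the stacked pre-activations as
+ * @f$ [i_t', f_t', o_t', g_t'] @f$ (shapes given here only for
+ * illustration): with input @f$ x_t \in \mathcal{R}^{I} @f$ and hidden
+ * state @f$ h_{t-1} \in \mathcal{R}^{D} @f$, the four blocks form a single
+ * @f$ 4D @f$-dimensional vector computed in one step as
+ * @f$ W_x x_t + W_h h_{t-1} + b @f$, where @f$ W_x @f$ and @f$ W_h @f$
+ * stack the per-gate weights @f$ W_{x\cdot} @f$ and @f$ W_{h\cdot} @f$
+ * row-wise; the gate nonlinearities are then applied blockwise as in the
+ * equations above.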
+ * + * Notably, this implementation lacks the "diagonal" gates, as used in the + * LSTM architectures described by Alex Graves [3] and others. + * + * [1] Hochreiter, Sepp, and Schmidhuber, Jürgen. "Long short-term memory." + * Neural Computation 9, no. 8 (1997): 1735-1780. + * + * [2] Zaremba, Wojciech, and Sutskever, Ilya. "Learning to execute." + * arXiv preprint arXiv:1410.4615 (2014). + * + * [3] Graves, Alex. "Generating sequences with recurrent neural networks." + * arXiv preprint arXiv:1308.0850 (2013). + */ +template +class LSTMLayer : public RecurrentLayer { + public: + explicit LSTMLayer(const LayerParameter& param) + : RecurrentLayer(param) {} + + virtual inline const char* type() const { return "LSTM"; } + + protected: + virtual void FillUnrolledNet(NetParameter* net_param) const; + virtual void RecurrentInputBlobNames(vector* names) const; + virtual void RecurrentOutputBlobNames(vector* names) const; + virtual void RecurrentInputShapes(vector* shapes) const; + virtual void OutputBlobNames(vector* names) const; +}; + +/** + * @brief A helper for LSTMLayer: computes a single timestep of the + * non-linearity of the LSTM, producing the updated cell and hidden + * states. + */ +template +class LSTMUnitLayer : public Layer { + public: + explicit LSTMUnitLayer(const LayerParameter& param) + : Layer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "LSTMUnit"; } + virtual inline int ExactNumBottomBlobs() const { return 3; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + virtual inline bool AllowForceBackward(const int bottom_index) const { + // Can't propagate to sequence continuation indicators. + return bottom_index != 2; + } + + protected: + /** + * @param bottom input Blob vector (length 3) + * -# @f$ (1 \times N \times D) @f$ + * the previous timestep cell state @f$ c_{t-1} @f$ + * -# @f$ (1 \times N \times 4D) @f$ + * the "gate inputs" @f$ [i_t', f_t', o_t', g_t'] @f$ + * -# @f$ (1 \times N) @f$ + * the sequence continuation indicators @f$ \delta_t @f$ + * @param top output Blob vector (length 2) + * -# @f$ (1 \times N \times D) @f$ + * the updated cell state @f$ c_t @f$, computed as: + * i_t := \sigmoid[i_t'] + * f_t := \sigmoid[f_t'] + * o_t := \sigmoid[o_t'] + * g_t := \tanh[g_t'] + * c_t := cont_t * (f_t .* c_{t-1}) + (i_t .* g_t) + * -# @f$ (1 \times N \times D) @f$ + * the updated hidden state @f$ h_t @f$, computed as: + * h_t := o_t .* \tanh[c_t] + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the LSTMUnit inputs. + * + * @param top output Blob vector (length 2), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times N \times D) @f$: + * containing error gradients @f$ \frac{\partial E}{\partial c_t} @f$ + * with respect to the updated cell state @f$ c_t @f$ + * -# @f$ (1 \times N \times D) @f$: + * containing error gradients @f$ \frac{\partial E}{\partial h_t} @f$ + * with respect to the updated cell state @f$ h_t @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 3), into which the error gradients + * with respect to the LSTMUnit inputs @f$ c_{t-1} @f$ and the gate + * inputs are computed. Computatation of the error gradients w.r.t. + * the sequence indicators is not implemented. 
+ * -# @f$ (1 \times N \times D) @f$ + * the error gradient w.r.t. the previous timestep cell state + * @f$ c_{t-1} @f$ + * -# @f$ (1 \times N \times 4D) @f$ + * the error gradient w.r.t. the "gate inputs" + * @f$ [ + * \frac{\partial E}{\partial i_t} + * \frac{\partial E}{\partial f_t} + * \frac{\partial E}{\partial o_t} + * \frac{\partial E}{\partial g_t} + * ] @f$ + * -# @f$ (1 \times 1 \times N) @f$ + * the gradient w.r.t. the sequence continuation indicators + * @f$ \delta_t @f$ is currently not computed. + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + /// @brief The hidden and output dimension. + int hidden_dim_; + Blob X_acts_; +}; + +} // namespace caffe + +#endif // CAFFE_LSTM_LAYER_HPP_ diff --git a/include/caffe/layers/memory_data_layer.hpp b/include/caffe/layers/memory_data_layer.hpp new file mode 100755 index 0000000..8abcc8c --- /dev/null +++ b/include/caffe/layers/memory_data_layer.hpp @@ -0,0 +1,63 @@ +#ifndef CAFFE_MEMORY_DATA_LAYER_HPP_ +#define CAFFE_MEMORY_DATA_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/base_data_layer.hpp" + +namespace caffe { + +/** + * @brief Provides data to the Net from memory. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class MemoryDataLayer : public BaseDataLayer { + public: + explicit MemoryDataLayer(const LayerParameter& param) + : BaseDataLayer(param), has_new_data_(false) {} + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "MemoryData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + virtual void AddDatumVector(const vector& datum_vector); +#ifdef USE_OPENCV + virtual void AddMatVector(const vector& mat_vector, + const vector& labels); +#endif // USE_OPENCV + + // Reset should accept const pointers, but can't, because the memory + // will be given to Blob, which is mutable + void Reset(Dtype* data, Dtype* label, int n); + void set_batch_size(int new_size); + + int batch_size() { return batch_size_; } + int channels() { return channels_; } + int height() { return height_; } + int width() { return width_; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + int batch_size_, channels_, height_, width_, size_; + Dtype* data_; + Dtype* labels_; + int n_; + size_t pos_; + Blob added_data_; + Blob added_label_; + bool has_new_data_; +}; + +} // namespace caffe + +#endif // CAFFE_MEMORY_DATA_LAYER_HPP_ diff --git a/include/caffe/layers/multinomial_logistic_loss_layer.hpp b/include/caffe/layers/multinomial_logistic_loss_layer.hpp new file mode 100755 index 0000000..3977cf9 --- /dev/null +++ b/include/caffe/layers/multinomial_logistic_loss_layer.hpp @@ -0,0 +1,92 @@ +#ifndef CAFFE_MULTINOMIAL_LOGISTIC_LOSS_LAYER_HPP_ +#define CAFFE_MULTINOMIAL_LOGISTIC_LOSS_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/loss_layer.hpp" + +namespace caffe { + +/** + * @brief Computes the multinomial logistic loss for a one-of-many + * classification task, directly taking a predicted probability + * distribution as input. 
+ * + * When predictions are not already a probability distribution, you should + * instead use the SoftmaxWithLossLayer, which maps predictions to a + * distribution using the SoftmaxLayer, before computing the multinomial + * logistic loss. The SoftmaxWithLossLayer should be preferred over separate + * SoftmaxLayer + MultinomialLogisticLossLayer + * as its gradient computation is more numerically stable. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{p} @f$, a Blob with values in + * @f$ [0, 1] @f$ indicating the predicted probability of each of the + * @f$ K = CHW @f$ classes. Each prediction vector @f$ \hat{p}_n @f$ + * should sum to 1 as in a probability distribution: @f$ + * \forall n \sum\limits_{k=1}^K \hat{p}_{nk} = 1 @f$. + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed multinomial logistic loss: @f$ E = + * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) + * @f$ + */ +template +class MultinomialLogisticLossLayer : public LossLayer { + public: + explicit MultinomialLogisticLossLayer(const LayerParameter& param) + : LossLayer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "MultinomialLogisticLoss"; } + + protected: + /// @copydoc MultinomialLogisticLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the multinomial logistic loss error gradient w.r.t. the + * predictions. + * + * Gradients cannot be computed with respect to the label inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as we can't compute gradients with + * respect to the labels. 
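[Editor's aside] A small plain-C++ sketch of the loss defined above, E = -(1/N) * sum_n log p_hat_{n,l_n}, together with the gradient w.r.t. the predictions that Backward fills in; the log is guarded with a small threshold in the spirit of kLOG_THRESHOLD from loss_layer.hpp, and the loss-weight factor lambda is omitted for brevity.

// Multinomial logistic loss over N samples with K classes each, plus the
// gradient w.r.t. the predicted probabilities (label gradients are not computed).
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

const float kLogThreshold = 1e-20f;  // mirrors kLOG_THRESHOLD above

float multinomial_logistic_loss(const std::vector<std::vector<float> >& prob,  // prob[n][k]
                                const std::vector<int>& label,                 // label[n] in [0, K)
                                std::vector<std::vector<float> >* diff) {
  const int N = static_cast<int>(prob.size());
  float loss = 0.f;
  diff->assign(prob.size(), std::vector<float>(prob[0].size(), 0.f));
  for (int n = 0; n < N; ++n) {
    const float p = std::max(prob[n][label[n]], kLogThreshold);
    loss -= std::log(p) / N;
    (*diff)[n][label[n]] = -1.f / (p * N);  // dE / d p_hat_{n, l_n}
  }
  return loss;
}

int main() {
  std::vector<std::vector<float> > prob = {{0.7f, 0.2f, 0.1f}, {0.1f, 0.8f, 0.1f}};
  std::vector<int> label = {0, 1};
  std::vector<std::vector<float> > diff;
  std::printf("loss = %f\n", multinomial_logistic_loss(prob, label, &diff));
  return 0;
}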
+ * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{p} @f$; Backward computes diff + * @f$ \frac{\partial E}{\partial \hat{p}} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +} // namespace caffe + +#endif // CAFFE_MULTINOMIAL_LOGISTIC_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/mvn_layer.hpp b/include/caffe/layers/mvn_layer.hpp new file mode 100755 index 0000000..3a235ce --- /dev/null +++ b/include/caffe/layers/mvn_layer.hpp @@ -0,0 +1,48 @@ +#ifndef CAFFE_MVN_LAYER_HPP_ +#define CAFFE_MVN_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Normalizes the input to have 0-mean and/or unit (1) variance. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class MVNLayer : public Layer { + public: + explicit MVNLayer(const LayerParameter& param) + : Layer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "MVN"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob mean_, variance_, temp_; + + /// sum_multiplier is used to carry out sum using BLAS + Blob sum_multiplier_; + Dtype eps_; +}; + +} // namespace caffe + +#endif // CAFFE_MVN_LAYER_HPP_ diff --git a/include/caffe/layers/neuron_layer.hpp b/include/caffe/layers/neuron_layer.hpp new file mode 100755 index 0000000..10c108c --- /dev/null +++ b/include/caffe/layers/neuron_layer.hpp @@ -0,0 +1,32 @@ +#ifndef CAFFE_NEURON_LAYER_HPP_ +#define CAFFE_NEURON_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief An interface for layers that take one blob as input (@f$ x @f$) + * and produce one equally-sized blob as output (@f$ y @f$), where + * each element of the output depends only on the corresponding input + * element. 
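[Editor's aside] Stepping back to MVNLayer above: a minimal sketch of mean-variance normalization over a flat array, assuming the common y = (x - mean) / sqrt(var + eps) form with eps guarding the division; the layer's proto options (e.g. whether variance is normalized at all) are reduced to a single flag here.

// Subtract the mean and optionally divide by the standard deviation.
#include <cmath>
#include <cstdio>
#include <vector>

void mvn(std::vector<float>* x, bool normalize_variance, float eps) {
  const int n = static_cast<int>(x->size());
  float mean = 0.f, var = 0.f;
  for (float v : *x) mean += v / n;
  for (float v : *x) var += (v - mean) * (v - mean) / n;
  for (float& v : *x) {
    v -= mean;
    if (normalize_variance) v /= std::sqrt(var + eps);
  }
}

int main() {
  std::vector<float> x = {1.f, 2.f, 3.f, 4.f};
  mvn(&x, true, 1e-9f);
  for (float v : x) std::printf("%f ", v);
  std::printf("\n");
  return 0;
}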
+ */ +template +class NeuronLayer : public Layer { + public: + explicit NeuronLayer(const LayerParameter& param) + : Layer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } +}; + +} // namespace caffe + +#endif // CAFFE_NEURON_LAYER_HPP_ diff --git a/include/caffe/layers/parameter_layer.hpp b/include/caffe/layers/parameter_layer.hpp new file mode 100755 index 0000000..188b92a --- /dev/null +++ b/include/caffe/layers/parameter_layer.hpp @@ -0,0 +1,45 @@ +#ifndef CAFFE_PARAMETER_LAYER_HPP_ +#define CAFFE_PARAMETER_LAYER_HPP_ + +#include + +#include "caffe/layer.hpp" + +namespace caffe { + +template +class ParameterLayer : public Layer { + public: + explicit ParameterLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top) { + if (this->blobs_.size() > 0) { + LOG(INFO) << "Skipping parameter initialization"; + } else { + this->blobs_.resize(1); + this->blobs_[0].reset(new Blob()); + this->blobs_[0]->Reshape(this->layer_param_.parameter_param().shape()); + } + top[0]->Reshape(this->layer_param_.parameter_param().shape()); + } + virtual void Reshape(const vector*>& bottom, + const vector*>& top) { } + virtual inline const char* type() const { return "Parameter"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top) { + top[0]->ShareData(*(this->blobs_[0])); + top[0]->ShareDiff(*(this->blobs_[0])); + } + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) + { } +}; + +} // namespace caffe + +#endif diff --git a/include/caffe/layers/pooling_layer.hpp b/include/caffe/layers/pooling_layer.hpp new file mode 100755 index 0000000..f4d6803 --- /dev/null +++ b/include/caffe/layers/pooling_layer.hpp @@ -0,0 +1,60 @@ +#ifndef CAFFE_POOLING_LAYER_HPP_ +#define CAFFE_POOLING_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Pools the input image by taking the max, average, etc. within regions. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class PoolingLayer : public Layer { + public: + explicit PoolingLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Pooling"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + // MAX POOL layers can output an extra top blob for the mask; + // others can only output the pooled inputs. + virtual inline int MaxTopBlobs() const { + return (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_MAX) ? 
2 : 1; + } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; + int pad_h_, pad_w_; + int channels_; + int height_, width_; + int pooled_height_, pooled_width_; + bool global_pooling_; + Blob rand_idx_; + Blob max_idx_; +}; + +} // namespace caffe + +#endif // CAFFE_POOLING_LAYER_HPP_ diff --git a/include/caffe/layers/power_layer.hpp b/include/caffe/layers/power_layer.hpp new file mode 100755 index 0000000..6ecbafc --- /dev/null +++ b/include/caffe/layers/power_layer.hpp @@ -0,0 +1,89 @@ +#ifndef CAFFE_POWER_LAYER_HPP_ +#define CAFFE_POWER_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/neuron_layer.hpp" + +namespace caffe { + +/** + * @brief Computes @f$ y = (\alpha x + \beta) ^ \gamma @f$, + * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, + * and power @f$ \gamma @f$. + */ +template +class PowerLayer : public NeuronLayer { + public: + /** + * @param param provides PowerParameter power_param, + * with PowerLayer options: + * - scale (\b optional, default 1) the scale @f$ \alpha @f$ + * - shift (\b optional, default 0) the shift @f$ \beta @f$ + * - power (\b optional, default 1) the power @f$ \gamma @f$ + */ + explicit PowerLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Power"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = (\alpha x + \beta) ^ \gamma + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the power inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} = + * \frac{\partial E}{\partial y} + * \alpha \gamma (\alpha x + \beta) ^ {\gamma - 1} = + * \frac{\partial E}{\partial y} + * \frac{\alpha \gamma y}{\alpha x + \beta} + * @f$ if propagate_down[0] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + /// @brief @f$ \gamma @f$ from layer_param_.power_param() + Dtype power_; + /// @brief @f$ \alpha @f$ from layer_param_.power_param() + Dtype scale_; + /// @brief @f$ \beta @f$ from layer_param_.power_param() + Dtype shift_; + /// @brief Result of @f$ \alpha \gamma @f$ + Dtype diff_scale_; +}; + +} // namespace caffe + +#endif // CAFFE_POWER_LAYER_HPP_ diff --git a/include/caffe/layers/prelu_layer.hpp b/include/caffe/layers/prelu_layer.hpp new file mode 100755 index 0000000..3ddfb48 --- /dev/null +++ b/include/caffe/layers/prelu_layer.hpp @@ -0,0 +1,101 @@ +#ifndef CAFFE_PRELU_LAYER_HPP_ +#define CAFFE_PRELU_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/neuron_layer.hpp" + +namespace caffe { + +/** + * @brief Parameterized Rectified Linear Unit non-linearity @f$ + * y_i = \max(0, x_i) + a_i \min(0, x_i) + * @f$. The differences from ReLULayer are 1) negative slopes are + * learnable though backprop and 2) negative slopes can vary across + * channels. The number of axes of input blob should be greater than or + * equal to 2. The 1st axis (0-based) is seen as channels. + */ +template +class PReLULayer : public NeuronLayer { + public: + /** + * @param param provides PReLUParameter prelu_param, + * with PReLULayer options: + * - filler (\b optional, FillerParameter, + * default {'type': constant 'value':0.25}). + * - channel_shared (\b optional, default false). + * negative slopes are shared across channels. + */ + explicit PReLULayer(const LayerParameter& param) + : NeuronLayer(param) {} + + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "PReLU"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times ...) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times ...) @f$ + * the computed outputs for each channel @f$i@f$ @f$ + * y_i = \max(0, x_i) + a_i \min(0, x_i) + * @f$. + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the PReLU inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times ...) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times ...) 
@f$ + * the inputs @f$ x @f$; For each channel @f$i@f$, backward fills their + * diff with gradients @f$ + * \frac{\partial E}{\partial x_i} = \left\{ + * \begin{array}{lr} + * a_i \frac{\partial E}{\partial y_i} & \mathrm{if} \; x_i \le 0 \\ + * \frac{\partial E}{\partial y_i} & \mathrm{if} \; x_i > 0 + * \end{array} \right. + * @f$. + * If param_propagate_down_[0] is true, it fills the diff with gradients + * @f$ + * \frac{\partial E}{\partial a_i} = \left\{ + * \begin{array}{lr} + * \sum_{x_i} x_i \frac{\partial E}{\partial y_i} & \mathrm{if} \; x_i \le 0 \\ + * 0 & \mathrm{if} \; x_i > 0 + * \end{array} \right. + * @f$. + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool channel_shared_; + Blob multiplier_; // dot multiplier for backward computation of params + Blob backward_buff_; // temporary buffer for backward computation + Blob bottom_memory_; // memory for in-place computation +}; + +} // namespace caffe + +#endif // CAFFE_PRELU_LAYER_HPP_ diff --git a/include/caffe/layers/python_layer.hpp b/include/caffe/layers/python_layer.hpp new file mode 100755 index 0000000..66dbbdf --- /dev/null +++ b/include/caffe/layers/python_layer.hpp @@ -0,0 +1,59 @@ +#ifndef CAFFE_PYTHON_LAYER_HPP_ +#define CAFFE_PYTHON_LAYER_HPP_ + +#include +#include + +#include "caffe/layer.hpp" + +namespace bp = boost::python; + +namespace caffe { + +template +class PythonLayer : public Layer { + public: + PythonLayer(PyObject* self, const LayerParameter& param) + : Layer(param), self_(bp::handle<>(bp::borrowed(self))) { } + + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top) { + // Disallow PythonLayer in MultiGPU training stage, due to GIL issues + // Details: https://github.com/BVLC/caffe/issues/2936 + if (this->phase_ == TRAIN && Caffe::solver_count() > 1 + && !ShareInParallel()) { + LOG(FATAL) << "PythonLayer is not implemented in Multi-GPU training"; + } + self_.attr("param_str") = bp::str( + this->layer_param_.python_param().param_str()); + self_.attr("phase") = static_cast(this->phase_); + self_.attr("setup")(bottom, top); + } + virtual void Reshape(const vector*>& bottom, + const vector*>& top) { + self_.attr("reshape")(bottom, top); + } + + virtual inline bool ShareInParallel() const { + return this->layer_param_.python_param().share_in_parallel(); + } + + virtual inline const char* type() const { return "Python"; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top) { + self_.attr("forward")(bottom, top); + } + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + self_.attr("backward")(top, propagate_down, bottom); + } + + private: + bp::object self_; +}; + +} // namespace caffe + +#endif diff --git a/include/caffe/layers/recurrent_layer.hpp b/include/caffe/layers/recurrent_layer.hpp new file mode 100755 index 0000000..ca17371 --- /dev/null +++ b/include/caffe/layers/recurrent_layer.hpp @@ -0,0 +1,187 @@ +#ifndef CAFFE_RECURRENT_LAYER_HPP_ +#define CAFFE_RECURRENT_LAYER_HPP_ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/net.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/format.hpp" + +namespace caffe { + +template class RecurrentLayer; + +/** + * @brief An abstract class for implementing recurrent behavior inside of an 
+ * unrolled network. This Layer type cannot be instantiated -- instead, + * you should use one of its implementations which defines the recurrent + * architecture, such as RNNLayer or LSTMLayer. + */ +template +class RecurrentLayer : public Layer { + public: + explicit RecurrentLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual void Reset(); + + virtual inline const char* type() const { return "Recurrent"; } + virtual inline int MinBottomBlobs() const { + int min_bottoms = 2; + if (this->layer_param_.recurrent_param().expose_hidden()) { + vector inputs; + this->RecurrentInputBlobNames(&inputs); + min_bottoms += inputs.size(); + } + return min_bottoms; + } + virtual inline int MaxBottomBlobs() const { return MinBottomBlobs() + 1; } + virtual inline int ExactNumTopBlobs() const { + int num_tops = 1; + if (this->layer_param_.recurrent_param().expose_hidden()) { + vector outputs; + this->RecurrentOutputBlobNames(&outputs); + num_tops += outputs.size(); + } + return num_tops; + } + + virtual inline bool AllowForceBackward(const int bottom_index) const { + // Can't propagate to sequence continuation indicators. + return bottom_index != 1; + } + + protected: + /** + * @brief Fills net_param with the recurrent network architecture. Subclasses + * should define this -- see RNNLayer and LSTMLayer for examples. + */ + virtual void FillUnrolledNet(NetParameter* net_param) const = 0; + + /** + * @brief Fills names with the names of the 0th timestep recurrent input + * Blob&s. Subclasses should define this -- see RNNLayer and LSTMLayer + * for examples. + */ + virtual void RecurrentInputBlobNames(vector* names) const = 0; + + /** + * @brief Fills shapes with the shapes of the recurrent input Blob&s. + * Subclasses should define this -- see RNNLayer and LSTMLayer + * for examples. + */ + virtual void RecurrentInputShapes(vector* shapes) const = 0; + + /** + * @brief Fills names with the names of the Tth timestep recurrent output + * Blob&s. Subclasses should define this -- see RNNLayer and LSTMLayer + * for examples. + */ + virtual void RecurrentOutputBlobNames(vector* names) const = 0; + + /** + * @brief Fills names with the names of the output blobs, concatenated across + * all timesteps. Should return a name for each top Blob. + * Subclasses should define this -- see RNNLayer and LSTMLayer for + * examples. + */ + virtual void OutputBlobNames(vector* names) const = 0; + + /** + * @param bottom input Blob vector (length 2-3) + * + * -# @f$ (T \times N \times ...) @f$ + * the time-varying input @f$ x @f$. After the first two axes, whose + * dimensions must correspond to the number of timesteps @f$ T @f$ and + * the number of independent streams @f$ N @f$, respectively, its + * dimensions may be arbitrary. Note that the ordering of dimensions -- + * @f$ (T \times N \times ...) @f$, rather than + * @f$ (N \times T \times ...) @f$ -- means that the @f$ N @f$ + * independent input streams must be "interleaved". + * + * -# @f$ (T \times N) @f$ + * the sequence continuation indicators @f$ \delta @f$. 
+ * These inputs should be binary (0 or 1) indicators, where + * @f$ \delta_{t,n} = 0 @f$ means that timestep @f$ t @f$ of stream + * @f$ n @f$ is the beginning of a new sequence, and hence the previous + * hidden state @f$ h_{t-1} @f$ is multiplied by @f$ \delta_t = 0 @f$ + * and has no effect on the cell's output at timestep @f$ t @f$, and + * a value of @f$ \delta_{t,n} = 1 @f$ means that timestep @f$ t @f$ of + * stream @f$ n @f$ is a continuation from the previous timestep + * @f$ t-1 @f$, and the previous hidden state @f$ h_{t-1} @f$ affects the + * updated hidden state and output. + * + * -# @f$ (N \times ...) @f$ (optional) + * the static (non-time-varying) input @f$ x_{static} @f$. + * After the first axis, whose dimension must be the number of + * independent streams, its dimensions may be arbitrary. + * This is mathematically equivalent to using a time-varying input of + * @f$ x'_t = [x_t; x_{static}] @f$ -- i.e., tiling the static input + * across the @f$ T @f$ timesteps and concatenating with the time-varying + * input. Note that if this input is used, all timesteps in a single + * batch within a particular one of the @f$ N @f$ streams must share the + * same static input, even if the sequence continuation indicators + * suggest that difference sequences are ending and beginning within a + * single batch. This may require padding and/or truncation for uniform + * length. + * + * @param top output Blob vector (length 1) + * -# @f$ (T \times N \times D) @f$ + * the time-varying output @f$ y @f$, where @f$ D @f$ is + * recurrent_param.num_output(). + * Refer to documentation for particular RecurrentLayer implementations + * (such as RNNLayer and LSTMLayer) for the definition of @f$ y @f$. + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + /// @brief A Net to implement the Recurrent functionality. + shared_ptr > unrolled_net_; + + /// @brief The number of independent streams to process simultaneously. + int N_; + + /** + * @brief The number of timesteps in the layer's input, and the number of + * timesteps over which to backpropagate through time. + */ + int T_; + + /// @brief Whether the layer has a "static" input copied across all timesteps. + bool static_input_; + + /** + * @brief The last layer to run in the network. (Any later layers are losses + * added to force the recurrent net to do backprop.) + */ + int last_layer_index_; + + /** + * @brief Whether the layer's hidden state at the first and last timesteps + * are layer inputs and outputs, respectively. 
+ */ + bool expose_hidden_; + + vector* > recur_input_blobs_; + vector* > recur_output_blobs_; + vector* > output_blobs_; + Blob* x_input_blob_; + Blob* x_static_input_blob_; + Blob* cont_input_blob_; +}; + +} // namespace caffe + +#endif // CAFFE_RECURRENT_LAYER_HPP_ diff --git a/include/caffe/layers/reduction_layer.hpp b/include/caffe/layers/reduction_layer.hpp new file mode 100755 index 0000000..804a495 --- /dev/null +++ b/include/caffe/layers/reduction_layer.hpp @@ -0,0 +1,59 @@ +#ifndef CAFFE_REDUCTION_LAYER_HPP_ +#define CAFFE_REDUCTION_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Compute "reductions" -- operations that return a scalar output Blob + * for an input Blob of arbitrary size, such as the sum, absolute sum, + * and sum of squares. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class ReductionLayer : public Layer { + public: + explicit ReductionLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Reduction"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + /// @brief the reduction operation performed by the layer + ReductionParameter_ReductionOp op_; + /// @brief a scalar coefficient applied to all outputs + Dtype coeff_; + /// @brief the index of the first input axis to reduce + int axis_; + /// @brief the number of reductions performed + int num_; + /// @brief the input size of each reduction + int dim_; + /// @brief a helper Blob used for summation (op_ == SUM) + Blob sum_multiplier_; +}; + +} // namespace caffe + +#endif // CAFFE_REDUCTION_LAYER_HPP_ diff --git a/include/caffe/layers/relu_layer.hpp b/include/caffe/layers/relu_layer.hpp new file mode 100755 index 0000000..d7a73f7 --- /dev/null +++ b/include/caffe/layers/relu_layer.hpp @@ -0,0 +1,85 @@ +#ifndef CAFFE_RELU_LAYER_HPP_ +#define CAFFE_RELU_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/neuron_layer.hpp" + +namespace caffe { + +/** + * @brief Rectified Linear Unit non-linearity @f$ y = \max(0, x) @f$. + * The simple max is fast to compute, and the function does not saturate. + */ +template +class ReLULayer : public NeuronLayer { + public: + /** + * @param param provides ReLUParameter relu_param, + * with ReLULayer options: + * - negative_slope (\b optional, default 0). + * the value @f$ \nu @f$ by which negative values are multiplied. 
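[Editor's aside] A tiny sketch of ReLULayer's forward rule and its gradient, including the optional negative_slope nu just described (nu = 0 gives the plain max(0, x); the full formulas are spelled out in the Forward/Backward docs that follow).

// ReLU with optional negative slope: y = max(0, x) + nu * min(0, x),
// dy/dx = 1 for x > 0 and nu otherwise (the factor Backward applies to the top diff).
#include <cstdio>

float relu_forward(float x, float nu) {
  return (x > 0.f ? x : 0.f) + nu * (x < 0.f ? x : 0.f);
}

float relu_backward(float x, float top_diff, float nu) {
  return top_diff * (x > 0.f ? 1.f : nu);
}

int main() {
  std::printf("y(-2)  = %f\n", relu_forward(-2.f, 0.1f));
  std::printf("dx(-2) = %f\n", relu_backward(-2.f, 1.f, 0.1f));
  return 0;
}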
+ */ + explicit ReLULayer(const LayerParameter& param) + : NeuronLayer(param) {} + + virtual inline const char* type() const { return "ReLU"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = \max(0, x) + * @f$ by default. If a non-zero negative_slope @f$ \nu @f$ is provided, + * the computed outputs are @f$ y = \max(0, x) + \nu \min(0, x) @f$. + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the ReLU inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} = \left\{ + * \begin{array}{lr} + * 0 & \mathrm{if} \; x \le 0 \\ + * \frac{\partial E}{\partial y} & \mathrm{if} \; x > 0 + * \end{array} \right. + * @f$ if propagate_down[0], by default. + * If a non-zero negative_slope @f$ \nu @f$ is provided, + * the computed gradients are @f$ + * \frac{\partial E}{\partial x} = \left\{ + * \begin{array}{lr} + * \nu \frac{\partial E}{\partial y} & \mathrm{if} \; x \le 0 \\ + * \frac{\partial E}{\partial y} & \mathrm{if} \; x > 0 + * \end{array} \right. + * @f$. + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +} // namespace caffe + +#endif // CAFFE_RELU_LAYER_HPP_ diff --git a/include/caffe/layers/reshape_layer.hpp b/include/caffe/layers/reshape_layer.hpp new file mode 100755 index 0000000..d11e063 --- /dev/null +++ b/include/caffe/layers/reshape_layer.hpp @@ -0,0 +1,52 @@ +#ifndef CAFFE_XXX_LAYER_HPP_ +#define CAFFE_XXX_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/* + * @brief Reshapes the input Blob into an arbitrary-sized output Blob. + * + * Note: similarly to FlattenLayer, this layer does not change the input values + * (see FlattenLayer, Blob::ShareData and Blob::ShareDiff). 
+ */ +template +class ReshapeLayer : public Layer { + public: + explicit ReshapeLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Reshape"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top) {} + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + + /// @brief vector of axes indices whose dimensions we'll copy from the bottom + vector copy_axes_; + /// @brief the index of the axis whose dimension we infer, or -1 if none + int inferred_axis_; + /// @brief the product of the "constant" output dimensions + int constant_count_; +}; + +} // namespace caffe + +#endif // CAFFE_XXX_LAYER_HPP_ diff --git a/include/caffe/layers/rnn_layer.hpp b/include/caffe/layers/rnn_layer.hpp new file mode 100755 index 0000000..6dce238 --- /dev/null +++ b/include/caffe/layers/rnn_layer.hpp @@ -0,0 +1,47 @@ +#ifndef CAFFE_RNN_LAYER_HPP_ +#define CAFFE_RNN_LAYER_HPP_ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/recurrent_layer.hpp" +#include "caffe/net.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +template class RecurrentLayer; + +/** + * @brief Processes time-varying inputs using a simple recurrent neural network + * (RNN). Implemented as a network unrolling the RNN computation in time. + * + * Given time-varying inputs @f$ x_t @f$, computes hidden state @f$ + * h_t := \tanh[ W_{hh} h_{t_1} + W_{xh} x_t + b_h ] + * @f$, and outputs @f$ + * o_t := \tanh[ W_{ho} h_t + b_o ] + * @f$. + */ +template +class RNNLayer : public RecurrentLayer { + public: + explicit RNNLayer(const LayerParameter& param) + : RecurrentLayer(param) {} + + virtual inline const char* type() const { return "RNN"; } + + protected: + virtual void FillUnrolledNet(NetParameter* net_param) const; + virtual void RecurrentInputBlobNames(vector* names) const; + virtual void RecurrentOutputBlobNames(vector* names) const; + virtual void RecurrentInputShapes(vector* shapes) const; + virtual void OutputBlobNames(vector* names) const; +}; + +} // namespace caffe + +#endif // CAFFE_RNN_LAYER_HPP_ diff --git a/include/caffe/layers/scale_layer.hpp b/include/caffe/layers/scale_layer.hpp new file mode 100755 index 0000000..45b714d --- /dev/null +++ b/include/caffe/layers/scale_layer.hpp @@ -0,0 +1,85 @@ +#ifndef CAFFE_SCALE_LAYER_HPP_ +#define CAFFE_SCALE_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/bias_layer.hpp" + +namespace caffe { + +/** + * @brief Computes the elementwise product of two input Blobs, with the shape of + * the latter Blob "broadcast" to match the shape of the former. + * Equivalent to tiling the latter Blob, then computing the elementwise + * product. Note: for efficiency and convenience, this layer can + * additionally perform a "broadcast" sum too when `bias_term: true` + * is set. 
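[Editor's aside] Looking back at RNNLayer above: a scalar sketch of the recurrence h_t = tanh(W_hh * h_{t-1} + W_xh * x_t + b_h), o_t = tanh(W_ho * h_t + b_o), with single numbers standing in for the weight matrices purely for illustration.

// One step of the vanilla RNN defined in rnn_layer.hpp.
#include <cmath>
#include <cstdio>

struct RnnStep { double h; double o; };

RnnStep rnn_step(double x_t, double h_prev,
                 double w_hh, double w_xh, double b_h,
                 double w_ho, double b_o) {
  const double h = std::tanh(w_hh * h_prev + w_xh * x_t + b_h);  // hidden state h_t
  const double o = std::tanh(w_ho * h + b_o);                    // output o_t
  return {h, o};
}

int main() {
  RnnStep s = rnn_step(/*x_t=*/0.5, /*h_prev=*/0.0, 0.8, 0.6, 0.0, 1.0, 0.0);
  std::printf("h_t = %f, o_t = %f\n", s.h, s.o);
  return 0;
}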
+ * + * The latter, scale input may be omitted, in which case it's learned as + * parameter of the layer (as is the bias, if it is included). + */ +template +class ScaleLayer: public Layer { + public: + explicit ScaleLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Scale"; } + // Scale + virtual inline int MinBottomBlobs() const { return 1; } + virtual inline int MaxBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + /** + * In the below shape specifications, @f$ i @f$ denotes the value of the + * `axis` field given by `this->layer_param_.scale_param().axis()`, after + * canonicalization (i.e., conversion from negative to positive index, + * if applicable). + * + * @param bottom input Blob vector (length 2) + * -# @f$ (d_0 \times ... \times + * d_i \times ... \times d_j \times ... \times d_n) @f$ + * the first factor @f$ x @f$ + * -# @f$ (d_i \times ... \times d_j) @f$ + * the second factor @f$ y @f$ + * @param top output Blob vector (length 1) + * -# @f$ (d_0 \times ... \times + * d_i \times ... \times d_j \times ... \times d_n) @f$ + * the product @f$ z = x y @f$ computed after "broadcasting" y. + * Equivalent to tiling @f$ y @f$ to have the same shape as @f$ x @f$, + * then computing the elementwise product. + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + shared_ptr > bias_layer_; + vector*> bias_bottom_vec_; + vector bias_propagate_down_; + int bias_param_id_; + + Blob sum_multiplier_; + Blob sum_result_; + Blob temp_; + int axis_; + int outer_dim_, scale_dim_, inner_dim_; +}; + + +} // namespace caffe + +#endif // CAFFE_SCALE_LAYER_HPP_ diff --git a/include/caffe/layers/sigmoid_cross_entropy_loss_layer.hpp b/include/caffe/layers/sigmoid_cross_entropy_loss_layer.hpp new file mode 100755 index 0000000..6452ea5 --- /dev/null +++ b/include/caffe/layers/sigmoid_cross_entropy_loss_layer.hpp @@ -0,0 +1,112 @@ +#ifndef CAFFE_SIGMOID_CROSS_ENTROPY_LOSS_LAYER_HPP_ +#define CAFFE_SIGMOID_CROSS_ENTROPY_LOSS_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/loss_layer.hpp" +#include "caffe/layers/sigmoid_layer.hpp" + +namespace caffe { + +/** + * @brief Computes the cross-entropy (logistic) loss @f$ + * E = \frac{-1}{n} \sum\limits_{n=1}^N \left[ + * p_n \log \hat{p}_n + + * (1 - p_n) \log(1 - \hat{p}_n) + * \right] + * @f$, often used for predicting targets interpreted as probabilities. + * + * This layer is implemented rather than separate + * SigmoidLayer + CrossEntropyLayer + * as its gradient computation is more numerically stable. + * At test time, this layer can be replaced simply by a SigmoidLayer. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the scores @f$ x \in [-\infty, +\infty]@f$, + * which this layer maps to probability predictions + * @f$ \hat{p}_n = \sigma(x_n) \in [0, 1] @f$ + * using the sigmoid function @f$ \sigma(.) @f$ (see SigmoidLayer). 
+ * -# @f$ (N \times C \times H \times W) @f$ + * the targets @f$ y \in [0, 1] @f$ + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed cross-entropy loss: @f$ + * E = \frac{-1}{n} \sum\limits_{n=1}^N \left[ + * p_n \log \hat{p}_n + (1 - p_n) \log(1 - \hat{p}_n) + * \right] + * @f$ + */ +template +class SigmoidCrossEntropyLossLayer : public LossLayer { + public: + explicit SigmoidCrossEntropyLossLayer(const LayerParameter& param) + : LossLayer(param), + sigmoid_layer_(new SigmoidLayer(param)), + sigmoid_output_(new Blob()) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "SigmoidCrossEntropyLoss"; } + + protected: + /// @copydoc SigmoidCrossEntropyLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. the + * predictions. + * + * Gradients cannot be computed with respect to the target inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as gradient computation with respect + * to the targets is not implemented. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$x@f$; Backward computes diff + * @f$ \frac{\partial E}{\partial x} = + * \frac{1}{n} \sum\limits_{n=1}^N (\hat{p}_n - p_n) + * @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + /// The internal SigmoidLayer used to map predictions to probabilities. + shared_ptr > sigmoid_layer_; + /// sigmoid_output stores the output of the SigmoidLayer. 
+ shared_ptr > sigmoid_output_; + /// bottom vector holder to call the underlying SigmoidLayer::Forward + vector*> sigmoid_bottom_vec_; + /// top vector holder to call the underlying SigmoidLayer::Forward + vector*> sigmoid_top_vec_; +}; + +} // namespace caffe + +#endif // CAFFE_SIGMOID_CROSS_ENTROPY_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/sigmoid_layer.hpp b/include/caffe/layers/sigmoid_layer.hpp new file mode 100755 index 0000000..ac0f692 --- /dev/null +++ b/include/caffe/layers/sigmoid_layer.hpp @@ -0,0 +1,71 @@ +#ifndef CAFFE_SIGMOID_LAYER_HPP_ +#define CAFFE_SIGMOID_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/neuron_layer.hpp" + +namespace caffe { + +/** + * @brief Sigmoid function non-linearity @f$ + * y = (1 + \exp(-x))^{-1} + * @f$, a classic choice in neural networks. + * + * Note that the gradient vanishes as the values move away from 0. + * The ReLULayer is often a better choice for this reason. + */ +template +class SigmoidLayer : public NeuronLayer { + public: + explicit SigmoidLayer(const LayerParameter& param) + : NeuronLayer(param) {} + + virtual inline const char* type() const { return "Sigmoid"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = (1 + \exp(-x))^{-1} + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the sigmoid inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} + * = \frac{\partial E}{\partial y} y (1 - y) + * @f$ if propagate_down[0] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +} // namespace caffe + +#endif // CAFFE_SIGMOID_LAYER_HPP_ diff --git a/include/caffe/layers/silence_layer.hpp b/include/caffe/layers/silence_layer.hpp new file mode 100755 index 0000000..fba087f --- /dev/null +++ b/include/caffe/layers/silence_layer.hpp @@ -0,0 +1,43 @@ +#ifndef CAFFE_SILENCE_LAYER_HPP_ +#define CAFFE_SILENCE_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Ignores bottom blobs while producing no top blobs. (This is useful + * to suppress outputs during testing.) 
+ */ +template +class SilenceLayer : public Layer { + public: + explicit SilenceLayer(const LayerParameter& param) + : Layer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "Silence"; } + virtual inline int MinBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 0; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top) {} + // We can't define Forward_gpu here, since STUB_GPU will provide + // its own definition for CPU_ONLY mode. + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +} // namespace caffe + +#endif // CAFFE_SILENCE_LAYER_HPP_ diff --git a/include/caffe/layers/slice_layer.hpp b/include/caffe/layers/slice_layer.hpp new file mode 100755 index 0000000..10a0abb --- /dev/null +++ b/include/caffe/layers/slice_layer.hpp @@ -0,0 +1,51 @@ +#ifndef CAFFE_SLICE_LAYER_HPP_ +#define CAFFE_SLICE_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Takes a Blob and slices it along either the num or channel dimension, + * outputting multiple sliced Blob results. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class SliceLayer : public Layer { + public: + explicit SliceLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Slice"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int count_; + int num_slices_; + int slice_size_; + int slice_axis_; + vector slice_point_; +}; + +} // namespace caffe + +#endif // CAFFE_SLICE_LAYER_HPP_ diff --git a/include/caffe/layers/softmax_layer.hpp b/include/caffe/layers/softmax_layer.hpp new file mode 100755 index 0000000..c65b870 --- /dev/null +++ b/include/caffe/layers/softmax_layer.hpp @@ -0,0 +1,50 @@ +#ifndef CAFFE_SOFTMAX_LAYER_HPP_ +#define CAFFE_SOFTMAX_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Computes the softmax function. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. 
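[Editor's aside] In lieu of the missing documentation for SoftmaxLayer: a plain-C++ sketch of a numerically stable softmax over one vector of scores (subtract the max before exponentiating); the layer applies this along its softmax axis for every outer/inner position of the blob.

// p_k = exp(x_k - max(x)) / sum_j exp(x_j - max(x)); subtracting the max
// avoids overflow without changing the result.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

std::vector<float> softmax(const std::vector<float>& x) {
  const float m = *std::max_element(x.begin(), x.end());
  std::vector<float> p(x.size());
  float sum = 0.f;
  for (size_t k = 0; k < x.size(); ++k) { p[k] = std::exp(x[k] - m); sum += p[k]; }
  for (float& v : p) v /= sum;
  return p;
}

int main() {
  std::vector<float> p = softmax({1.f, 2.f, 3.f});
  for (float v : p) std::printf("%f ", v);  // ~0.090 0.245 0.665
  return 0;
}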
+ */ +template +class SoftmaxLayer : public Layer { + public: + explicit SoftmaxLayer(const LayerParameter& param) + : Layer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Softmax"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int outer_num_; + int inner_num_; + int softmax_axis_; + /// sum_multiplier is used to carry out sum using BLAS + Blob sum_multiplier_; + /// scale is an intermediate Blob to hold temporary results. + Blob scale_; +}; + +} // namespace caffe + +#endif // CAFFE_SOFTMAX_LAYER_HPP_ diff --git a/include/caffe/layers/softmax_loss_layer.hpp b/include/caffe/layers/softmax_loss_layer.hpp new file mode 100755 index 0000000..f07e8a0 --- /dev/null +++ b/include/caffe/layers/softmax_loss_layer.hpp @@ -0,0 +1,130 @@ +#ifndef CAFFE_SOFTMAX_WITH_LOSS_LAYER_HPP_ +#define CAFFE_SOFTMAX_WITH_LOSS_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/loss_layer.hpp" +#include "caffe/layers/softmax_layer.hpp" + +namespace caffe { + +/** + * @brief Computes the multinomial logistic loss for a one-of-many + * classification task, passing real-valued predictions through a + * softmax to get a probability distribution over classes. + * + * This layer should be preferred over separate + * SoftmaxLayer + MultinomialLogisticLossLayer + * as its gradient computation is more numerically stable. + * At test time, this layer can be replaced simply by a SoftmaxLayer. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ x @f$, a Blob with values in + * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of + * the @f$ K = CHW @f$ classes. This layer maps these scores to a + * probability distribution over classes using the softmax function + * @f$ \hat{p}_{nk} = \exp(x_{nk}) / + * \left[\sum_{k'} \exp(x_{nk'})\right] @f$ (see SoftmaxLayer). + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed cross-entropy classification loss: @f$ E = + * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) + * @f$, for softmax output class probabilites @f$ \hat{p} @f$ + */ +template +class SoftmaxWithLossLayer : public LossLayer { + public: + /** + * @param param provides LossParameter loss_param, with options: + * - ignore_label (optional) + * Specify a label value that should be ignored when computing the loss. + * - normalize (optional, default true) + * If true, the loss is normalized by the number of (nonignored) labels + * present; otherwise the loss is simply summed over spatial locations. 
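[Editor's aside] A sketch of how the loss interacts with ignore_label and the normalize option just described (mean over non-ignored samples versus a plain sum); the names here are illustrative only, and the real layer supports further normalization modes via get_normalizer and LossParameter_NormalizationMode below.

// Cross-entropy classification loss over per-sample softmax outputs prob[n][k],
// skipping samples whose label equals ignore_label.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

float softmax_loss(const std::vector<std::vector<float> >& prob,
                   const std::vector<int>& label,
                   bool has_ignore_label, int ignore_label, bool normalize) {
  float loss = 0.f;
  int valid = 0;
  for (size_t n = 0; n < prob.size(); ++n) {
    if (has_ignore_label && label[n] == ignore_label) continue;
    loss -= std::log(std::max(prob[n][label[n]], 1e-20f));
    ++valid;
  }
  const float normalizer = normalize ? std::max(valid, 1) : 1;  // mean over valid labels vs. sum
  return loss / normalizer;
}

int main() {
  std::vector<std::vector<float> > prob = {{0.7f, 0.3f}, {0.4f, 0.6f}, {0.5f, 0.5f}};
  std::vector<int> label = {0, 1, -1};  // -1 marked as the ignored label here
  std::printf("%f\n", softmax_loss(prob, label, true, -1, true));
  return 0;
}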
+ */ + explicit SoftmaxWithLossLayer(const LayerParameter& param) + : LossLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "SoftmaxWithLoss"; } + virtual inline int ExactNumTopBlobs() const { return -1; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MaxTopBlobs() const { return 2; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + /** + * @brief Computes the softmax loss error gradient w.r.t. the predictions. + * + * Gradients cannot be computed with respect to the label inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as we can't compute gradients with + * respect to the labels. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ x @f$; Backward computes diff + * @f$ \frac{\partial E}{\partial x} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + /// Read the normalization mode parameter and compute the normalizer based + /// on the blob size. If normalization_mode is VALID, the count of valid + /// outputs will be read from valid_count, unless it is -1 in which case + /// all outputs are assumed to be valid. + virtual Dtype get_normalizer( + LossParameter_NormalizationMode normalization_mode, int valid_count); + + /// The internal SoftmaxLayer used to map predictions to a distribution. + shared_ptr > softmax_layer_; + /// prob stores the output probability predictions from the SoftmaxLayer. + Blob prob_; + /// bottom vector holder used in call to the underlying SoftmaxLayer::Forward + vector*> softmax_bottom_vec_; + /// top vector holder used in call to the underlying SoftmaxLayer::Forward + vector*> softmax_top_vec_; + /// Whether to ignore instances with a certain label. + bool has_ignore_label_; + /// The label indicating that an instance should be ignored. + int ignore_label_; + /// How to normalize the output loss. 
+ LossParameter_NormalizationMode normalization_; + + int softmax_axis_, outer_num_, inner_num_; +}; + +} // namespace caffe + +#endif // CAFFE_SOFTMAX_WITH_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/split_layer.hpp b/include/caffe/layers/split_layer.hpp new file mode 100755 index 0000000..8140dfc --- /dev/null +++ b/include/caffe/layers/split_layer.hpp @@ -0,0 +1,45 @@ +#ifndef CAFFE_SPLIT_LAYER_HPP_ +#define CAFFE_SPLIT_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Creates a "split" path in the network by copying the bottom Blob + * into multiple top Blob%s to be used by multiple consuming layers. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class SplitLayer : public Layer { + public: + explicit SplitLayer(const LayerParameter& param) + : Layer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Split"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int count_; +}; + +} // namespace caffe + +#endif // CAFFE_SPLIT_LAYER_HPP_ diff --git a/include/caffe/layers/spp_layer.hpp b/include/caffe/layers/spp_layer.hpp new file mode 100755 index 0000000..9f145cc --- /dev/null +++ b/include/caffe/layers/spp_layer.hpp @@ -0,0 +1,76 @@ +#ifndef CAFFE_SPP_LAYER_HPP_ +#define CAFFE_SPP_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Does spatial pyramid pooling on the input image + * by taking the max, average, etc. within regions + * so that the result vector of different sized + * images are of the same size. 
+ */ +template +class SPPLayer : public Layer { + public: + explicit SPPLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "SPP"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + // calculates the kernel and stride dimensions for the pooling layer, + // returns a correctly configured LayerParameter for a PoolingLayer + virtual LayerParameter GetPoolingParam(const int pyramid_level, + const int bottom_h, const int bottom_w, const SPPParameter spp_param); + + int pyramid_height_; + int bottom_h_, bottom_w_; + int num_; + int channels_; + int kernel_h_, kernel_w_; + int pad_h_, pad_w_; + bool reshaped_first_time_; + + /// the internal Split layer that feeds the pooling layers + shared_ptr > split_layer_; + /// top vector holder used in call to the underlying SplitLayer::Forward + vector*> split_top_vec_; + /// bottom vector holder used in call to the underlying PoolingLayer::Forward + vector*>*> pooling_bottom_vecs_; + /// the internal Pooling layers of different kernel sizes + vector > > pooling_layers_; + /// top vector holders used in call to the underlying PoolingLayer::Forward + vector*>*> pooling_top_vecs_; + /// pooling_outputs stores the outputs of the PoolingLayers + vector*> pooling_outputs_; + /// the internal Flatten layers that the Pooling layers feed into + vector*> flatten_layers_; + /// top vector holders used in call to the underlying FlattenLayer::Forward + vector*>*> flatten_top_vecs_; + /// flatten_outputs stores the outputs of the FlattenLayers + vector*> flatten_outputs_; + /// bottom vector holder used in call to the underlying ConcatLayer::Forward + vector*> concat_bottom_vec_; + /// the internal Concat layers that the Flatten layers feed into + shared_ptr > concat_layer_; +}; + +} // namespace caffe + +#endif // CAFFE_SPP_LAYER_HPP_ diff --git a/include/caffe/layers/tanh_layer.hpp b/include/caffe/layers/tanh_layer.hpp new file mode 100755 index 0000000..8f95e93 --- /dev/null +++ b/include/caffe/layers/tanh_layer.hpp @@ -0,0 +1,73 @@ +#ifndef CAFFE_TANH_LAYER_HPP_ +#define CAFFE_TANH_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/neuron_layer.hpp" + +namespace caffe { + +/** + * @brief TanH hyperbolic tangent non-linearity @f$ + * y = \frac{\exp(2x) - 1}{\exp(2x) + 1} + * @f$, popular in auto-encoders. + * + * Note that the gradient vanishes as the values move away from 0. + * The ReLULayer is often a better choice for this reason. 
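GetPoolingParam above derives a kernel, stride, and pad for each pyramid level so that every input size yields the same-length pooled vector. The sketch below shows one common way to compute such windows for a single spatial dimension (the struct and function names are made up and this is not necessarily the exact Caffe arithmetic):

```cpp
#include <cstdio>

struct PoolingWindow { int kernel, stride, pad; };

// Derive a non-overlapping pooling window that splits a dimension of size
// `bottom_dim` into 2^level bins, padding so the bins cover it exactly.
PoolingWindow pyramid_level_window(int level, int bottom_dim) {
  int num_bins = 1 << level;
  PoolingWindow w;
  w.kernel = (bottom_dim + num_bins - 1) / num_bins;        // ceil(dim / bins)
  w.stride = w.kernel;                                       // non-overlapping bins
  w.pad = (w.kernel * num_bins - bottom_dim + 1) / 2;        // pad so bins fit
  return w;
}

int main() {
  for (int level = 0; level < 3; ++level) {
    PoolingWindow w = pyramid_level_window(level, 13);
    int bins = (13 + 2 * w.pad - w.kernel) / w.stride + 1;   // pooled output size
    std::printf("level %d: kernel %d stride %d pad %d -> %d bins\n",
                level, w.kernel, w.stride, w.pad, bins);
  }
  return 0;
}
```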
+ */ +template +class TanHLayer : public NeuronLayer { + public: + explicit TanHLayer(const LayerParameter& param) + : NeuronLayer(param) {} + + virtual inline const char* type() const { return "TanH"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = \frac{\exp(2x) - 1}{\exp(2x) + 1} + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the sigmoid inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} + * = \frac{\partial E}{\partial y} + * \left(1 - \left[\frac{\exp(2x) - 1}{exp(2x) + 1} \right]^2 \right) + * = \frac{\partial E}{\partial y} (1 - y^2) + * @f$ if propagate_down[0] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +} // namespace caffe + +#endif // CAFFE_TANH_LAYER_HPP_ diff --git a/include/caffe/layers/threshold_layer.hpp b/include/caffe/layers/threshold_layer.hpp new file mode 100755 index 0000000..3bf4db6 --- /dev/null +++ b/include/caffe/layers/threshold_layer.hpp @@ -0,0 +1,64 @@ +#ifndef CAFFE_THRESHOLD_LAYER_HPP_ +#define CAFFE_THRESHOLD_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/neuron_layer.hpp" + +namespace caffe { + +/** + * @brief Tests whether the input exceeds a threshold: outputs 1 for inputs + * above threshold; 0 otherwise. + */ +template +class ThresholdLayer : public NeuronLayer { + public: + /** + * @param param provides ThresholdParameter threshold_param, + * with ThresholdLayer options: + * - threshold (\b optional, default 0). + * the threshold value @f$ t @f$ to which the input values are compared. + */ + explicit ThresholdLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Threshold"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = \left\{ + * \begin{array}{lr} + * 0 & \mathrm{if} \; x \le t \\ + * 1 & \mathrm{if} \; x > t + * \end{array} \right. 
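For reference, the TanH forward rule and the gradient identity dE/dx = dE/dy (1 - y^2) documented above can be checked numerically with a short standalone sketch (illustrative only, not the TanHLayer code, and not robust for large |x|):

```cpp
// Forward: y = (exp(2x)-1)/(exp(2x)+1); backward: dE/dx = dE/dy * (1 - y^2).
#include <cmath>
#include <cstdio>

double tanh_forward(double x) {
  return (std::exp(2 * x) - 1) / (std::exp(2 * x) + 1);
}
double tanh_backward(double top_diff, double y) { return top_diff * (1.0 - y * y); }

int main() {
  double x = 0.3, eps = 1e-6;
  double y = tanh_forward(x);
  double analytic = tanh_backward(1.0, y);  // assume dE/dy = 1
  double numeric = (tanh_forward(x + eps) - tanh_forward(x - eps)) / (2 * eps);
  std::printf("y = %f  analytic grad = %f  numeric grad = %f\n",
              y, analytic, numeric);
  return 0;
}
```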
+ * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + /// @brief Not implemented (non-differentiable function) + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + NOT_IMPLEMENTED; + } + + Dtype threshold_; +}; + +} // namespace caffe + +#endif // CAFFE_THRESHOLD_LAYER_HPP_ diff --git a/include/caffe/layers/tile_layer.hpp b/include/caffe/layers/tile_layer.hpp new file mode 100755 index 0000000..fbdbe2f --- /dev/null +++ b/include/caffe/layers/tile_layer.hpp @@ -0,0 +1,43 @@ +#ifndef CAFFE_TILE_LAYER_HPP_ +#define CAFFE_TILE_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Copy a Blob along specified dimensions. + */ +template +class TileLayer : public Layer { + public: + explicit TileLayer(const LayerParameter& param) + : Layer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Tile"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + unsigned int axis_, tiles_, outer_dim_, inner_dim_; +}; + +} // namespace caffe + +#endif // CAFFE_TILE_LAYER_HPP_ diff --git a/include/caffe/layers/window_data_layer.hpp b/include/caffe/layers/window_data_layer.hpp new file mode 100755 index 0000000..35f41b8 --- /dev/null +++ b/include/caffe/layers/window_data_layer.hpp @@ -0,0 +1,55 @@ +#ifndef CAFFE_WINDOW_DATA_LAYER_HPP_ +#define CAFFE_WINDOW_DATA_LAYER_HPP_ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/data_transformer.hpp" +#include "caffe/internal_thread.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/base_data_layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Provides data to the Net from windows of images files, specified + * by a window data file. + * + * TODO(dox): thorough documentation for Forward and proto params. 
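TileLayer's members outer_dim_, tiles_, and inner_dim_ (declared above) suggest the usual decomposition of the tiled axis into an outer and an inner block. A small standalone sketch of that indexing, under assumed semantics rather than the Caffe source:

```cpp
// Tile a flattened blob `tiles` times along one axis using the
// outer/inner decomposition.
#include <cstdio>
#include <vector>

std::vector<float> tile(const std::vector<float>& bottom,
                        int outer_dim, int inner_dim, int tiles) {
  std::vector<float> top(bottom.size() * tiles);
  for (int o = 0; o < outer_dim; ++o)
    for (int t = 0; t < tiles; ++t)
      for (int i = 0; i < inner_dim; ++i)
        top[(o * tiles + t) * inner_dim + i] = bottom[o * inner_dim + i];
  return top;
}

int main() {
  float raw[] = {1, 2, 3, 4};                        // shape (2, 2)
  std::vector<float> bottom(raw, raw + 4);
  std::vector<float> top = tile(bottom, 2, 2, 3);    // tile axis 1 three times
  for (size_t i = 0; i < top.size(); ++i) std::printf("%g ", top[i]);
  std::printf("\n");                                 // 1 2 1 2 1 2 3 4 3 4 3 4
  return 0;
}
```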
+ */ +template +class WindowDataLayer : public BasePrefetchingDataLayer { + public: + explicit WindowDataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param) {} + virtual ~WindowDataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "WindowData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + protected: + virtual unsigned int PrefetchRand(); + virtual void load_batch(Batch* batch); + + shared_ptr prefetch_rng_; + vector > > image_database_; + enum WindowField { IMAGE_INDEX, LABEL, OVERLAP, X1, Y1, X2, Y2, NUM }; + vector > fg_windows_; + vector > bg_windows_; + Blob data_mean_; + vector mean_values_; + bool has_mean_file_; + bool has_mean_values_; + bool cache_images_; + vector > image_database_cache_; +}; + +} // namespace caffe + +#endif // CAFFE_WINDOW_DATA_LAYER_HPP_ diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp new file mode 100755 index 0000000..5282663 --- /dev/null +++ b/include/caffe/loss_layers.hpp @@ -0,0 +1,768 @@ +#ifndef CAFFE_LOSS_LAYERS_HPP_ +#define CAFFE_LOSS_LAYERS_HPP_ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/neuron_layers.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +const float kLOG_THRESHOLD = 1e-20; + +/** + * @brief Computes the classification accuracy for a one-of-many + * classification task. + */ +template +class AccuracyLayer : public Layer { + public: + /** + * @param param provides AccuracyParameter accuracy_param, + * with AccuracyLayer options: + * - top_k (\b optional, default 1). + * Sets the maximum rank @f$ k @f$ at which a prediction is considered + * correct. For example, if @f$ k = 5 @f$, a prediction is counted + * correct if the correct label is among the top 5 predicted labels. + */ + explicit AccuracyLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Accuracy"; } + virtual inline int ExactNumBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + /** + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ x @f$, a Blob with values in + * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of + * the @f$ K = CHW @f$ classes. Each @f$ x_n @f$ is mapped to a predicted + * label @f$ \hat{l}_n @f$ given by its maximal index: + * @f$ \hat{l}_n = \arg\max\limits_k x_{nk} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed accuracy: @f$ + * \frac{1}{N} \sum\limits_{n=1}^N \delta\{ \hat{l}_n = l_n \} + * @f$, where @f$ + * \delta\{\mathrm{condition}\} = \left\{ + * \begin{array}{lr} + * 1 & \mbox{if condition} \\ + * 0 & \mbox{otherwise} + * \end{array} \right. + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + + /// @brief Not implemented -- AccuracyLayer cannot be used as a loss. 
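The accuracy definition above, together with the top_k option, can be made concrete with a short standalone sketch (illustrative only, not the AccuracyLayer implementation; ties are broken optimistically here):

```cpp
// A sample counts as correct when its true label is among the k
// highest-scoring classes.
#include <cstdio>
#include <vector>

double top_k_accuracy(const std::vector<std::vector<double> >& scores,
                      const std::vector<int>& labels, int top_k) {
  int correct = 0;
  for (size_t n = 0; n < scores.size(); ++n) {
    const std::vector<double>& x = scores[n];
    int rank = 0;  // number of classes scoring strictly above the true label
    for (size_t k = 0; k < x.size(); ++k)
      if (x[k] > x[labels[n]]) ++rank;
    if (rank < top_k) ++correct;
  }
  return double(correct) / scores.size();
}

int main() {
  std::vector<std::vector<double> > scores(2, std::vector<double>(3));
  scores[0][0] = 0.1; scores[0][1] = 0.7; scores[0][2] = 0.2;   // argmax = 1
  scores[1][0] = 0.5; scores[1][1] = 0.3; scores[1][2] = 0.2;   // argmax = 0
  std::vector<int> labels(2); labels[0] = 1; labels[1] = 1;
  std::printf("top-1 accuracy = %.2f\n", top_k_accuracy(scores, labels, 1));  // 0.50
  std::printf("top-2 accuracy = %.2f\n", top_k_accuracy(scores, labels, 2));  // 1.00
  return 0;
}
```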
+ virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + for (int i = 0; i < propagate_down.size(); ++i) { + if (propagate_down[i]) { NOT_IMPLEMENTED; } + } + } + + int label_axis_, outer_num_, inner_num_; + + int top_k_; + + /// Whether to ignore instances with a certain label. + bool has_ignore_label_; + /// The label indicating that an instance should be ignored. + int ignore_label_; +}; + +/** + * @brief An interface for Layer%s that take two Blob%s as input -- usually + * (1) predictions and (2) ground-truth labels -- and output a + * singleton Blob representing the loss. + * + * LossLayers are typically only capable of backpropagating to their first input + * -- the predictions. + */ +template +class LossLayer : public Layer { + public: + explicit LossLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp( + const vector*>& bottom, const vector*>& top); + virtual void Reshape( + const vector*>& bottom, const vector*>& top); + + virtual inline int ExactNumBottomBlobs() const { return 2; } + + /** + * @brief For convenience and backwards compatibility, instruct the Net to + * automatically allocate a single top Blob for LossLayers, into which + * they output their singleton loss, (even if the user didn't specify + * one in the prototxt, etc.). + */ + virtual inline bool AutoTopBlobs() const { return true; } + virtual inline int ExactNumTopBlobs() const { return 1; } + /** + * We usually cannot backpropagate to the labels; ignore force_backward for + * these inputs. + */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return bottom_index != 1; + } +}; + +/** + * @brief Computes the contrastive loss @f$ + * E = \frac{1}{2N} \sum\limits_{n=1}^N \left(y\right) d + + * \left(1-y\right) \max \left(margin-d, 0\right)^2 + * @f$ where @f$ + * d = \left| \left| a_n - b_n \right| \right|_2 @f$. This can be + * used to train siamese networks. + * + * @param bottom input Blob vector (length 3) + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$ a \in [-\infty, +\infty]@f$ + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$ b \in [-\infty, +\infty]@f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the binary similarity @f$ s \in [0, 1]@f$ + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed contrastive loss: @f$ E = + * \frac{1}{2N} \sum\limits_{n=1}^N \left(y\right) d + + * \left(1-y\right) \max \left(margin-d, 0\right)^2 + * @f$ where @f$ + * d = \left| \left| a_n - b_n \right| \right|_2 @f$. + * This can be used to train siamese networks. + */ +template +class ContrastiveLossLayer : public LossLayer { + public: + explicit ContrastiveLossLayer(const LayerParameter& param) + : LossLayer(param), diff_() {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline int ExactNumBottomBlobs() const { return 3; } + virtual inline const char* type() const { return "ContrastiveLoss"; } + /** + * Unlike most loss layers, in the ContrastiveLossLayer we can backpropagate + * to the first two inputs. + */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return bottom_index != 2; + } + + protected: + /// @copydoc ContrastiveLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the Contrastive error gradient w.r.t. the inputs. 
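A worked standalone sketch of the contrastive loss formula given above, with d = ||a_n - b_n||_2 and a free margin parameter (illustrative; not the ContrastiveLossLayer code):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

double contrastive_loss(const std::vector<std::vector<double> >& a,
                        const std::vector<std::vector<double> >& b,
                        const std::vector<int>& similar, double margin) {
  double loss = 0.0;
  for (size_t n = 0; n < a.size(); ++n) {
    double d = 0.0;
    for (size_t c = 0; c < a[n].size(); ++c) {
      double diff = a[n][c] - b[n][c];
      d += diff * diff;
    }
    d = std::sqrt(d);                             // d = ||a_n - b_n||_2
    if (similar[n]) {
      loss += d;                                  // pull similar pairs together
    } else {
      double m = std::max(margin - d, 0.0);       // push dissimilar pairs apart
      loss += m * m;
    }
  }
  return loss / (2.0 * a.size());
}

int main() {
  std::vector<std::vector<double> > a(2, std::vector<double>(2));
  std::vector<std::vector<double> > b(2, std::vector<double>(2));
  a[0][0] = 1; a[0][1] = 2;  b[0][0] = 1; b[0][1] = 2.5;   // similar pair
  a[1][0] = 0; a[1][1] = 0;  b[1][0] = 3; b[1][1] = 4;     // dissimilar pair
  std::vector<int> similar(2); similar[0] = 1; similar[1] = 0;
  std::printf("loss = %f\n", contrastive_loss(a, b, similar, 1.0));
  return 0;
}
```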
+ * + * Computes the gradients with respect to the two input vectors (bottom[0] and + * bottom[1]), but not the similarity label (bottom[2]). + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$a@f$; Backward fills their diff with + * gradients if propagate_down[0] + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$b@f$; Backward fills their diff with gradients if + * propagate_down[1] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob diff_; // cached for backward pass + Blob dist_sq_; // cached for backward pass + Blob diff_sq_; // tmp storage for gpu forward pass + Blob summer_vec_; // tmp storage for gpu forward pass +}; + +/** + * @brief Computes the Euclidean (L2) loss @f$ + * E = \frac{1}{2N} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n + * \right| \right|_2^2 @f$ for real-valued regression tasks. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{y} \in [-\infty, +\infty]@f$ + * -# @f$ (N \times C \times H \times W) @f$ + * the targets @f$ y \in [-\infty, +\infty]@f$ + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed Euclidean loss: @f$ E = + * \frac{1}{2n} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n + * \right| \right|_2^2 @f$ + * + * This can be used for least-squares regression tasks. An InnerProductLayer + * input to a EuclideanLossLayer exactly formulates a linear least squares + * regression problem. With non-zero weight decay the problem becomes one of + * ridge regression -- see src/caffe/test/test_sgd_solver.cpp for a concrete + * example wherein we check that the gradients computed for a Net with exactly + * this structure match hand-computed gradient formulas for ridge regression. + * + * (Note: Caffe, and SGD in general, is certainly \b not the best way to solve + * linear least squares problems! We use it only as an instructive example.) + */ +template +class EuclideanLossLayer : public LossLayer { + public: + explicit EuclideanLossLayer(const LayerParameter& param) + : LossLayer(param), diff_() {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "EuclideanLoss"; } + /** + * Unlike most loss layers, in the EuclideanLossLayer we can backpropagate + * to both inputs -- override to return true and always allow force_backward. 
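The Euclidean loss and its "commutative" gradient described above amount to the following standalone sketch: E = 1/(2N) sum_n ||yhat_n - y_n||^2, with dE/dyhat = (yhat - y)/N and dE/dy = (y - yhat)/N (this is just the math, not the Caffe implementation):

```cpp
#include <cstdio>
#include <vector>

double euclidean_loss(const std::vector<double>& yhat,
                      const std::vector<double>& y,
                      int num, std::vector<double>* dyhat) {
  double loss = 0.0;
  dyhat->resize(yhat.size());
  for (size_t i = 0; i < yhat.size(); ++i) {
    double diff = yhat[i] - y[i];
    loss += diff * diff;
    (*dyhat)[i] = diff / num;     // gradient w.r.t. the predictions
  }
  return loss / (2.0 * num);
}

int main() {
  double p[] = {1.0, 2.0, 3.0, 4.0};
  double t[] = {1.5, 2.0, 2.0, 4.0};
  std::vector<double> yhat(p, p + 4), y(t, t + 4), grad;
  // Treat the 4 values as N = 2 samples of 2 channels each.
  std::printf("loss = %f\n", euclidean_loss(yhat, y, 2, &grad));
  std::printf("dE/dyhat[0] = %f\n", grad[0]);
  return 0;
}
```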
+ */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return true; + } + + protected: + /// @copydoc EuclideanLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the Euclidean error gradient w.r.t. the inputs. + * + * Unlike other children of LossLayer, EuclideanLossLayer \b can compute + * gradients with respect to the label inputs bottom[1] (but still only will + * if propagate_down[1] is set, due to being produced by learnable parameters + * or if force_backward is set). In fact, this layer is "commutative" -- the + * result is the same regardless of the order of the two bottoms. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$\hat{y}@f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial \hat{y}} = + * \frac{1}{n} \sum\limits_{n=1}^N (\hat{y}_n - y_n) + * @f$ if propagate_down[0] + * -# @f$ (N \times C \times H \times W) @f$ + * the targets @f$y@f$; Backward fills their diff with gradients + * @f$ \frac{\partial E}{\partial y} = + * \frac{1}{n} \sum\limits_{n=1}^N (y_n - \hat{y}_n) + * @f$ if propagate_down[1] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob diff_; +}; + +/** + * @brief Computes the hinge loss for a one-of-many classification task. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ t @f$, a Blob with values in + * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of + * the @f$ K = CHW @f$ classes. In an SVM, @f$ t @f$ is the result of + * taking the inner product @f$ X^T W @f$ of the D-dimensional features + * @f$ X \in \mathcal{R}^{D \times N} @f$ and the learned hyperplane + * parameters @f$ W \in \mathcal{R}^{D \times K} @f$, so a Net with just + * an InnerProductLayer (with num_output = D) providing predictions to a + * HingeLossLayer and no other learnable parameters or losses is + * equivalent to an SVM. 
+ * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed hinge loss: @f$ E = + * \frac{1}{N} \sum\limits_{n=1}^N \sum\limits_{k=1}^K + * [\max(0, 1 - \delta\{l_n = k\} t_{nk})] ^ p + * @f$, for the @f$ L^p @f$ norm + * (defaults to @f$ p = 1 @f$, the L1 norm; L2 norm, as in L2-SVM, + * is also available), and @f$ + * \delta\{\mathrm{condition}\} = \left\{ + * \begin{array}{lr} + * 1 & \mbox{if condition} \\ + * -1 & \mbox{otherwise} + * \end{array} \right. + * @f$ + * + * In an SVM, @f$ t \in \mathcal{R}^{N \times K} @f$ is the result of taking + * the inner product @f$ X^T W @f$ of the features + * @f$ X \in \mathcal{R}^{D \times N} @f$ + * and the learned hyperplane parameters + * @f$ W \in \mathcal{R}^{D \times K} @f$. So, a Net with just an + * InnerProductLayer (with num_output = @f$k@f$) providing predictions to a + * HingeLossLayer is equivalent to an SVM (assuming it has no other learned + * outside the InnerProductLayer and no other losses outside the + * HingeLossLayer). + */ +template +class HingeLossLayer : public LossLayer { + public: + explicit HingeLossLayer(const LayerParameter& param) + : LossLayer(param) {} + + virtual inline const char* type() const { return "HingeLoss"; } + + protected: + /// @copydoc HingeLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the hinge loss error gradient w.r.t. the predictions. + * + * Gradients cannot be computed with respect to the label inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as we can't compute gradients with + * respect to the labels. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$t@f$; Backward computes diff + * @f$ \frac{\partial E}{\partial t} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +/** + * @brief A generalization of MultinomialLogisticLossLayer that takes an + * "information gain" (infogain) matrix specifying the "value" of all label + * pairs. + * + * Equivalent to the MultinomialLogisticLossLayer if the infogain matrix is the + * identity. + * + * @param bottom input Blob vector (length 2-3) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{p} @f$, a Blob with values in + * @f$ [0, 1] @f$ indicating the predicted probability of each of the + * @f$ K = CHW @f$ classes. 
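The hinge loss above, with its +1/-1 sign convention for the true versus other classes, can be checked with a tiny standalone example (assuming p = 1, the default L1 norm; not the HingeLossLayer code):

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

double hinge_loss(const std::vector<std::vector<double> >& scores,
                  const std::vector<int>& labels) {
  double loss = 0.0;
  for (size_t n = 0; n < scores.size(); ++n) {
    for (size_t k = 0; k < scores[n].size(); ++k) {
      double sign = (static_cast<int>(k) == labels[n]) ? 1.0 : -1.0;
      loss += std::max(0.0, 1.0 - sign * scores[n][k]);
    }
  }
  return loss / scores.size();
}

int main() {
  std::vector<std::vector<double> > scores(1, std::vector<double>(3));
  scores[0][0] = 2.0; scores[0][1] = -1.5; scores[0][2] = 0.3;
  std::vector<int> labels(1, 0);
  // Class 0 (the true class) clears the +1 margin, class 1 sits below -1,
  // and class 2 violates the margin by 1.3, so the loss is 1.3.
  std::printf("hinge loss = %f\n", hinge_loss(scores, labels));
  return 0;
}
```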
Each prediction vector @f$ \hat{p}_n @f$ + * should sum to 1 as in a probability distribution: @f$ + * \forall n \sum\limits_{k=1}^K \hat{p}_{nk} = 1 @f$. + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * -# @f$ (1 \times 1 \times K \times K) @f$ + * (\b optional) the infogain matrix @f$ H @f$. This must be provided as + * the third bottom blob input if not provided as the infogain_mat in the + * InfogainLossParameter. If @f$ H = I @f$, this layer is equivalent to the + * MultinomialLogisticLossLayer. + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed infogain multinomial logistic loss: @f$ E = + * \frac{-1}{N} \sum\limits_{n=1}^N H_{l_n} \log(\hat{p}_n) = + * \frac{-1}{N} \sum\limits_{n=1}^N \sum\limits_{k=1}^{K} H_{l_n,k} + * \log(\hat{p}_{n,k}) + * @f$, where @f$ H_{l_n} @f$ denotes row @f$l_n@f$ of @f$H@f$. + */ +template +class InfogainLossLayer : public LossLayer { + public: + explicit InfogainLossLayer(const LayerParameter& param) + : LossLayer(param), infogain_() {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + // InfogainLossLayer takes 2-3 bottom Blobs; if there are 3 the third should + // be the infogain matrix. (Otherwise the infogain matrix is loaded from a + // file specified by LayerParameter.) + virtual inline int ExactNumBottomBlobs() const { return -1; } + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int MaxBottomBlobs() const { return 3; } + + virtual inline const char* type() const { return "InfogainLoss"; } + + protected: + /// @copydoc InfogainLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the infogain loss error gradient w.r.t. the predictions. + * + * Gradients cannot be computed with respect to the label inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. (The same applies to the infogain matrix, if + * provided as bottom[2] rather than in the layer_param.) + * + * @param top output Blob vector (length 1), providing the error gradient + * with respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. 
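A worked sketch of the infogain loss defined above; with H equal to the identity it reduces to the plain multinomial logistic loss. The helper below is illustrative (not the InfogainLossLayer code) and clips probabilities in the spirit of kLOG_THRESHOLD:

```cpp
// E = -1/N * sum_n sum_k H[label_n][k] * log(phat[n][k])
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

double infogain_loss(const std::vector<std::vector<double> >& phat,
                     const std::vector<int>& labels,
                     const std::vector<std::vector<double> >& H) {
  const double kLogThreshold = 1e-20;            // avoid log(0)
  double loss = 0.0;
  for (size_t n = 0; n < phat.size(); ++n)
    for (size_t k = 0; k < phat[n].size(); ++k)
      loss -= H[labels[n]][k] * std::log(std::max(phat[n][k], kLogThreshold));
  return loss / phat.size();
}

int main() {
  std::vector<std::vector<double> > phat(1, std::vector<double>(2));
  phat[0][0] = 0.8; phat[0][1] = 0.2;
  std::vector<int> labels(1, 0);
  std::vector<std::vector<double> > H(2, std::vector<double>(2, 0.0));
  H[0][0] = H[1][1] = 1.0;                       // identity -> plain log loss
  std::printf("loss = %f\n", infogain_loss(phat, labels, H));  // -log(0.8)
  return 0;
}
```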
+ * propagate_down[1] must be false as we can't compute gradients with + * respect to the labels (similarly for propagate_down[2] and the + * infogain matrix, if provided as bottom[2]) + * @param bottom input Blob vector (length 2-3) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{p} @f$; Backward computes diff + * @f$ \frac{\partial E}{\partial \hat{p}} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + * -# @f$ (1 \times 1 \times K \times K) @f$ + * (\b optional) the information gain matrix -- ignored as its error + * gradient computation is not implemented. + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob infogain_; +}; + +/** + * @brief Computes the multinomial logistic loss for a one-of-many + * classification task, directly taking a predicted probability + * distribution as input. + * + * When predictions are not already a probability distribution, you should + * instead use the SoftmaxWithLossLayer, which maps predictions to a + * distribution using the SoftmaxLayer, before computing the multinomial + * logistic loss. The SoftmaxWithLossLayer should be preferred over separate + * SoftmaxLayer + MultinomialLogisticLossLayer + * as its gradient computation is more numerically stable. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{p} @f$, a Blob with values in + * @f$ [0, 1] @f$ indicating the predicted probability of each of the + * @f$ K = CHW @f$ classes. Each prediction vector @f$ \hat{p}_n @f$ + * should sum to 1 as in a probability distribution: @f$ + * \forall n \sum\limits_{k=1}^K \hat{p}_{nk} = 1 @f$. + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed multinomial logistic loss: @f$ E = + * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) + * @f$ + */ +template +class MultinomialLogisticLossLayer : public LossLayer { + public: + explicit MultinomialLogisticLossLayer(const LayerParameter& param) + : LossLayer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "MultinomialLogisticLoss"; } + + protected: + /// @copydoc MultinomialLogisticLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the multinomial logistic loss error gradient w.r.t. the + * predictions. + * + * Gradients cannot be computed with respect to the label inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) 
+ * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as we can't compute gradients with + * respect to the labels. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{p} @f$; Backward computes diff + * @f$ \frac{\partial E}{\partial \hat{p}} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +/** + * @brief Computes the cross-entropy (logistic) loss @f$ + * E = \frac{-1}{n} \sum\limits_{n=1}^N \left[ + * p_n \log \hat{p}_n + + * (1 - p_n) \log(1 - \hat{p}_n) + * \right] + * @f$, often used for predicting targets interpreted as probabilities. + * + * This layer is implemented rather than separate + * SigmoidLayer + CrossEntropyLayer + * as its gradient computation is more numerically stable. + * At test time, this layer can be replaced simply by a SigmoidLayer. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the scores @f$ x \in [-\infty, +\infty]@f$, + * which this layer maps to probability predictions + * @f$ \hat{p}_n = \sigma(x_n) \in [0, 1] @f$ + * using the sigmoid function @f$ \sigma(.) @f$ (see SigmoidLayer). + * -# @f$ (N \times C \times H \times W) @f$ + * the targets @f$ y \in [0, 1] @f$ + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed cross-entropy loss: @f$ + * E = \frac{-1}{n} \sum\limits_{n=1}^N \left[ + * p_n \log \hat{p}_n + (1 - p_n) \log(1 - \hat{p}_n) + * \right] + * @f$ + */ +template +class SigmoidCrossEntropyLossLayer : public LossLayer { + public: + explicit SigmoidCrossEntropyLossLayer(const LayerParameter& param) + : LossLayer(param), + sigmoid_layer_(new SigmoidLayer(param)), + sigmoid_output_(new Blob()) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "SigmoidCrossEntropyLoss"; } + + protected: + /// @copydoc SigmoidCrossEntropyLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. the + * predictions. + * + * Gradients cannot be computed with respect to the target inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as gradient computation with respect + * to the targets is not implemented. 
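The sigmoid cross-entropy loss above is usually evaluated in a rearranged form that never exponentiates a large positive argument: per element, -[p log sigma(x) + (1-p) log(1-sigma(x))] = max(x, 0) - x*p + log(1 + exp(-|x|)). A standalone sketch of that computation (illustrative, not the layer implementation):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

double sigmoid_cross_entropy(const std::vector<double>& x,
                             const std::vector<double>& p) {
  double loss = 0.0;
  for (size_t i = 0; i < x.size(); ++i) {
    // Stable form of -[p*log(sigmoid(x)) + (1-p)*log(1-sigmoid(x))].
    loss += std::max(x[i], 0.0) - x[i] * p[i]
            + std::log(1.0 + std::exp(-std::fabs(x[i])));
  }
  return loss / x.size();
}

int main() {
  double scores[]  = {2.0, -1.0, 0.0};
  double targets[] = {1.0,  0.0, 0.5};
  std::vector<double> x(scores, scores + 3), p(targets, targets + 3);
  std::printf("loss = %f\n", sigmoid_cross_entropy(x, p));
  return 0;
}
```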
+ * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$x@f$; Backward computes diff + * @f$ \frac{\partial E}{\partial x} = + * \frac{1}{n} \sum\limits_{n=1}^N (\hat{p}_n - p_n) + * @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + /// The internal SigmoidLayer used to map predictions to probabilities. + shared_ptr > sigmoid_layer_; + /// sigmoid_output stores the output of the SigmoidLayer. + shared_ptr > sigmoid_output_; + /// bottom vector holder to call the underlying SigmoidLayer::Forward + vector*> sigmoid_bottom_vec_; + /// top vector holder to call the underlying SigmoidLayer::Forward + vector*> sigmoid_top_vec_; +}; + +// Forward declare SoftmaxLayer for use in SoftmaxWithLossLayer. +template class SoftmaxLayer; + +/** + * @brief Computes the multinomial logistic loss for a one-of-many + * classification task, passing real-valued predictions through a + * softmax to get a probability distribution over classes. + * + * This layer should be preferred over separate + * SoftmaxLayer + MultinomialLogisticLossLayer + * as its gradient computation is more numerically stable. + * At test time, this layer can be replaced simply by a SoftmaxLayer. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ x @f$, a Blob with values in + * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of + * the @f$ K = CHW @f$ classes. This layer maps these scores to a + * probability distribution over classes using the softmax function + * @f$ \hat{p}_{nk} = \exp(x_{nk}) / + * \left[\sum_{k'} \exp(x_{nk'})\right] @f$ (see SoftmaxLayer). + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed cross-entropy classification loss: @f$ E = + * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) + * @f$, for softmax output class probabilites @f$ \hat{p} @f$ + */ +template +class SoftmaxWithLossLayer : public LossLayer { + public: + /** + * @param param provides LossParameter loss_param, with options: + * - ignore_label (optional) + * Specify a label value that should be ignored when computing the loss. + * - normalize (optional, default true) + * If true, the loss is normalized by the number of (nonignored) labels + * present; otherwise the loss is simply summed over spatial locations. 
+ */ + explicit SoftmaxWithLossLayer(const LayerParameter& param) + : LossLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "SoftmaxWithLoss"; } + virtual inline int ExactNumTopBlobs() const { return -1; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MaxTopBlobs() const { return 2; } + + protected: + /// @copydoc SoftmaxWithLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + /** + * @brief Computes the softmax loss error gradient w.r.t. the predictions. + * + * Gradients cannot be computed with respect to the label inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as we can't compute gradients with + * respect to the labels. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ x @f$; Backward computes diff + * @f$ \frac{\partial E}{\partial x} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + + /// The internal SoftmaxLayer used to map predictions to a distribution. + shared_ptr > softmax_layer_; + /// prob stores the output probability predictions from the SoftmaxLayer. + Blob prob_; + /// bottom vector holder used in call to the underlying SoftmaxLayer::Forward + vector*> softmax_bottom_vec_; + /// top vector holder used in call to the underlying SoftmaxLayer::Forward + vector*> softmax_top_vec_; + /// Whether to ignore instances with a certain label. + bool has_ignore_label_; + /// The label indicating that an instance should be ignored. + int ignore_label_; + /// Whether to normalize the loss by the total number of values present + /// (otherwise just by the batch size). + bool normalize_; + + int softmax_axis_, outer_num_, inner_num_; +}; + +} // namespace caffe + +#endif // CAFFE_LOSS_LAYERS_HPP_ diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp new file mode 100755 index 0000000..d393e62 --- /dev/null +++ b/include/caffe/net.hpp @@ -0,0 +1,315 @@ +#ifndef CAFFE_NET_HPP_ +#define CAFFE_NET_HPP_ + +#include +#include +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Connects Layer%s together into a directed acyclic graph (DAG) + * specified by a NetParameter. 
+ * + * TODO(dox): more thorough description. + */ +template +class Net { + public: + explicit Net(const NetParameter& param, const Net* root_net = NULL); + explicit Net(const string& param_file, Phase phase, + const Net* root_net = NULL); + virtual ~Net() {} + + /// @brief Initialize a network with a NetParameter. + void Init(const NetParameter& param); + + /** + * @brief Run Forward with the input Blob%s already fed separately. + * + * You can get the input blobs using input_blobs(). + */ + const vector*>& ForwardPrefilled(Dtype* loss = NULL); + + /** + * The From and To variants of Forward and Backward operate on the + * (topological) ordering by which the net is specified. For general DAG + * networks, note that (1) computing from one layer to another might entail + * extra computation on unrelated branches, and (2) computation starting in + * the middle may be incorrect if all of the layers of a fan-in are not + * included. + */ + Dtype ForwardFromTo(int start, int end); + Dtype ForwardFrom(int start); + Dtype ForwardTo(int end); + /// @brief Run forward using a set of bottom blobs, and return the result. + const vector*>& Forward(const vector* > & bottom, + Dtype* loss = NULL); + /** + * @brief Run forward using a serialized BlobProtoVector and return the + * result as a serialized BlobProtoVector + */ + string Forward(const string& input_blob_protos, Dtype* loss = NULL); + + /** + * @brief Zeroes out the diffs of all net parameters. + * Should be run before Backward. + */ + void ClearParamDiffs(); + + /** + * The network backward should take no input and output, since it solely + * computes the gradient w.r.t the parameters, and the data has already been + * provided during the forward pass. + */ + void Backward(); + void BackwardFromTo(int start, int end); + void BackwardFrom(int start); + void BackwardTo(int end); + + /** + * @brief Reshape all layers from bottom to top. + * + * This is useful to propagate changes to layer sizes without running + * a forward pass, e.g. to compute output feature size. + */ + void Reshape(); + + Dtype ForwardBackward(const vector* > & bottom) { + Dtype loss; + Forward(bottom, &loss); + Backward(); + return loss; + } + + /// @brief Updates the network weights based on the diff values computed. + void Update(); + /** + * @brief Shares weight data of owner blobs with shared blobs. + * + * Note: this is called by Net::Init, and thus should normally not be + * called manually. + */ + void ShareWeights(); + + /** + * @brief For an already initialized net, implicitly copies (i.e., using no + * additional memory) the pre-trained layers from another Net. + */ + void ShareTrainedLayersWith(const Net* other); + // For an already initialized net, CopyTrainedLayersFrom() copies the already + // trained layers from another net parameter instance. + /** + * @brief For an already initialized net, copies the pre-trained layers from + * another Net. + */ + void CopyTrainedLayersFrom(const NetParameter& param); + void CopyTrainedLayersFrom(const string trained_filename); + void CopyTrainedLayersFromBinaryProto(const string trained_filename); + void CopyTrainedLayersFromHDF5(const string trained_filename); + /// @brief Writes the net to a proto. + void ToProto(NetParameter* param, bool write_diff = false) const; + /// @brief Writes the net to an HDF5 file. + void ToHDF5(const string& filename, bool write_diff = false) const; + + /// @brief returns the network name. 
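For orientation, a hypothetical driver for the interface declared above might look like the sketch below. The prototxt path and iteration count are made up, and a real training run would go through a Solver so that Update() applies properly scaled diffs; this only exercises the declared API.

```cpp
// Hypothetical usage sketch of the Net API; illustrative only.
#include <cstdio>
#include <vector>
#include "caffe/net.hpp"

int main() {
  // "train_val.prototxt" is a placeholder path, not a file in this repository.
  caffe::Net<float> net("train_val.prototxt", caffe::TRAIN);
  std::vector<caffe::Blob<float>*> bottom;   // empty: data layers feed the net
  for (int iter = 0; iter < 100; ++iter) {
    net.ClearParamDiffs();                   // zero gradients before Backward
    float loss = net.ForwardBackward(bottom);
    net.Update();                            // subtract the (unscaled) diffs
    if (iter % 10 == 0) std::printf("iter %d, loss = %f\n", iter, loss);
  }
  return 0;
}
```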
+ inline const string& name() const { return name_; } + /// @brief returns the layer names + inline const vector& layer_names() const { return layer_names_; } + /// @brief returns the blob names + inline const vector& blob_names() const { return blob_names_; } + /// @brief returns the blobs + inline const vector > >& blobs() const { + return blobs_; + } + /// @brief returns the layers + inline const vector > >& layers() const { + return layers_; + } + /// @brief returns the phase: TRAIN or TEST + inline Phase phase() const { return phase_; } + /** + * @brief returns the bottom vecs for each layer -- usually you won't + * need this unless you do per-layer checks such as gradients. + */ + inline const vector*> >& bottom_vecs() const { + return bottom_vecs_; + } + /** + * @brief returns the top vecs for each layer -- usually you won't + * need this unless you do per-layer checks such as gradients. + */ + inline const vector*> >& top_vecs() const { + return top_vecs_; + } + inline const vector >& bottom_need_backward() const { + return bottom_need_backward_; + } + inline const vector& blob_loss_weights() const { + return blob_loss_weights_; + } + inline const vector& layer_need_backward() const { + return layer_need_backward_; + } + /// @brief returns the parameters + inline const vector > >& params() const { + return params_; + } + inline const vector*>& learnable_params() const { + return learnable_params_; + } + /// @brief returns the learnable parameter learning rate multipliers + inline const vector& params_lr() const { return params_lr_; } + inline const vector& has_params_lr() const { return has_params_lr_; } + /// @brief returns the learnable parameter decay multipliers + inline const vector& params_weight_decay() const { + return params_weight_decay_; + } + inline const vector& has_params_decay() const { + return has_params_decay_; + } + const map& param_names_index() const { + return param_names_index_; + } + inline const vector& param_owners() const { return param_owners_; } + /// @brief Input and output blob numbers + inline int num_inputs() const { return net_input_blobs_.size(); } + inline int num_outputs() const { return net_output_blobs_.size(); } + inline const vector*>& input_blobs() const { + return net_input_blobs_; + } + inline const vector*>& output_blobs() const { + return net_output_blobs_; + } + inline const vector& input_blob_indices() const { + return net_input_blob_indices_; + } + inline const vector& output_blob_indices() const { + return net_output_blob_indices_; + } + /************ For dynamic network surgery ***************/ + inline void set_current_iter_num(const int iter_num) { + iter_ = iter_num; + for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) { + layers_[layer_id]->set_current_iter_num(iter_num); + } + } + /********************************************************/ + bool has_blob(const string& blob_name) const; + const shared_ptr > blob_by_name(const string& blob_name) const; + bool has_layer(const string& layer_name) const; + const shared_ptr > layer_by_name(const string& layer_name) const; + + void set_debug_info(const bool value) { debug_info_ = value; } + + // Helpers for Init. + /** + * @brief Remove layers that the user specified should be excluded given the current + * phase, level, and stage. 
+ */ + static void FilterNet(const NetParameter& param, + NetParameter* param_filtered); + /// @brief return whether NetState state meets NetStateRule rule + static bool StateMeetsRule(const NetState& state, const NetStateRule& rule, + const string& layer_name); + + protected: + // Helpers for Init. + /// @brief Append a new input or top blob to the net. + void AppendTop(const NetParameter& param, const int layer_id, + const int top_id, set* available_blobs, + map* blob_name_to_idx); + /// @brief Append a new bottom blob to the net. + int AppendBottom(const NetParameter& param, const int layer_id, + const int bottom_id, set* available_blobs, + map* blob_name_to_idx); + /// @brief Append a new parameter blob to the net. + void AppendParam(const NetParameter& param, const int layer_id, + const int param_id); + + /// @brief Helper for displaying debug info in Forward about input Blobs. + void InputDebugInfo(const int layer_id); + /// @brief Helper for displaying debug info in Forward. + void ForwardDebugInfo(const int layer_id); + /// @brief Helper for displaying debug info in Backward. + void BackwardDebugInfo(const int layer_id); + /// @brief Helper for displaying debug info in Update. + void UpdateDebugInfo(const int param_id); + + /// @brief The network name + string name_; + /// @brief The phase: TRAIN or TEST + Phase phase_; + /// @brief The current iteration number + int iter_; + /// @brief Individual layers in the net + vector > > layers_; + vector layer_names_; + map layer_names_index_; + vector layer_need_backward_; + /// @brief the blobs storing intermediate results between the layer. + vector > > blobs_; + vector blob_names_; + map blob_names_index_; + vector blob_need_backward_; + /// bottom_vecs stores the vectors containing the input for each layer. + /// They don't actually host the blobs (blobs_ does), so we simply store + /// pointers. + vector*> > bottom_vecs_; + vector > bottom_id_vecs_; + vector > bottom_need_backward_; + /// top_vecs stores the vectors containing the output for each layer + vector*> > top_vecs_; + vector > top_id_vecs_; + /// Vector of weight in the loss (or objective) function of each net blob, + /// indexed by blob_id. + vector blob_loss_weights_; + vector > param_id_vecs_; + vector param_owners_; + vector param_display_names_; + vector > param_layer_indices_; + map param_names_index_; + /// blob indices for the input and the output of the net + vector net_input_blob_indices_; + vector net_output_blob_indices_; + vector*> net_input_blobs_; + vector*> net_output_blobs_; + /// The parameters in the network. + vector > > params_; + vector*> learnable_params_; + /** + * The mapping from params_ -> learnable_params_: we have + * learnable_param_ids_.size() == params_.size(), + * and learnable_params_[learnable_param_ids_[i]] == params_[i].get() + * if and only if params_[i] is an "owner"; otherwise, params_[i] is a sharer + * and learnable_params_[learnable_param_ids_[i]] gives its owner. + */ + vector learnable_param_ids_; + /// the index of mask parameters + vector mask_param_ids_; + /// the learning rate multipliers for learnable_params_ + vector params_lr_; + vector has_params_lr_; + /// the weight decay multipliers for learnable_params_ + vector params_weight_decay_; + vector has_params_decay_; + /// The bytes of memory used by this net + size_t memory_used_; + /// Whether to compute and display debug info for the net. 
+ bool debug_info_; + /// The root net that actually holds the shared layers in data parallelism + const Net* const root_net_; + DISABLE_COPY_AND_ASSIGN(Net); +}; + + +} // namespace caffe + +#endif // CAFFE_NET_HPP_ diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp new file mode 100755 index 0000000..c2e0774 --- /dev/null +++ b/include/caffe/neuron_layers.hpp @@ -0,0 +1,809 @@ +#ifndef CAFFE_NEURON_LAYERS_HPP_ +#define CAFFE_NEURON_LAYERS_HPP_ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#define HDF5_DATA_DATASET_NAME "data" +#define HDF5_DATA_LABEL_NAME "label" + +namespace caffe { + +/** + * @brief An interface for layers that take one blob as input (@f$ x @f$) + * and produce one equally-sized blob as output (@f$ y @f$), where + * each element of the output depends only on the corresponding input + * element. + */ +template +class NeuronLayer : public Layer { + public: + explicit NeuronLayer(const LayerParameter& param) + : Layer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } +}; + +/** + * @brief Computes @f$ y = |x| @f$ + * + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ y = |x| @f$ + */ +template +class AbsValLayer : public NeuronLayer { + public: + explicit AbsValLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "AbsVal"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + /// @copydoc AbsValLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the absolute value inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} = + * \mathrm{sign}(x) \frac{\partial E}{\partial y} + * @f$ if propagate_down[0] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +/** + * @brief Computes @f$ y = x + \log(1 + \exp(-x)) @f$ if @f$ x > 0 @f$; + * @f$ y = \log(1 + \exp(x)) @f$ otherwise. 
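The piecewise form above is the usual numerically stable way to evaluate log(1 + exp(x)): both branches are algebraically equal to it, but neither exponentiates a large positive argument. A standalone check (not the BNLLLayer code):

```cpp
#include <cmath>
#include <cstdio>

double bnll(double x) {
  return x > 0.0 ? x + std::log(1.0 + std::exp(-x))
                 : std::log(1.0 + std::exp(x));
}

int main() {
  const double xs[] = {-1000.0, -1.0, 0.0, 1.0, 1000.0};
  for (int i = 0; i < 5; ++i)
    std::printf("bnll(%g) = %g\n", xs[i], bnll(xs[i]));  // stays finite at 1000
  return 0;
}
```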
+ * + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = \left\{ + * \begin{array}{ll} + * x + \log(1 + \exp(-x)) & \mbox{if } x > 0 \\ + * \log(1 + \exp(x)) & \mbox{otherwise} + * \end{array} \right. + * @f$ + */ +template +class BNLLLayer : public NeuronLayer { + public: + explicit BNLLLayer(const LayerParameter& param) + : NeuronLayer(param) {} + + virtual inline const char* type() const { return "BNLL"; } + + protected: + /// @copydoc BNLLLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the BNLL inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} + * @f$ if propagate_down[0] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +/** + * @brief During training only, sets a random portion of @f$x@f$ to 0, adjusting + * the rest of the vector magnitude accordingly. + * + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ y = |x| @f$ + */ +template +class DropoutLayer : public NeuronLayer { + public: + /** + * @param param provides DropoutParameter dropout_param, + * with DropoutLayer options: + * - dropout_ratio (\b optional, default 0.5). + * Sets the probability @f$ p @f$ that any given unit is dropped. + */ + explicit DropoutLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Dropout"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs. At training time, we have @f$ + * y_{\mbox{train}} = \left\{ + * \begin{array}{ll} + * \frac{x}{1 - p} & \mbox{if } u > p \\ + * 0 & \mbox{otherwise} + * \end{array} \right. + * @f$, where @f$ u \sim U(0, 1)@f$ is generated independently for each + * input at each iteration. At test time, we simply have + * @f$ y_{\mbox{test}} = \mathbb{E}[y_{\mbox{train}}] = x @f$. 
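+ *
+ * A rough element-wise sketch of this "inverted dropout" scaling
+ * (illustrative only; the helper name and the use of std::rand() are
+ * placeholders, not the layer's actual RNG or implementation):
+ * @code
+ * #include <cstdlib>
+ * #include <vector>
+ * // Kept activations are scaled by 1/(1-p) at train time, so the layer is
+ * // simply the identity at test time.
+ * void dropout_forward(const std::vector<float>& x, float p, bool train,
+ *                      std::vector<float>* y) {
+ *   const float scale = 1.0f / (1.0f - p);
+ *   y->resize(x.size());
+ *   for (size_t i = 0; i < x.size(); ++i) {
+ *     const float u = static_cast<float>(std::rand()) / RAND_MAX;  // u ~ U(0,1)
+ *     (*y)[i] = !train ? x[i] : (u > p ? x[i] * scale : 0.0f);
+ *   }
+ * }
+ * @endcode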
+ */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + /// when divided by UINT_MAX, the randomly generated values @f$u\sim U(0,1)@f$ + Blob rand_vec_; + /// the probability @f$ p @f$ of dropping any input + Dtype threshold_; + /// the scale for undropped inputs at train time @f$ 1 / (1 - p) @f$ + Dtype scale_; + unsigned int uint_thres_; +}; + +/** + * @brief Computes @f$ y = \gamma ^ {\alpha x + \beta} @f$, + * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, + * and base @f$ \gamma @f$. + */ +template +class ExpLayer : public NeuronLayer { + public: + /** + * @param param provides ExpParameter exp_param, + * with ExpLayer options: + * - scale (\b optional, default 1) the scale @f$ \alpha @f$ + * - shift (\b optional, default 0) the shift @f$ \beta @f$ + * - base (\b optional, default -1 for a value of @f$ e \approx 2.718 @f$) + * the base @f$ \gamma @f$ + */ + explicit ExpLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Exp"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = \gamma ^ {\alpha x + \beta} + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the exp inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} = + * \frac{\partial E}{\partial y} y \alpha \log_e(gamma) + * @f$ if propagate_down[0] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Dtype inner_scale_, outer_scale_; +}; + +/** + * @brief Computes @f$ y = log_{\gamma}(\alpha x + \beta) @f$, + * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, + * and base @f$ \gamma @f$. 
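+ *
+ * Since @f$ \log_{\gamma}(z) = \log_e(z) / \log_e(\gamma) @f$, the forward
+ * pass reduces to a natural log plus a constant rescaling. A scalar sketch
+ * under that observation (illustrative only, not the layer code):
+ * @code
+ * #include <cmath>
+ * inline double log_layer(double x, double alpha, double beta, double gamma) {
+ *   return std::log(alpha * x + beta) / std::log(gamma);  // base-gamma log
+ * }
+ * @endcode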
+ */ +template +class LogLayer : public NeuronLayer { + public: + /** + * @param param provides LogParameter log_param, + * with LogLayer options: + * - scale (\b optional, default 1) the scale @f$ \alpha @f$ + * - shift (\b optional, default 0) the shift @f$ \beta @f$ + * - base (\b optional, default -1 for a value of @f$ e \approx 2.718 @f$) + * the base @f$ \gamma @f$ + */ + explicit LogLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Log"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = log_{\gamma}(\alpha x + \beta) + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the exp inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} = + * \frac{\partial E}{\partial y} y \alpha \log_e(gamma) + * @f$ if propagate_down[0] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Dtype base_scale_; + Dtype input_scale_, input_shift_; + Dtype backward_num_scale_; +}; + +/** + * @brief Computes @f$ y = (\alpha x + \beta) ^ \gamma @f$, + * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, + * and power @f$ \gamma @f$. + */ +template +class PowerLayer : public NeuronLayer { + public: + /** + * @param param provides PowerParameter power_param, + * with PowerLayer options: + * - scale (\b optional, default 1) the scale @f$ \alpha @f$ + * - shift (\b optional, default 0) the shift @f$ \beta @f$ + * - power (\b optional, default 1) the power @f$ \gamma @f$ + */ + explicit PowerLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Power"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = (\alpha x + \beta) ^ \gamma + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the power inputs. 
+ * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} = + * \frac{\partial E}{\partial y} + * \alpha \gamma (\alpha x + \beta) ^ {\gamma - 1} = + * \frac{\partial E}{\partial y} + * \frac{\alpha \gamma y}{\alpha x + \beta} + * @f$ if propagate_down[0] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + /// @brief @f$ \gamma @f$ from layer_param_.power_param() + Dtype power_; + /// @brief @f$ \alpha @f$ from layer_param_.power_param() + Dtype scale_; + /// @brief @f$ \beta @f$ from layer_param_.power_param() + Dtype shift_; + /// @brief Result of @f$ \alpha \gamma @f$ + Dtype diff_scale_; +}; + +/** + * @brief Rectified Linear Unit non-linearity @f$ y = \max(0, x) @f$. + * The simple max is fast to compute, and the function does not saturate. + */ +template +class ReLULayer : public NeuronLayer { + public: + /** + * @param param provides ReLUParameter relu_param, + * with ReLULayer options: + * - negative_slope (\b optional, default 0). + * the value @f$ \nu @f$ by which negative values are multiplied. + */ + explicit ReLULayer(const LayerParameter& param) + : NeuronLayer(param) {} + + virtual inline const char* type() const { return "ReLU"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = \max(0, x) + * @f$ by default. If a non-zero negative_slope @f$ \nu @f$ is provided, + * the computed outputs are @f$ y = \max(0, x) + \nu \min(0, x) @f$. + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the ReLU inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} = \left\{ + * \begin{array}{lr} + * 0 & \mathrm{if} \; x \le 0 \\ + * \frac{\partial E}{\partial y} & \mathrm{if} \; x > 0 + * \end{array} \right. + * @f$ if propagate_down[0], by default. + * If a non-zero negative_slope @f$ \nu @f$ is provided, + * the computed gradients are @f$ + * \frac{\partial E}{\partial x} = \left\{ + * \begin{array}{lr} + * \nu \frac{\partial E}{\partial y} & \mathrm{if} \; x \le 0 \\ + * \frac{\partial E}{\partial y} & \mathrm{if} \; x > 0 + * \end{array} \right. + * @f$. 
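+ *
+ * A compact per-element sketch of this rule (illustrative only; `nu` is the
+ * negative_slope, 0 by default):
+ * @code
+ * // Forward:  y = max(0, x) + nu * min(0, x)
+ * // Backward: dE/dx = dE/dy       if x > 0
+ * //           dE/dx = nu * dE/dy  otherwise
+ * inline float relu_backward(float x, float top_diff, float nu) {
+ *   return top_diff * (x > 0.0f ? 1.0f : nu);
+ * }
+ * @endcode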
+ */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +#ifdef USE_CUDNN +/** + * @brief CuDNN acceleration of ReLULayer. + */ +template +class CuDNNReLULayer : public ReLULayer { + public: + explicit CuDNNReLULayer(const LayerParameter& param) + : ReLULayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNReLULayer(); + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t handle_; + cudnnTensorDescriptor_t bottom_desc_; + cudnnTensorDescriptor_t top_desc_; +}; +#endif + +/** + * @brief Sigmoid function non-linearity @f$ + * y = (1 + \exp(-x))^{-1} + * @f$, a classic choice in neural networks. + * + * Note that the gradient vanishes as the values move away from 0. + * The ReLULayer is often a better choice for this reason. + */ +template +class SigmoidLayer : public NeuronLayer { + public: + explicit SigmoidLayer(const LayerParameter& param) + : NeuronLayer(param) {} + + virtual inline const char* type() const { return "Sigmoid"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = (1 + \exp(-x))^{-1} + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the sigmoid inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} + * = \frac{\partial E}{\partial y} y (1 - y) + * @f$ if propagate_down[0] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +#ifdef USE_CUDNN +/** + * @brief CuDNN acceleration of SigmoidLayer. 
+ */ +template +class CuDNNSigmoidLayer : public SigmoidLayer { + public: + explicit CuDNNSigmoidLayer(const LayerParameter& param) + : SigmoidLayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNSigmoidLayer(); + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t handle_; + cudnnTensorDescriptor_t bottom_desc_; + cudnnTensorDescriptor_t top_desc_; +}; +#endif + +/** + * @brief TanH hyperbolic tangent non-linearity @f$ + * y = \frac{\exp(2x) - 1}{\exp(2x) + 1} + * @f$, popular in auto-encoders. + * + * Note that the gradient vanishes as the values move away from 0. + * The ReLULayer is often a better choice for this reason. + */ +template +class TanHLayer : public NeuronLayer { + public: + explicit TanHLayer(const LayerParameter& param) + : NeuronLayer(param) {} + + virtual inline const char* type() const { return "TanH"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = \frac{\exp(2x) - 1}{\exp(2x) + 1} + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the sigmoid inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} + * = \frac{\partial E}{\partial y} + * \left(1 - \left[\frac{\exp(2x) - 1}{exp(2x) + 1} \right]^2 \right) + * = \frac{\partial E}{\partial y} (1 - y^2) + * @f$ if propagate_down[0] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +#ifdef USE_CUDNN +/** + * @brief CuDNN acceleration of TanHLayer. + */ +template +class CuDNNTanHLayer : public TanHLayer { + public: + explicit CuDNNTanHLayer(const LayerParameter& param) + : TanHLayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNTanHLayer(); + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t handle_; + cudnnTensorDescriptor_t bottom_desc_; + cudnnTensorDescriptor_t top_desc_; +}; +#endif + +/** + * @brief Tests whether the input exceeds a threshold: outputs 1 for inputs + * above threshold; 0 otherwise. 
+ */ +template +class ThresholdLayer : public NeuronLayer { + public: + /** + * @param param provides ThresholdParameter threshold_param, + * with ThresholdLayer options: + * - threshold (\b optional, default 0). + * the threshold value @f$ t @f$ to which the input values are compared. + */ + explicit ThresholdLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Threshold"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = \left\{ + * \begin{array}{lr} + * 0 & \mathrm{if} \; x \le t \\ + * 1 & \mathrm{if} \; x > t + * \end{array} \right. + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + /// @brief Not implemented (non-differentiable function) + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + NOT_IMPLEMENTED; + } + + Dtype threshold_; +}; + +/** + * @brief Parameterized Rectified Linear Unit non-linearity @f$ + * y_i = \max(0, x_i) + a_i \min(0, x_i) + * @f$. The differences from ReLULayer are 1) negative slopes are + * learnable though backprop and 2) negative slopes can vary across + * channels. The number of axes of input blob should be greater than or + * equal to 2. The 1st axis (0-based) is seen as channels. + */ +template +class PReLULayer : public NeuronLayer { + public: + /** + * @param param provides PReLUParameter prelu_param, + * with PReLULayer options: + * - filler (\b optional, FillerParameter, + * default {'type': constant 'value':0.25}). + * - channel_shared (\b optional, default false). + * negative slopes are shared across channels. + */ + explicit PReLULayer(const LayerParameter& param) + : NeuronLayer(param) {} + + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "PReLU"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times ...) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times ...) @f$ + * the computed outputs for each channel @f$i@f$ @f$ + * y_i = \max(0, x_i) + a_i \min(0, x_i) + * @f$. + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the PReLU inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times ...) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times ...) 
@f$ + * the inputs @f$ x @f$; For each channel @f$i@f$, backward fills their + * diff with gradients @f$ + * \frac{\partial E}{\partial x_i} = \left\{ + * \begin{array}{lr} + * a_i \frac{\partial E}{\partial y_i} & \mathrm{if} \; x_i \le 0 \\ + * \frac{\partial E}{\partial y_i} & \mathrm{if} \; x_i > 0 + * \end{array} \right. + * @f$. + * If param_propagate_down_[0] is true, it fills the diff with gradients + * @f$ + * \frac{\partial E}{\partial a_i} = \left\{ + * \begin{array}{lr} + * \sum_{x_i} x_i \frac{\partial E}{\partial y_i} & \mathrm{if} \; x_i \le 0 \\ + * 0 & \mathrm{if} \; x_i > 0 + * \end{array} \right. + * @f$. + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool channel_shared_; + Blob multiplier_; // dot multiplier for backward computation of params + Blob backward_buff_; // temporary buffer for backward computation + Blob bottom_memory_; // memory for in-place computation +}; + +} // namespace caffe + +#endif // CAFFE_NEURON_LAYERS_HPP_ diff --git a/include/caffe/parallel.hpp b/include/caffe/parallel.hpp new file mode 100755 index 0000000..85fc2b5 --- /dev/null +++ b/include/caffe/parallel.hpp @@ -0,0 +1,118 @@ +#ifndef CAFFE_PARALLEL_HPP_ +#define CAFFE_PARALLEL_HPP_ + +#include + +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/internal_thread.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/solver.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/blocking_queue.hpp" + +namespace caffe { + +// Represents a net parameters. Once a net is created, its parameter buffers can +// be replaced by ones from Params, to allow parallelization. Params ensures +// parameters are allocated in one consecutive array. +template +class Params { + public: + explicit Params(shared_ptr > root_solver); + virtual ~Params() { + } + + inline size_t size() const { + return size_; + } + inline Dtype* data() const { + return data_; + } + inline Dtype* diff() const { + return diff_; + } + + protected: + const size_t size_; // Size of buffers + Dtype* data_; // Network parameters + Dtype* diff_; // Gradient + +DISABLE_COPY_AND_ASSIGN(Params); +}; + +// Params stored in GPU memory. +template +class GPUParams : public Params { + public: + GPUParams(shared_ptr > root_solver, int device); + virtual ~GPUParams(); + + void configure(Solver* solver) const; + + protected: + using Params::size_; + using Params::data_; + using Params::diff_; +}; + +class DevicePair { + public: + DevicePair(int parent, int device) + : parent_(parent), + device_(device) { + } + inline int parent() { + return parent_; + } + inline int device() { + return device_; + } + + // Group GPUs in pairs, by proximity depending on machine's topology + static void compute(const vector devices, vector* pairs); + + protected: + int parent_; + int device_; +}; + +// Synchronous data parallelism using map-reduce between local GPUs. 
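+//
+// The scheme below relies on Params laying every learnable parameter out in
+// one consecutive array, so each GPU exchanges a single buffer per iteration.
+// A rough standalone illustration of that packing idea (not the actual
+// Params implementation, which works on Blob/SyncedMemory buffers):
+//
+//   #include <algorithm>
+//   #include <vector>
+//   // Copies each per-blob array into one contiguous buffer and returns the
+//   // slice each blob should use from now on.
+//   std::vector<float*> pack(const std::vector<std::vector<float> >& blobs,
+//                            std::vector<float>* buffer) {
+//     size_t total = 0;
+//     for (size_t i = 0; i < blobs.size(); ++i) total += blobs[i].size();
+//     buffer->assign(total, 0.0f);
+//     std::vector<float*> slices;
+//     float* ptr = buffer->empty() ? NULL : &(*buffer)[0];
+//     for (size_t i = 0; i < blobs.size(); ++i) {
+//       std::copy(blobs[i].begin(), blobs[i].end(), ptr);
+//       slices.push_back(ptr);
+//       ptr += blobs[i].size();
+//     }
+//     return slices;
+//   }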
+template +class P2PSync : public GPUParams, public Solver::Callback, + public InternalThread { + public: + explicit P2PSync(shared_ptr > root_solver, + P2PSync* parent, const SolverParameter& param); + virtual ~P2PSync(); + + inline const shared_ptr >& solver() const { + return solver_; + } + + void run(const vector& gpus); + + protected: + void on_start(); + void on_gradients_ready(); + + void InternalThreadEntry(); + + P2PSync* parent_; + vector*> children_; + BlockingQueue*> queue_; + const int initial_iter_; + Dtype* parent_grads_; + shared_ptr > solver_; + + using Params::size_; + using Params::data_; + using Params::diff_; +}; + +} // namespace caffe + +#endif diff --git a/include/caffe/python_layer.hpp b/include/caffe/python_layer.hpp new file mode 100755 index 0000000..c43c1e8 --- /dev/null +++ b/include/caffe/python_layer.hpp @@ -0,0 +1,52 @@ +#ifndef CAFFE_PYTHON_LAYER_HPP_ +#define CAFFE_PYTHON_LAYER_HPP_ + +#include +#include + +#include "caffe/layer.hpp" + +namespace bp = boost::python; + +namespace caffe { + +template +class PythonLayer : public Layer { + public: + PythonLayer(PyObject* self, const LayerParameter& param) + : Layer(param), self_(bp::handle<>(bp::borrowed(self))) { } + + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top) { + self_.attr("param_str") = bp::str( + this->layer_param_.python_param().param_str()); + self_.attr("setup")(bottom, top); + } + virtual void Reshape(const vector*>& bottom, + const vector*>& top) { + self_.attr("reshape")(bottom, top); + } + + virtual inline bool ShareInParallel() const { + return this->layer_param_.python_param().share_in_parallel(); + } + + virtual inline const char* type() const { return "Python"; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top) { + self_.attr("forward")(bottom, top); + } + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + self_.attr("backward")(top, propagate_down, bottom); + } + + private: + bp::object self_; +}; + +} // namespace caffe + +#endif diff --git a/include/caffe/sgd_solvers.hpp b/include/caffe/sgd_solvers.hpp new file mode 100755 index 0000000..1fc52d8 --- /dev/null +++ b/include/caffe/sgd_solvers.hpp @@ -0,0 +1,148 @@ +#ifndef CAFFE_SGD_SOLVERS_HPP_ +#define CAFFE_SGD_SOLVERS_HPP_ + +#include +#include + +#include "caffe/solver.hpp" + +namespace caffe { + +/** + * @brief Optimizes the parameters of a Net using + * stochastic gradient descent (SGD) with momentum. + */ +template +class SGDSolver : public Solver { + public: + explicit SGDSolver(const SolverParameter& param) + : Solver(param) { PreSolve(); } + explicit SGDSolver(const string& param_file) + : Solver(param_file) { PreSolve(); } + virtual inline const char* type() const { return "SGD"; } + + const vector > >& history() { return history_; } + + protected: + void PreSolve(); + Dtype GetLearningRate(); + virtual void ApplyUpdate(); + virtual void Normalize(int param_id); + virtual void Regularize(int param_id); + virtual void ComputeUpdateValue(int param_id, Dtype rate); + virtual void ClipGradients(); + virtual void SnapshotSolverState(const string& model_filename); + virtual void SnapshotSolverStateToBinaryProto(const string& model_filename); + virtual void SnapshotSolverStateToHDF5(const string& model_filename); + virtual void RestoreSolverStateFromHDF5(const string& state_file); + virtual void RestoreSolverStateFromBinaryProto(const string& state_file); + // history maintains the historical momentum data. 
+ // update maintains update related data and is not needed in snapshots. + // temp maintains other information that might be needed in computation + // of gradients/updates and is not needed in snapshots + vector > > history_, update_, temp_; + + DISABLE_COPY_AND_ASSIGN(SGDSolver); +}; + +template +class NesterovSolver : public SGDSolver { + public: + explicit NesterovSolver(const SolverParameter& param) + : SGDSolver(param) {} + explicit NesterovSolver(const string& param_file) + : SGDSolver(param_file) {} + virtual inline const char* type() const { return "Nesterov"; } + + protected: + virtual void ComputeUpdateValue(int param_id, Dtype rate); + + DISABLE_COPY_AND_ASSIGN(NesterovSolver); +}; + +template +class AdaGradSolver : public SGDSolver { + public: + explicit AdaGradSolver(const SolverParameter& param) + : SGDSolver(param) { constructor_sanity_check(); } + explicit AdaGradSolver(const string& param_file) + : SGDSolver(param_file) { constructor_sanity_check(); } + virtual inline const char* type() const { return "AdaGrad"; } + + protected: + virtual void ComputeUpdateValue(int param_id, Dtype rate); + void constructor_sanity_check() { + CHECK_EQ(0, this->param_.momentum()) + << "Momentum cannot be used with AdaGrad."; + } + + DISABLE_COPY_AND_ASSIGN(AdaGradSolver); +}; + + +template +class RMSPropSolver : public SGDSolver { + public: + explicit RMSPropSolver(const SolverParameter& param) + : SGDSolver(param) { constructor_sanity_check(); } + explicit RMSPropSolver(const string& param_file) + : SGDSolver(param_file) { constructor_sanity_check(); } + virtual inline const char* type() const { return "RMSProp"; } + + protected: + virtual void ComputeUpdateValue(int param_id, Dtype rate); + void constructor_sanity_check() { + CHECK_EQ(0, this->param_.momentum()) + << "Momentum cannot be used with RMSProp."; + CHECK_GE(this->param_.rms_decay(), 0) + << "rms_decay should lie between 0 and 1."; + CHECK_LT(this->param_.rms_decay(), 1) + << "rms_decay should lie between 0 and 1."; + } + + DISABLE_COPY_AND_ASSIGN(RMSPropSolver); +}; + +template +class AdaDeltaSolver : public SGDSolver { + public: + explicit AdaDeltaSolver(const SolverParameter& param) + : SGDSolver(param) { AdaDeltaPreSolve(); } + explicit AdaDeltaSolver(const string& param_file) + : SGDSolver(param_file) { AdaDeltaPreSolve(); } + virtual inline const char* type() const { return "AdaDelta"; } + + protected: + void AdaDeltaPreSolve(); + virtual void ComputeUpdateValue(int param_id, Dtype rate); + + DISABLE_COPY_AND_ASSIGN(AdaDeltaSolver); +}; + +/** + * @brief AdamSolver, an algorithm for first-order gradient-based optimization + * of stochastic objective functions, based on adaptive estimates of + * lower-order moments. Described in [1]. + * + * [1] D. P. Kingma and J. L. Ba, "ADAM: A Method for Stochastic Optimization." + * arXiv preprint arXiv:1412.6980v8 (2014). 
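+ *
+ * A per-parameter sketch of the update in [1] (pseudocode for the algorithm
+ * itself, not Caffe's implementation; beta1/beta2/eps are the usual Adam
+ * hyperparameters):
+ * @code
+ * #include <cmath>
+ * // One Adam step for a single parameter with gradient g at step t >= 1.
+ * // m and v hold the running first and second moment estimates.
+ * inline float adam_step(float g, float lr, float beta1, float beta2,
+ *                        float eps, int t, float* m, float* v) {
+ *   *m = beta1 * (*m) + (1.0f - beta1) * g;
+ *   *v = beta2 * (*v) + (1.0f - beta2) * g * g;
+ *   const float m_hat = *m / (1.0f - std::pow(beta1, t));  // bias correction
+ *   const float v_hat = *v / (1.0f - std::pow(beta2, t));
+ *   return lr * m_hat / (std::sqrt(v_hat) + eps);          // amount to subtract
+ * }
+ * @endcode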
+ */ +template +class AdamSolver : public SGDSolver { + public: + explicit AdamSolver(const SolverParameter& param) + : SGDSolver(param) { AdamPreSolve();} + explicit AdamSolver(const string& param_file) + : SGDSolver(param_file) { AdamPreSolve(); } + virtual inline const char* type() const { return "Adam"; } + + protected: + void AdamPreSolve(); + virtual void ComputeUpdateValue(int param_id, Dtype rate); + + DISABLE_COPY_AND_ASSIGN(AdamSolver); +}; + +} // namespace caffe + +#endif // CAFFE_SGD_SOLVERS_HPP_ diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp new file mode 100755 index 0000000..ab12ef1 --- /dev/null +++ b/include/caffe/solver.hpp @@ -0,0 +1,268 @@ +#ifndef CAFFE_OPTIMIZATION_SOLVER_HPP_ +#define CAFFE_OPTIMIZATION_SOLVER_HPP_ + +#include +#include + +#include "caffe/net.hpp" + +namespace caffe { + +/** + * @brief An interface for classes that perform optimization on Net%s. + * + * Requires implementation of ApplyUpdate to compute a parameter update + * given the current state of the Net parameters. + */ +template +class Solver { + public: + explicit Solver(const SolverParameter& param, + const Solver* root_solver = NULL); + explicit Solver(const string& param_file, const Solver* root_solver = NULL); + void Init(const SolverParameter& param); + void InitTrainNet(); + void InitTestNets(); + // The main entry of the solver function. In default, iter will be zero. Pass + // in a non-zero iter number to resume training for a pre-trained net. + virtual void Solve(const char* resume_file = NULL); + inline void Solve(const string resume_file) { Solve(resume_file.c_str()); } + void Step(int iters); + // The Restore method simply dispatches to one of the + // RestoreSolverStateFrom___ protected methods. You should implement these + // methods to restore the state from the appropriate snapshot type. + void Restore(const char* resume_file); + virtual ~Solver() {} + inline const SolverParameter& param() const { return param_; } + inline shared_ptr > net() { return net_; } + inline const vector > >& test_nets() { + return test_nets_; + } + int iter() { return iter_; } + + // Invoked at specific points during an iteration + class Callback { + protected: + virtual void on_start() = 0; + virtual void on_gradients_ready() = 0; + + template + friend class Solver; + }; + const vector& callbacks() const { return callbacks_; } + void add_callback(Callback* value) { + callbacks_.push_back(value); + } + + protected: + // Make and apply the update value for the current iteration. + virtual void ApplyUpdate() = 0; + // The Solver::Snapshot function implements the basic snapshotting utility + // that stores the learned net. You should implement the SnapshotSolverState() + // function that produces a SolverState protocol buffer that needs to be + // written to disk together with the learned net. 
+ void Snapshot(); + string SnapshotFilename(const string extension); + string SnapshotToBinaryProto(); + string SnapshotToHDF5(); + // The test routine + void TestAll(); + void Test(const int test_net_id = 0); + virtual void SnapshotSolverState(const string& model_filename) = 0; + virtual void RestoreSolverStateFromHDF5(const string& state_file) = 0; + virtual void RestoreSolverStateFromBinaryProto(const string& state_file) = 0; + void DisplayOutputBlobs(const int net_id); + + SolverParameter param_; + int iter_; + int current_step_; + shared_ptr > net_; + vector > > test_nets_; + vector callbacks_; + + // The root solver that holds root nets (actually containing shared layers) + // in data parallelism + const Solver* const root_solver_; + + DISABLE_COPY_AND_ASSIGN(Solver); +}; + +/** + * @brief Solver that only computes gradients, used as worker + * for multi-GPU training. + */ +template +class WorkerSolver : public Solver { + public: + explicit WorkerSolver(const SolverParameter& param, + const Solver* root_solver = NULL) + : Solver(param, root_solver) {} + + protected: + void ApplyUpdate() {} + void SnapshotSolverState(const string& model_filename) { + LOG(FATAL) << "Should not be called on worker solver."; + } + void RestoreSolverStateFromBinaryProto(const string& state_file) { + LOG(FATAL) << "Should not be called on worker solver."; + } + void RestoreSolverStateFromHDF5(const string& state_file) { + LOG(FATAL) << "Should not be called on worker solver."; + } +}; + +/** + * @brief Optimizes the parameters of a Net using + * stochastic gradient descent (SGD) with momentum. + */ +template +class SGDSolver : public Solver { + public: + explicit SGDSolver(const SolverParameter& param) + : Solver(param) { PreSolve(); } + explicit SGDSolver(const string& param_file) + : Solver(param_file) { PreSolve(); } + + const vector > >& history() { return history_; } + + protected: + void PreSolve(); + Dtype GetLearningRate(); + virtual void ApplyUpdate(); + virtual void Normalize(int param_id); + virtual void Regularize(int param_id); + virtual void ComputeUpdateValue(int param_id, Dtype rate); + virtual void ClipGradients(); + virtual void SnapshotSolverState(const string& model_filename); + virtual void SnapshotSolverStateToBinaryProto(const string& model_filename); + virtual void SnapshotSolverStateToHDF5(const string& model_filename); + virtual void RestoreSolverStateFromHDF5(const string& state_file); + virtual void RestoreSolverStateFromBinaryProto(const string& state_file); + // history maintains the historical momentum data. + // update maintains update related data and is not needed in snapshots. 
+ // temp maintains other information that might be needed in computation + // of gradients/updates and is not needed in snapshots + vector > > history_, update_, temp_; + + DISABLE_COPY_AND_ASSIGN(SGDSolver); +}; + +template +class NesterovSolver : public SGDSolver { + public: + explicit NesterovSolver(const SolverParameter& param) + : SGDSolver(param) {} + explicit NesterovSolver(const string& param_file) + : SGDSolver(param_file) {} + + protected: + virtual void ComputeUpdateValue(int param_id, Dtype rate); + + DISABLE_COPY_AND_ASSIGN(NesterovSolver); +}; + +template +class AdaGradSolver : public SGDSolver { + public: + explicit AdaGradSolver(const SolverParameter& param) + : SGDSolver(param) { constructor_sanity_check(); } + explicit AdaGradSolver(const string& param_file) + : SGDSolver(param_file) { constructor_sanity_check(); } + + protected: + virtual void ComputeUpdateValue(int param_id, Dtype rate); + void constructor_sanity_check() { + CHECK_EQ(0, this->param_.momentum()) + << "Momentum cannot be used with AdaGrad."; + } + + DISABLE_COPY_AND_ASSIGN(AdaGradSolver); +}; + + +template +class RMSPropSolver : public SGDSolver { + public: + explicit RMSPropSolver(const SolverParameter& param) + : SGDSolver(param) { constructor_sanity_check(); } + explicit RMSPropSolver(const string& param_file) + : SGDSolver(param_file) { constructor_sanity_check(); } + + protected: + virtual void ComputeUpdateValue(int param_id, Dtype rate); + void constructor_sanity_check() { + CHECK_EQ(0, this->param_.momentum()) + << "Momentum cannot be used with RMSProp."; + CHECK_GE(this->param_.rms_decay(), 0) + << "rms_decay should lie between 0 and 1."; + CHECK_LT(this->param_.rms_decay(), 1) + << "rms_decay should lie between 0 and 1."; + } + + DISABLE_COPY_AND_ASSIGN(RMSPropSolver); +}; + +template +class AdaDeltaSolver : public SGDSolver { + public: + explicit AdaDeltaSolver(const SolverParameter& param) + : SGDSolver(param) { AdaDeltaPreSolve(); } + explicit AdaDeltaSolver(const string& param_file) + : SGDSolver(param_file) { AdaDeltaPreSolve(); } + + protected: + void AdaDeltaPreSolve(); + virtual void ComputeUpdateValue(int param_id, Dtype rate); + + DISABLE_COPY_AND_ASSIGN(AdaDeltaSolver); +}; + +/** + * @brief AdamSolver, an algorithm for first-order gradient-based optimization + * of stochastic objective functions, based on adaptive estimates of + * lower-order moments. Described in [1]. + * + * [1] D. P. Kingma and J. L. Ba, "ADAM: A Method for Stochastic Optimization." + * arXiv preprint arXiv:1412.6980v8 (2014). 
+ */ +template +class AdamSolver : public SGDSolver { + public: + explicit AdamSolver(const SolverParameter& param) + : SGDSolver(param) { AdamPreSolve();} + explicit AdamSolver(const string& param_file) + : SGDSolver(param_file) { AdamPreSolve(); } + + protected: + void AdamPreSolve(); + virtual void ComputeUpdateValue(int param_id, Dtype rate); + + DISABLE_COPY_AND_ASSIGN(AdamSolver); +}; + +template +Solver* GetSolver(const SolverParameter& param) { + SolverParameter_SolverType type = param.solver_type(); + + switch (type) { + case SolverParameter_SolverType_SGD: + return new SGDSolver(param); + case SolverParameter_SolverType_NESTEROV: + return new NesterovSolver(param); + case SolverParameter_SolverType_ADAGRAD: + return new AdaGradSolver(param); + case SolverParameter_SolverType_RMSPROP: + return new RMSPropSolver(param); + case SolverParameter_SolverType_ADADELTA: + return new AdaDeltaSolver(param); + case SolverParameter_SolverType_ADAM: + return new AdamSolver(param); + default: + LOG(FATAL) << "Unknown SolverType: " << type; + } + return (Solver*) NULL; +} + +} // namespace caffe + +#endif // CAFFE_OPTIMIZATION_SOLVER_HPP_ diff --git a/include/caffe/syncedmem.hpp b/include/caffe/syncedmem.hpp new file mode 100755 index 0000000..62aadef --- /dev/null +++ b/include/caffe/syncedmem.hpp @@ -0,0 +1,83 @@ +#ifndef CAFFE_SYNCEDMEM_HPP_ +#define CAFFE_SYNCEDMEM_HPP_ + +#include + +#include "caffe/common.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +// If CUDA is available and in GPU mode, host memory will be allocated pinned, +// using cudaMallocHost. It avoids dynamic pinning for transfers (DMA). +// The improvement in performance seems negligible in the single GPU case, +// but might be more significant for parallel training. Most importantly, +// it improved stability for large models on many GPUs. +inline void CaffeMallocHost(void** ptr, size_t size) { +#ifndef CPU_ONLY + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaMallocHost(ptr, size)); + return; + } +#endif + *ptr = malloc(size); + CHECK(*ptr) << "host allocation of size " << size << " failed"; +} + +inline void CaffeFreeHost(void* ptr) { +#ifndef CPU_ONLY + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaFreeHost(ptr)); + return; + } +#endif + free(ptr); +} + + +/** + * @brief Manages memory allocation and synchronization between the host (CPU) + * and device (GPU). + * + * TODO(dox): more thorough description. 
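+ *
+ * A small usage sketch (illustrative; assumes GPU mode with a device
+ * available):
+ * @code
+ * SyncedMemory mem(10 * sizeof(float));
+ * // Writing through the mutable CPU pointer moves the head to HEAD_AT_CPU.
+ * float* cpu = static_cast<float*>(mem.mutable_cpu_data());
+ * for (int i = 0; i < 10; ++i) { cpu[i] = static_cast<float>(i); }
+ * // Reading the GPU pointer triggers a host-to-device copy (state SYNCED);
+ * // a later mutable_gpu_data() call would move the head to HEAD_AT_GPU and
+ * // leave the CPU copy stale until it is fetched again.
+ * const void* gpu = mem.gpu_data();
+ * @endcode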
+ */ +class SyncedMemory { + public: + SyncedMemory() + : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), + own_cpu_data_(false), own_gpu_data_(false), gpu_device_(-1) {} + explicit SyncedMemory(size_t size) + : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED), + own_cpu_data_(false), own_gpu_data_(false), gpu_device_(-1) {} + ~SyncedMemory(); + const void* cpu_data(); + void set_cpu_data(void* data); + const void* gpu_data(); + void set_gpu_data(void* data); + void* mutable_cpu_data(); + void* mutable_gpu_data(); + enum SyncedHead { UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED }; + SyncedHead head() { return head_; } + size_t size() { return size_; } + +#ifndef CPU_ONLY + void async_gpu_push(const cudaStream_t& stream); +#endif + + private: + void to_cpu(); + void to_gpu(); + void* cpu_ptr_; + void* gpu_ptr_; + size_t size_; + SyncedHead head_; + bool own_cpu_data_; + bool own_gpu_data_; + int gpu_device_; + + DISABLE_COPY_AND_ASSIGN(SyncedMemory); +}; // class SyncedMemory + +} // namespace caffe + +#endif // CAFFE_SYNCEDMEM_HPP_ diff --git a/include/caffe/test/test_caffe_main.hpp b/include/caffe/test/test_caffe_main.hpp new file mode 100755 index 0000000..fc15609 --- /dev/null +++ b/include/caffe/test/test_caffe_main.hpp @@ -0,0 +1,78 @@ +// The main caffe test code. Your test cpp code should include this hpp +// to allow a main function to be compiled into the binary. +#ifndef CAFFE_TEST_TEST_CAFFE_MAIN_HPP_ +#define CAFFE_TEST_TEST_CAFFE_MAIN_HPP_ + +#include +#include + +#include +#include + +#include "caffe/common.hpp" + +using std::cout; +using std::endl; + +#ifdef CMAKE_BUILD + #include "caffe_config.h" +#else + #define CUDA_TEST_DEVICE -1 + #define CMAKE_SOURCE_DIR "src/" + #define EXAMPLES_SOURCE_DIR "examples/" + #define CMAKE_EXT "" +#endif + +int main(int argc, char** argv); + +namespace caffe { + +template +class MultiDeviceTest : public ::testing::Test { + public: + typedef typename TypeParam::Dtype Dtype; + protected: + MultiDeviceTest() { + Caffe::set_mode(TypeParam::device); + } + virtual ~MultiDeviceTest() {} +}; + +typedef ::testing::Types TestDtypes; + +template +struct CPUDevice { + typedef TypeParam Dtype; + static const Caffe::Brew device = Caffe::CPU; +}; + +template +class CPUDeviceTest : public MultiDeviceTest > { +}; + +#ifdef CPU_ONLY + +typedef ::testing::Types, + CPUDevice > TestDtypesAndDevices; + +#else + +template +struct GPUDevice { + typedef TypeParam Dtype; + static const Caffe::Brew device = Caffe::GPU; +}; + +template +class GPUDeviceTest : public MultiDeviceTest > { +}; + +typedef ::testing::Types, CPUDevice, + GPUDevice, GPUDevice > + TestDtypesAndDevices; + +#endif + +} // namespace caffe + +#endif // CAFFE_TEST_TEST_CAFFE_MAIN_HPP_ diff --git a/include/caffe/test/test_gradient_check_util.hpp b/include/caffe/test/test_gradient_check_util.hpp new file mode 100755 index 0000000..cc5dcba --- /dev/null +++ b/include/caffe/test/test_gradient_check_util.hpp @@ -0,0 +1,260 @@ +#ifndef CAFFE_TEST_GRADIENT_CHECK_UTIL_H_ +#define CAFFE_TEST_GRADIENT_CHECK_UTIL_H_ + +#include +#include + +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/net.hpp" + +namespace caffe { + +// The gradient checker adds a L2 normalization loss function on top of the +// top blobs, and checks the gradient. 
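+//
+// The core check, independent of any particular layer (illustrative sketch
+// only): the analytic derivative produced by Backward is compared against a
+// centered finite difference (f(x + h) - f(x - h)) / (2h), and the two must
+// agree up to a tolerance scaled by the larger of the two magnitudes:
+//
+//   // Centered finite-difference estimate of df/dx at x with step h.
+//   template <typename F>
+//   double numeric_gradient(F f, double x, double h) {
+//     return (f(x + h) - f(x - h)) / (2.0 * h);
+//   }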
+template +class GradientChecker { + public: + // kink and kink_range specify an ignored nonsmooth region of the form + // kink - kink_range <= |feature value| <= kink + kink_range, + // which accounts for all nonsmoothness in use by caffe + GradientChecker(const Dtype stepsize, const Dtype threshold, + const unsigned int seed = 1701, const Dtype kink = 0., + const Dtype kink_range = -1) + : stepsize_(stepsize), threshold_(threshold), seed_(seed), + kink_(kink), kink_range_(kink_range) {} + // Checks the gradient of a layer, with provided bottom layers and top + // layers. + // Note that after the gradient check, we do not guarantee that the data + // stored in the layer parameters and the blobs are unchanged. + void CheckGradient(Layer* layer, const vector*>& bottom, + const vector*>& top, int check_bottom = -1) { + layer->SetUp(bottom, top); + CheckGradientSingle(layer, bottom, top, check_bottom, -1, -1); + } + void CheckGradientExhaustive(Layer* layer, + const vector*>& bottom, const vector*>& top, + int check_bottom = -1); + + // CheckGradientEltwise can be used to test layers that perform element-wise + // computation only (e.g., neuron layers) -- where (d y_i) / (d x_j) = 0 when + // i != j. + void CheckGradientEltwise(Layer* layer, + const vector*>& bottom, const vector*>& top); + + void CheckGradientSingle(Layer* layer, + const vector*>& bottom, const vector*>& top, + int check_bottom, int top_id, int top_data_id, bool element_wise = false); + + // Checks the gradient of a network. This network should not have any data + // layers or loss layers, since the function does not explicitly deal with + // such cases yet. All input blobs and parameter blobs are going to be + // checked, layer-by-layer to avoid numerical problems to accumulate. + void CheckGradientNet(const Net& net, + const vector*>& input); + + protected: + Dtype GetObjAndGradient(const Layer& layer, + const vector*>& top, int top_id = -1, int top_data_id = -1); + Dtype stepsize_; + Dtype threshold_; + unsigned int seed_; + Dtype kink_; + Dtype kink_range_; +}; + + +template +void GradientChecker::CheckGradientSingle(Layer* layer, + const vector*>& bottom, const vector*>& top, + int check_bottom, int top_id, int top_data_id, bool element_wise) { + if (element_wise) { + CHECK_EQ(0, layer->blobs().size()); + CHECK_LE(0, top_id); + CHECK_LE(0, top_data_id); + const int top_count = top[top_id]->count(); + for (int blob_id = 0; blob_id < bottom.size(); ++blob_id) { + CHECK_EQ(top_count, bottom[blob_id]->count()); + } + } + // First, figure out what blobs we need to check against, and zero init + // parameter blobs. + vector*> blobs_to_check; + vector propagate_down(bottom.size(), check_bottom < 0); + for (int i = 0; i < layer->blobs().size(); ++i) { + Blob* blob = layer->blobs()[i].get(); + caffe_set(blob->count(), static_cast(0), blob->mutable_cpu_diff()); + blobs_to_check.push_back(blob); + } + if (check_bottom < 0) { + for (int i = 0; i < bottom.size(); ++i) { + blobs_to_check.push_back(bottom[i]); + } + } else { + CHECK_LT(check_bottom, bottom.size()); + blobs_to_check.push_back(bottom[check_bottom]); + propagate_down[check_bottom] = true; + } + // Compute the gradient analytically using Backward + Caffe::set_random_seed(seed_); + // Ignore the loss from the layer (it's just the weighted sum of the losses + // from the top blobs, whose gradients we may want to test individually). 
+ layer->Forward(bottom, top); + // Get additional loss from the objective + GetObjAndGradient(*layer, top, top_id, top_data_id); + layer->Backward(top, propagate_down, bottom); + // Store computed gradients for all checked blobs + vector > > + computed_gradient_blobs(blobs_to_check.size()); + for (int blob_id = 0; blob_id < blobs_to_check.size(); ++blob_id) { + Blob* current_blob = blobs_to_check[blob_id]; + computed_gradient_blobs[blob_id].reset(new Blob()); + computed_gradient_blobs[blob_id]->ReshapeLike(*current_blob); + const int count = blobs_to_check[blob_id]->count(); + const Dtype* diff = blobs_to_check[blob_id]->cpu_diff(); + Dtype* computed_gradients = + computed_gradient_blobs[blob_id]->mutable_cpu_data(); + caffe_copy(count, diff, computed_gradients); + } + // Compute derivative of top w.r.t. each bottom and parameter input using + // finite differencing. + // LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs."; + for (int blob_id = 0; blob_id < blobs_to_check.size(); ++blob_id) { + Blob* current_blob = blobs_to_check[blob_id]; + const Dtype* computed_gradients = + computed_gradient_blobs[blob_id]->cpu_data(); + // LOG(ERROR) << "Blob " << blob_id << ": checking " + // << current_blob->count() << " parameters."; + for (int feat_id = 0; feat_id < current_blob->count(); ++feat_id) { + // For an element-wise layer, we only need to do finite differencing to + // compute the derivative of top[top_id][top_data_id] w.r.t. + // bottom[blob_id][i] only for i == top_data_id. For any other + // i != top_data_id, we know the derivative is 0 by definition, and simply + // check that that's true. + Dtype estimated_gradient = 0; + Dtype positive_objective = 0; + Dtype negative_objective = 0; + if (!element_wise || (feat_id == top_data_id)) { + // Do finite differencing. + // Compute loss with stepsize_ added to input. + current_blob->mutable_cpu_data()[feat_id] += stepsize_; + Caffe::set_random_seed(seed_); + layer->Forward(bottom, top); + positive_objective = + GetObjAndGradient(*layer, top, top_id, top_data_id); + // Compute loss with stepsize_ subtracted from input. + current_blob->mutable_cpu_data()[feat_id] -= stepsize_ * 2; + Caffe::set_random_seed(seed_); + layer->Forward(bottom, top); + negative_objective = + GetObjAndGradient(*layer, top, top_id, top_data_id); + // Recover original input value. + current_blob->mutable_cpu_data()[feat_id] += stepsize_; + estimated_gradient = (positive_objective - negative_objective) / + stepsize_ / 2.; + } + Dtype computed_gradient = computed_gradients[feat_id]; + Dtype feature = current_blob->cpu_data()[feat_id]; + // LOG(ERROR) << "debug: " << current_blob->cpu_data()[feat_id] << " " + // << current_blob->cpu_diff()[feat_id]; + if (kink_ - kink_range_ > fabs(feature) + || fabs(feature) > kink_ + kink_range_) { + // We check relative accuracy, but for too small values, we threshold + // the scale factor by 1. 
+ Dtype scale = std::max( + std::max(fabs(computed_gradient), fabs(estimated_gradient)), 1.); + EXPECT_NEAR(computed_gradient, estimated_gradient, threshold_ * scale) + << "debug: (top_id, top_data_id, blob_id, feat_id)=" + << top_id << "," << top_data_id << "," << blob_id << "," << feat_id + << "; feat = " << feature + << "; objective+ = " << positive_objective + << "; objective- = " << negative_objective; + } + // LOG(ERROR) << "Feature: " << current_blob->cpu_data()[feat_id]; + // LOG(ERROR) << "computed gradient: " << computed_gradient + // << " estimated_gradient: " << estimated_gradient; + } + } +} + +template +void GradientChecker::CheckGradientExhaustive(Layer* layer, + const vector*>& bottom, const vector*>& top, + int check_bottom) { + layer->SetUp(bottom, top); + CHECK_GT(top.size(), 0) << "Exhaustive mode requires at least one top blob."; + // LOG(ERROR) << "Exhaustive Mode."; + for (int i = 0; i < top.size(); ++i) { + // LOG(ERROR) << "Exhaustive: blob " << i << " size " << top[i]->count(); + for (int j = 0; j < top[i]->count(); ++j) { + // LOG(ERROR) << "Exhaustive: blob " << i << " data " << j; + CheckGradientSingle(layer, bottom, top, check_bottom, i, j); + } + } +} + +template +void GradientChecker::CheckGradientEltwise(Layer* layer, + const vector*>& bottom, const vector*>& top) { + layer->SetUp(bottom, top); + CHECK_GT(top.size(), 0) << "Eltwise mode requires at least one top blob."; + const int check_bottom = -1; + const bool element_wise = true; + for (int i = 0; i < top.size(); ++i) { + for (int j = 0; j < top[i]->count(); ++j) { + CheckGradientSingle(layer, bottom, top, check_bottom, i, j, element_wise); + } + } +} + +template +void GradientChecker::CheckGradientNet( + const Net& net, const vector*>& input) { + const vector > >& layers = net.layers(); + vector*> >& bottom_vecs = net.bottom_vecs(); + vector*> >& top_vecs = net.top_vecs(); + for (int i = 0; i < layers.size(); ++i) { + net.Forward(input); + LOG(ERROR) << "Checking gradient for " << layers[i]->layer_param().name(); + CheckGradientExhaustive(*(layers[i].get()), bottom_vecs[i], top_vecs[i]); + } +} + +template +Dtype GradientChecker::GetObjAndGradient(const Layer& layer, + const vector*>& top, int top_id, int top_data_id) { + Dtype loss = 0; + if (top_id < 0) { + // the loss will be half of the sum of squares of all outputs + for (int i = 0; i < top.size(); ++i) { + Blob* top_blob = top[i]; + const Dtype* top_blob_data = top_blob->cpu_data(); + Dtype* top_blob_diff = top_blob->mutable_cpu_diff(); + int count = top_blob->count(); + for (int j = 0; j < count; ++j) { + loss += top_blob_data[j] * top_blob_data[j]; + } + // set the diff: simply the data. + caffe_copy(top_blob->count(), top_blob_data, top_blob_diff); + } + loss /= 2.; + } else { + // the loss will be the top_data_id-th element in the top_id-th blob. 
+ for (int i = 0; i < top.size(); ++i) { + Blob* top_blob = top[i]; + Dtype* top_blob_diff = top_blob->mutable_cpu_diff(); + caffe_set(top_blob->count(), Dtype(0), top_blob_diff); + } + const Dtype loss_weight = 2; + loss = top[top_id]->cpu_data()[top_data_id] * loss_weight; + top[top_id]->mutable_cpu_diff()[top_data_id] = loss_weight; + } + return loss; +} + +} // namespace caffe + +#endif // CAFFE_TEST_GRADIENT_CHECK_UTIL_H_ diff --git a/include/caffe/util/benchmark.hpp b/include/caffe/util/benchmark.hpp new file mode 100755 index 0000000..d635827 --- /dev/null +++ b/include/caffe/util/benchmark.hpp @@ -0,0 +1,52 @@ +#ifndef CAFFE_UTIL_BENCHMARK_H_ +#define CAFFE_UTIL_BENCHMARK_H_ + +#include + +#include "caffe/util/device_alternate.hpp" + +namespace caffe { + +class Timer { + public: + Timer(); + virtual ~Timer(); + virtual void Start(); + virtual void Stop(); + virtual float MilliSeconds(); + virtual float MicroSeconds(); + virtual float Seconds(); + + inline bool initted() { return initted_; } + inline bool running() { return running_; } + inline bool has_run_at_least_once() { return has_run_at_least_once_; } + + protected: + void Init(); + + bool initted_; + bool running_; + bool has_run_at_least_once_; +#ifndef CPU_ONLY + cudaEvent_t start_gpu_; + cudaEvent_t stop_gpu_; +#endif + boost::posix_time::ptime start_cpu_; + boost::posix_time::ptime stop_cpu_; + float elapsed_milliseconds_; + float elapsed_microseconds_; +}; + +class CPUTimer : public Timer { + public: + explicit CPUTimer(); + virtual ~CPUTimer() {} + virtual void Start(); + virtual void Stop(); + virtual float MilliSeconds(); + virtual float MicroSeconds(); +}; + +} // namespace caffe + +#endif // CAFFE_UTIL_BENCHMARK_H_ diff --git a/include/caffe/util/blocking_queue.hpp b/include/caffe/util/blocking_queue.hpp new file mode 100755 index 0000000..955e12c --- /dev/null +++ b/include/caffe/util/blocking_queue.hpp @@ -0,0 +1,47 @@ +#ifndef CAFFE_UTIL_BLOCKING_QUEUE_HPP_ +#define CAFFE_UTIL_BLOCKING_QUEUE_HPP_ + +#include +#include + +#include "caffe/common.hpp" + +namespace caffe { + +template +class BlockingQueue { + public: + explicit BlockingQueue(); + + void push(const T& t); + + bool try_pop(T* t); + + // This logs a message if the threads needs to be blocked + // useful for detecting e.g. when data feeding is too slow + T pop(const string& log_on_wait = ""); + + bool try_peek(T* t); + + // Return element without removing it + T peek(); + + size_t size() const; + + protected: + /** + Move synchronization fields out instead of including boost/thread.hpp + to avoid a boost/NVCC issues (#1009, #1010) on OSX. Also fails on + Linux CUDA 7.0.18. 
+ */ + class sync; + + std::queue queue_; + shared_ptr sync_; + +DISABLE_COPY_AND_ASSIGN(BlockingQueue); +}; + +} // namespace caffe + +#endif diff --git a/include/caffe/util/cudnn.hpp b/include/caffe/util/cudnn.hpp new file mode 100755 index 0000000..b531dd5 --- /dev/null +++ b/include/caffe/util/cudnn.hpp @@ -0,0 +1,132 @@ +#ifndef CAFFE_UTIL_CUDNN_H_ +#define CAFFE_UTIL_CUDNN_H_ +#ifdef USE_CUDNN + +#include + +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" + +#define CUDNN_CHECK(condition) \ + do { \ + cudnnStatus_t status = condition; \ + CHECK_EQ(status, CUDNN_STATUS_SUCCESS) << " "\ + << cudnnGetErrorString(status); \ + } while (0) + +inline const char* cudnnGetErrorString(cudnnStatus_t status) { + switch (status) { + case CUDNN_STATUS_SUCCESS: + return "CUDNN_STATUS_SUCCESS"; + case CUDNN_STATUS_NOT_INITIALIZED: + return "CUDNN_STATUS_NOT_INITIALIZED"; + case CUDNN_STATUS_ALLOC_FAILED: + return "CUDNN_STATUS_ALLOC_FAILED"; + case CUDNN_STATUS_BAD_PARAM: + return "CUDNN_STATUS_BAD_PARAM"; + case CUDNN_STATUS_INTERNAL_ERROR: + return "CUDNN_STATUS_INTERNAL_ERROR"; + case CUDNN_STATUS_INVALID_VALUE: + return "CUDNN_STATUS_INVALID_VALUE"; + case CUDNN_STATUS_ARCH_MISMATCH: + return "CUDNN_STATUS_ARCH_MISMATCH"; + case CUDNN_STATUS_MAPPING_ERROR: + return "CUDNN_STATUS_MAPPING_ERROR"; + case CUDNN_STATUS_EXECUTION_FAILED: + return "CUDNN_STATUS_EXECUTION_FAILED"; + case CUDNN_STATUS_NOT_SUPPORTED: + return "CUDNN_STATUS_NOT_SUPPORTED"; + case CUDNN_STATUS_LICENSE_ERROR: + return "CUDNN_STATUS_LICENSE_ERROR"; + } + return "Unknown cudnn status"; +} + +namespace caffe { + +namespace cudnn { + +template class dataType; +template<> class dataType { + public: + static const cudnnDataType_t type = CUDNN_DATA_FLOAT; + static float oneval, zeroval; + static const void *one, *zero; +}; +template<> class dataType { + public: + static const cudnnDataType_t type = CUDNN_DATA_DOUBLE; + static double oneval, zeroval; + static const void *one, *zero; +}; + +template +inline void createTensor4dDesc(cudnnTensorDescriptor_t* desc) { + CUDNN_CHECK(cudnnCreateTensorDescriptor(desc)); +} + +template +inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc, + int n, int c, int h, int w, + int stride_n, int stride_c, int stride_h, int stride_w) { + CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(*desc, dataType::type, + n, c, h, w, stride_n, stride_c, stride_h, stride_w)); +} + +template +inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc, + int n, int c, int h, int w) { + const int stride_w = 1; + const int stride_h = w * stride_w; + const int stride_c = h * stride_h; + const int stride_n = c * stride_c; + setTensor4dDesc(desc, n, c, h, w, + stride_n, stride_c, stride_h, stride_w); +} + +template +inline void createFilterDesc(cudnnFilterDescriptor_t* desc, + int n, int c, int h, int w) { + CUDNN_CHECK(cudnnCreateFilterDescriptor(desc)); + CUDNN_CHECK(cudnnSetFilter4dDescriptor(*desc, dataType::type, + n, c, h, w)); +} + +template +inline void createConvolutionDesc(cudnnConvolutionDescriptor_t* conv) { + CUDNN_CHECK(cudnnCreateConvolutionDescriptor(conv)); +} + +template +inline void setConvolutionDesc(cudnnConvolutionDescriptor_t* conv, + cudnnTensorDescriptor_t bottom, cudnnFilterDescriptor_t filter, + int pad_h, int pad_w, int stride_h, int stride_w) { + CUDNN_CHECK(cudnnSetConvolution2dDescriptor(*conv, + pad_h, pad_w, stride_h, stride_w, 1, 1, CUDNN_CROSS_CORRELATION)); +} + +template +inline void createPoolingDesc(cudnnPoolingDescriptor_t* pool_desc, + PoolingParameter_PoolMethod 
poolmethod, cudnnPoolingMode_t* mode, + int h, int w, int pad_h, int pad_w, int stride_h, int stride_w) { + switch (poolmethod) { + case PoolingParameter_PoolMethod_MAX: + *mode = CUDNN_POOLING_MAX; + break; + case PoolingParameter_PoolMethod_AVE: + *mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING; + break; + default: + LOG(FATAL) << "Unknown pooling method."; + } + CUDNN_CHECK(cudnnCreatePoolingDescriptor(pool_desc)); + CUDNN_CHECK(cudnnSetPooling2dDescriptor(*pool_desc, *mode, h, w, + pad_h, pad_w, stride_h, stride_w)); +} + +} // namespace cudnn + +} // namespace caffe + +#endif // USE_CUDNN +#endif // CAFFE_UTIL_CUDNN_H_ diff --git a/include/caffe/util/db.hpp b/include/caffe/util/db.hpp new file mode 100755 index 0000000..59ec3d3 --- /dev/null +++ b/include/caffe/util/db.hpp @@ -0,0 +1,54 @@ +#ifndef CAFFE_UTIL_DB_HPP +#define CAFFE_UTIL_DB_HPP + +#include + +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { namespace db { + +enum Mode { READ, WRITE, NEW }; + +class Cursor { + public: + Cursor() { } + virtual ~Cursor() { } + virtual void SeekToFirst() = 0; + virtual void Next() = 0; + virtual string key() = 0; + virtual string value() = 0; + virtual bool valid() = 0; + + DISABLE_COPY_AND_ASSIGN(Cursor); +}; + +class Transaction { + public: + Transaction() { } + virtual ~Transaction() { } + virtual void Put(const string& key, const string& value) = 0; + virtual void Commit() = 0; + + DISABLE_COPY_AND_ASSIGN(Transaction); +}; + +class DB { + public: + DB() { } + virtual ~DB() { } + virtual void Open(const string& source, Mode mode) = 0; + virtual void Close() = 0; + virtual Cursor* NewCursor() = 0; + virtual Transaction* NewTransaction() = 0; + + DISABLE_COPY_AND_ASSIGN(DB); +}; + +DB* GetDB(DataParameter::DB backend); +DB* GetDB(const string& backend); + +} // namespace db +} // namespace caffe + +#endif // CAFFE_UTIL_DB_HPP diff --git a/include/caffe/util/db_leveldb.hpp b/include/caffe/util/db_leveldb.hpp new file mode 100755 index 0000000..1062355 --- /dev/null +++ b/include/caffe/util/db_leveldb.hpp @@ -0,0 +1,73 @@ +#ifndef CAFFE_UTIL_DB_LEVELDB_HPP +#define CAFFE_UTIL_DB_LEVELDB_HPP + +#include + +#include "leveldb/db.h" +#include "leveldb/write_batch.h" + +#include "caffe/util/db.hpp" + +namespace caffe { namespace db { + +class LevelDBCursor : public Cursor { + public: + explicit LevelDBCursor(leveldb::Iterator* iter) + : iter_(iter) { SeekToFirst(); } + ~LevelDBCursor() { delete iter_; } + virtual void SeekToFirst() { iter_->SeekToFirst(); } + virtual void Next() { iter_->Next(); } + virtual string key() { return iter_->key().ToString(); } + virtual string value() { return iter_->value().ToString(); } + virtual bool valid() { return iter_->Valid(); } + + private: + leveldb::Iterator* iter_; +}; + +class LevelDBTransaction : public Transaction { + public: + explicit LevelDBTransaction(leveldb::DB* db) : db_(db) { CHECK_NOTNULL(db_); } + virtual void Put(const string& key, const string& value) { + batch_.Put(key, value); + } + virtual void Commit() { + leveldb::Status status = db_->Write(leveldb::WriteOptions(), &batch_); + CHECK(status.ok()) << "Failed to write batch to leveldb " + << std::endl << status.ToString(); + } + + private: + leveldb::DB* db_; + leveldb::WriteBatch batch_; + + DISABLE_COPY_AND_ASSIGN(LevelDBTransaction); +}; + +class LevelDB : public DB { + public: + LevelDB() : db_(NULL) { } + virtual ~LevelDB() { Close(); } + virtual void Open(const string& source, Mode mode); + virtual void Close() { + if (db_ != NULL) { + delete 
db_; + db_ = NULL; + } + } + virtual LevelDBCursor* NewCursor() { + return new LevelDBCursor(db_->NewIterator(leveldb::ReadOptions())); + } + virtual LevelDBTransaction* NewTransaction() { + return new LevelDBTransaction(db_); + } + + private: + leveldb::DB* db_; +}; + + +} // namespace db +} // namespace caffe + +#endif // CAFFE_UTIL_DB_LEVELDB_HPP diff --git a/include/caffe/util/db_lmdb.hpp b/include/caffe/util/db_lmdb.hpp new file mode 100755 index 0000000..cc7c90a --- /dev/null +++ b/include/caffe/util/db_lmdb.hpp @@ -0,0 +1,91 @@ +#ifndef CAFFE_UTIL_DB_LMDB_HPP +#define CAFFE_UTIL_DB_LMDB_HPP + +#include + +#include "lmdb.h" + +#include "caffe/util/db.hpp" + +namespace caffe { namespace db { + +inline void MDB_CHECK(int mdb_status) { + CHECK_EQ(mdb_status, MDB_SUCCESS) << mdb_strerror(mdb_status); +} + +class LMDBCursor : public Cursor { + public: + explicit LMDBCursor(MDB_txn* mdb_txn, MDB_cursor* mdb_cursor) + : mdb_txn_(mdb_txn), mdb_cursor_(mdb_cursor), valid_(false) { + SeekToFirst(); + } + virtual ~LMDBCursor() { + mdb_cursor_close(mdb_cursor_); + mdb_txn_abort(mdb_txn_); + } + virtual void SeekToFirst() { Seek(MDB_FIRST); } + virtual void Next() { Seek(MDB_NEXT); } + virtual string key() { + return string(static_cast(mdb_key_.mv_data), mdb_key_.mv_size); + } + virtual string value() { + return string(static_cast(mdb_value_.mv_data), + mdb_value_.mv_size); + } + virtual bool valid() { return valid_; } + + private: + void Seek(MDB_cursor_op op) { + int mdb_status = mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, op); + if (mdb_status == MDB_NOTFOUND) { + valid_ = false; + } else { + MDB_CHECK(mdb_status); + valid_ = true; + } + } + + MDB_txn* mdb_txn_; + MDB_cursor* mdb_cursor_; + MDB_val mdb_key_, mdb_value_; + bool valid_; +}; + +class LMDBTransaction : public Transaction { + public: + explicit LMDBTransaction(MDB_dbi* mdb_dbi, MDB_txn* mdb_txn) + : mdb_dbi_(mdb_dbi), mdb_txn_(mdb_txn) { } + virtual void Put(const string& key, const string& value); + virtual void Commit() { MDB_CHECK(mdb_txn_commit(mdb_txn_)); } + + private: + MDB_dbi* mdb_dbi_; + MDB_txn* mdb_txn_; + + DISABLE_COPY_AND_ASSIGN(LMDBTransaction); +}; + +class LMDB : public DB { + public: + LMDB() : mdb_env_(NULL) { } + virtual ~LMDB() { Close(); } + virtual void Open(const string& source, Mode mode); + virtual void Close() { + if (mdb_env_ != NULL) { + mdb_dbi_close(mdb_env_, mdb_dbi_); + mdb_env_close(mdb_env_); + mdb_env_ = NULL; + } + } + virtual LMDBCursor* NewCursor(); + virtual LMDBTransaction* NewTransaction(); + + private: + MDB_env* mdb_env_; + MDB_dbi mdb_dbi_; +}; + +} // namespace db +} // namespace caffe + +#endif // CAFFE_UTIL_DB_LMDB_HPP diff --git a/include/caffe/util/device_alternate.hpp b/include/caffe/util/device_alternate.hpp new file mode 100755 index 0000000..6ea595d --- /dev/null +++ b/include/caffe/util/device_alternate.hpp @@ -0,0 +1,102 @@ +#ifndef CAFFE_UTIL_DEVICE_ALTERNATE_H_ +#define CAFFE_UTIL_DEVICE_ALTERNATE_H_ + +#ifdef CPU_ONLY // CPU-only Caffe. + +#include + +// Stub out GPU calls as unavailable. + +#define NO_GPU LOG(FATAL) << "Cannot use GPU in CPU-only Caffe: check mode." 
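+
+// The STUB_GPU* macros below generate CPU-only definitions of a layer's GPU
+// entry points that simply hit NO_GPU; for example, STUB_GPU(SoftmaxLayer)
+// stubs out SoftmaxLayer::Forward_gpu and SoftmaxLayer::Backward_gpu when
+// Caffe is built without CUDA.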
+ +#define STUB_GPU(classname) \ +template \ +void classname::Forward_gpu(const vector*>& bottom, \ + const vector*>& top) { NO_GPU; } \ +template \ +void classname::Backward_gpu(const vector*>& top, \ + const vector& propagate_down, \ + const vector*>& bottom) { NO_GPU; } \ + +#define STUB_GPU_FORWARD(classname, funcname) \ +template \ +void classname::funcname##_##gpu(const vector*>& bottom, \ + const vector*>& top) { NO_GPU; } \ + +#define STUB_GPU_BACKWARD(classname, funcname) \ +template \ +void classname::funcname##_##gpu(const vector*>& top, \ + const vector& propagate_down, \ + const vector*>& bottom) { NO_GPU; } \ + +#else // Normal GPU + CPU Caffe. + +#include +#include +#include +#include +#include // cuda driver types +#ifdef USE_CUDNN // cuDNN acceleration library. +#include "caffe/util/cudnn.hpp" +#endif + +// +// CUDA macros +// + +// CUDA: various checks for different function calls. +#define CUDA_CHECK(condition) \ + /* Code block avoids redefinition of cudaError_t error */ \ + do { \ + cudaError_t error = condition; \ + CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ + } while (0) + +#define CUBLAS_CHECK(condition) \ + do { \ + cublasStatus_t status = condition; \ + CHECK_EQ(status, CUBLAS_STATUS_SUCCESS) << " " \ + << caffe::cublasGetErrorString(status); \ + } while (0) + +#define CURAND_CHECK(condition) \ + do { \ + curandStatus_t status = condition; \ + CHECK_EQ(status, CURAND_STATUS_SUCCESS) << " " \ + << caffe::curandGetErrorString(status); \ + } while (0) + +// CUDA: grid stride looping +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ + i < (n); \ + i += blockDim.x * gridDim.x) + +// CUDA: check for error after kernel execution and exit loudly if there is one. +#define CUDA_POST_KERNEL_CHECK CUDA_CHECK(cudaPeekAtLastError()) + +namespace caffe { + +// CUDA: library error reporting. +const char* cublasGetErrorString(cublasStatus_t error); +const char* curandGetErrorString(curandStatus_t error); + +// CUDA: thread number configuration. +// Use 1024 threads per block, which requires cuda sm_2x or above, +// or fall back to attempt compatibility (best of luck to you). +#if __CUDA_ARCH__ >= 200 + const int CAFFE_CUDA_NUM_THREADS = 1024; +#else + const int CAFFE_CUDA_NUM_THREADS = 512; +#endif + +// CUDA: number of blocks for threads. 
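+// CAFFE_GET_BLOCKS rounds up so that every element is covered: e.g. with
+// N = 1000000 and 1024 threads per block it returns (1000000 + 1023) / 1024
+// = 977 blocks, and the i < n test in CUDA_KERNEL_LOOP discards the handful
+// of surplus threads.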
+inline int CAFFE_GET_BLOCKS(const int N) { + return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; +} + +} // namespace caffe + +#endif // CPU_ONLY + +#endif // CAFFE_UTIL_DEVICE_ALTERNATE_H_ diff --git a/include/caffe/util/hdf5.hpp b/include/caffe/util/hdf5.hpp new file mode 100755 index 0000000..ce568c5 --- /dev/null +++ b/include/caffe/util/hdf5.hpp @@ -0,0 +1,39 @@ +#ifndef CAFFE_UTIL_HDF5_H_ +#define CAFFE_UTIL_HDF5_H_ + +#include + +#include "hdf5.h" +#include "hdf5_hl.h" + +#include "caffe/blob.hpp" + +namespace caffe { + +template +void hdf5_load_nd_dataset_helper( + hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, + Blob* blob); + +template +void hdf5_load_nd_dataset( + hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, + Blob* blob); + +template +void hdf5_save_nd_dataset( + const hid_t file_id, const string& dataset_name, const Blob& blob, + bool write_diff = false); + +int hdf5_load_int(hid_t loc_id, const string& dataset_name); +void hdf5_save_int(hid_t loc_id, const string& dataset_name, int i); +string hdf5_load_string(hid_t loc_id, const string& dataset_name); +void hdf5_save_string(hid_t loc_id, const string& dataset_name, + const string& s); + +int hdf5_get_num_links(hid_t loc_id); +string hdf5_get_name_by_idx(hid_t loc_id, int idx); + +} // namespace caffe + +#endif // CAFFE_UTIL_HDF5_H_ diff --git a/include/caffe/util/im2col.hpp b/include/caffe/util/im2col.hpp new file mode 100755 index 0000000..0051e2f --- /dev/null +++ b/include/caffe/util/im2col.hpp @@ -0,0 +1,32 @@ +#ifndef _CAFFE_UTIL_IM2COL_HPP_ +#define _CAFFE_UTIL_IM2COL_HPP_ + +namespace caffe { + +template +void im2col_cpu(const Dtype* data_im, const int channels, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, Dtype* data_col); + +template +void col2im_cpu(const Dtype* data_col, const int channels, + const int height, const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, Dtype* data_im); + +template +void im2col_gpu(const Dtype* data_im, const int channels, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, Dtype* data_col); + +template +void col2im_gpu(const Dtype* data_col, const int channels, + const int height, const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, Dtype* data_im); + +} // namespace caffe + +#endif // CAFFE_UTIL_IM2COL_HPP_ diff --git a/include/caffe/util/insert_splits.hpp b/include/caffe/util/insert_splits.hpp new file mode 100755 index 0000000..446abb8 --- /dev/null +++ b/include/caffe/util/insert_splits.hpp @@ -0,0 +1,26 @@ +#ifndef _CAFFE_UTIL_INSERT_SPLITS_HPP_ +#define _CAFFE_UTIL_INSERT_SPLITS_HPP_ + +#include + +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +// Copy NetParameters with SplitLayers added to replace any shared bottom +// blobs with unique bottom blobs provided by the SplitLayer. 
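+// For example, a blob consumed by two layers gets a SplitLayer producing two
+// uniquely named copies, and each consumer is rewired to its own copy so that
+// the split accumulates the gradients from both paths during backpropagation.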
+void InsertSplits(const NetParameter& param, NetParameter* param_split); + +void ConfigureSplitLayer(const string& layer_name, const string& blob_name, + const int blob_idx, const int split_count, const float loss_weight, + LayerParameter* split_layer_param); + +string SplitLayerName(const string& layer_name, const string& blob_name, + const int blob_idx); + +string SplitBlobName(const string& layer_name, const string& blob_name, + const int blob_idx, const int split_idx); + +} // namespace caffe + +#endif // CAFFE_UTIL_INSERT_SPLITS_HPP_ diff --git a/include/caffe/util/io.hpp b/include/caffe/util/io.hpp new file mode 100755 index 0000000..c0938ad --- /dev/null +++ b/include/caffe/util/io.hpp @@ -0,0 +1,141 @@ +#ifndef CAFFE_UTIL_IO_H_ +#define CAFFE_UTIL_IO_H_ + +#include +#include + +#include "google/protobuf/message.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +using ::google::protobuf::Message; + +inline void MakeTempFilename(string* temp_filename) { + temp_filename->clear(); + *temp_filename = "/tmp/caffe_test.XXXXXX"; + char* temp_filename_cstr = new char[temp_filename->size() + 1]; + // NOLINT_NEXT_LINE(runtime/printf) + strcpy(temp_filename_cstr, temp_filename->c_str()); + int fd = mkstemp(temp_filename_cstr); + CHECK_GE(fd, 0) << "Failed to open a temporary file at: " << *temp_filename; + close(fd); + *temp_filename = temp_filename_cstr; + delete[] temp_filename_cstr; +} + +inline void MakeTempDir(string* temp_dirname) { + temp_dirname->clear(); + *temp_dirname = "/tmp/caffe_test.XXXXXX"; + char* temp_dirname_cstr = new char[temp_dirname->size() + 1]; + // NOLINT_NEXT_LINE(runtime/printf) + strcpy(temp_dirname_cstr, temp_dirname->c_str()); + char* mkdtemp_result = mkdtemp(temp_dirname_cstr); + CHECK(mkdtemp_result != NULL) + << "Failed to create a temporary directory at: " << *temp_dirname; + *temp_dirname = temp_dirname_cstr; + delete[] temp_dirname_cstr; +} + +bool ReadProtoFromTextFile(const char* filename, Message* proto); + +inline bool ReadProtoFromTextFile(const string& filename, Message* proto) { + return ReadProtoFromTextFile(filename.c_str(), proto); +} + +inline void ReadProtoFromTextFileOrDie(const char* filename, Message* proto) { + CHECK(ReadProtoFromTextFile(filename, proto)); +} + +inline void ReadProtoFromTextFileOrDie(const string& filename, Message* proto) { + ReadProtoFromTextFileOrDie(filename.c_str(), proto); +} + +void WriteProtoToTextFile(const Message& proto, const char* filename); +inline void WriteProtoToTextFile(const Message& proto, const string& filename) { + WriteProtoToTextFile(proto, filename.c_str()); +} + +bool ReadProtoFromBinaryFile(const char* filename, Message* proto); + +inline bool ReadProtoFromBinaryFile(const string& filename, Message* proto) { + return ReadProtoFromBinaryFile(filename.c_str(), proto); +} + +inline void ReadProtoFromBinaryFileOrDie(const char* filename, Message* proto) { + CHECK(ReadProtoFromBinaryFile(filename, proto)); +} + +inline void ReadProtoFromBinaryFileOrDie(const string& filename, + Message* proto) { + ReadProtoFromBinaryFileOrDie(filename.c_str(), proto); +} + + +void WriteProtoToBinaryFile(const Message& proto, const char* filename); +inline void WriteProtoToBinaryFile( + const Message& proto, const string& filename) { + WriteProtoToBinaryFile(proto, filename.c_str()); +} + +bool ReadFileToDatum(const string& filename, const int label, Datum* datum); + +inline bool ReadFileToDatum(const string& filename, Datum* datum) { + return 
ReadFileToDatum(filename, -1, datum); +} + +bool ReadImageToDatum(const string& filename, const int label, + const int height, const int width, const bool is_color, + const std::string & encoding, Datum* datum); + +inline bool ReadImageToDatum(const string& filename, const int label, + const int height, const int width, const bool is_color, Datum* datum) { + return ReadImageToDatum(filename, label, height, width, is_color, + "", datum); +} + +inline bool ReadImageToDatum(const string& filename, const int label, + const int height, const int width, Datum* datum) { + return ReadImageToDatum(filename, label, height, width, true, datum); +} + +inline bool ReadImageToDatum(const string& filename, const int label, + const bool is_color, Datum* datum) { + return ReadImageToDatum(filename, label, 0, 0, is_color, datum); +} + +inline bool ReadImageToDatum(const string& filename, const int label, + Datum* datum) { + return ReadImageToDatum(filename, label, 0, 0, true, datum); +} + +inline bool ReadImageToDatum(const string& filename, const int label, + const std::string & encoding, Datum* datum) { + return ReadImageToDatum(filename, label, 0, 0, true, encoding, datum); +} + +bool DecodeDatumNative(Datum* datum); +bool DecodeDatum(Datum* datum, bool is_color); + +cv::Mat ReadImageToCVMat(const string& filename, + const int height, const int width, const bool is_color); + +cv::Mat ReadImageToCVMat(const string& filename, + const int height, const int width); + +cv::Mat ReadImageToCVMat(const string& filename, + const bool is_color); + +cv::Mat ReadImageToCVMat(const string& filename); + +cv::Mat DecodeDatumToCVMatNative(const Datum& datum); +cv::Mat DecodeDatumToCVMat(const Datum& datum, bool is_color); + +void CVMatToDatum(const cv::Mat& cv_img, Datum* datum); + +} // namespace caffe + +#endif // CAFFE_UTIL_IO_H_ diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp new file mode 100755 index 0000000..2cacd8e --- /dev/null +++ b/include/caffe/util/math_functions.hpp @@ -0,0 +1,280 @@ +#ifndef CAFFE_UTIL_MATH_FUNCTIONS_H_ +#define CAFFE_UTIL_MATH_FUNCTIONS_H_ + +#include +#include // for std::fabs and std::signbit + +#include "glog/logging.h" + +#include "caffe/common.hpp" +#include "caffe/util/device_alternate.hpp" +#include "caffe/util/mkl_alternate.hpp" + +namespace caffe { + +// Caffe gemm provides a simpler interface to the gemm functions, with the +// limitation that the data has to be contiguous in memory. 
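+// Following BLAS conventions, caffe_cpu_gemm computes
+//   C = alpha * op(A) * op(B) + beta * C,
+// where op(A) is M x K, op(B) is K x N and C is M x N, all stored as dense
+// row-major arrays with no stride padding (hence the contiguity requirement).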
+template +void caffe_cpu_gemm(const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, + const Dtype alpha, const Dtype* A, const Dtype* B, const Dtype beta, + Dtype* C); + +template +void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, const int N, + const Dtype alpha, const Dtype* A, const Dtype* x, const Dtype beta, + Dtype* y); + +template +void caffe_axpy(const int N, const Dtype alpha, const Dtype* X, + Dtype* Y); + +template +void caffe_cpu_axpby(const int N, const Dtype alpha, const Dtype* X, + const Dtype beta, Dtype* Y); + +template +void caffe_copy(const int N, const Dtype *X, Dtype *Y); + +template +void caffe_set(const int N, const Dtype alpha, Dtype *X); + +inline void caffe_memset(const size_t N, const int alpha, void* X) { + memset(X, alpha, N); // NOLINT(caffe/alt_fn) +} + +template +void caffe_add_scalar(const int N, const Dtype alpha, Dtype *X); + +template +void caffe_scal(const int N, const Dtype alpha, Dtype *X); + +template +void caffe_sqr(const int N, const Dtype* a, Dtype* y); + +template +void caffe_add(const int N, const Dtype* a, const Dtype* b, Dtype* y); + +template +void caffe_sub(const int N, const Dtype* a, const Dtype* b, Dtype* y); + +template +void caffe_mul(const int N, const Dtype* a, const Dtype* b, Dtype* y); + +template +void caffe_div(const int N, const Dtype* a, const Dtype* b, Dtype* y); + +template +void caffe_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); + +unsigned int caffe_rng_rand(); + +template +Dtype caffe_nextafter(const Dtype b); + +template +void caffe_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r); + +template +void caffe_rng_gaussian(const int n, const Dtype mu, const Dtype sigma, + Dtype* r); + +template +void caffe_rng_bernoulli(const int n, const Dtype p, int* r); + +template +void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r); + +template +void caffe_exp(const int n, const Dtype* a, Dtype* y); + +template +void caffe_log(const int n, const Dtype* a, Dtype* y); + +template +void caffe_abs(const int n, const Dtype* a, Dtype* y); + +template +Dtype caffe_cpu_dot(const int n, const Dtype* x, const Dtype* y); + +template +Dtype caffe_cpu_strided_dot(const int n, const Dtype* x, const int incx, + const Dtype* y, const int incy); + +template +int caffe_cpu_hamming_distance(const int n, const Dtype* x, const Dtype* y); + +// Returns the sum of the absolute values of the elements of vector x +template +Dtype caffe_cpu_asum(const int n, const Dtype* x); + +// the branchless, type-safe version from +// http://stackoverflow.com/questions/1903954/is-there-a-standard-sign-function-signum-sgn-in-c-c +template +inline int8_t caffe_sign(Dtype val) { + return (Dtype(0) < val) - (val < Dtype(0)); +} + +// The following two macros are modifications of DEFINE_VSL_UNARY_FUNC +// in include/caffe/util/mkl_alternate.hpp authored by @Rowland Depp. +// Please refer to commit 7e8ef25c7 of the boost-eigen branch. +// Git cherry picking that commit caused a conflict hard to resolve and +// copying that file in convenient for code reviewing. +// So they have to be pasted here temporarily. 
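+// For instance, DEFINE_CAFFE_CPU_UNARY_FUNC(sign, y[i] = caffe_sign(x[i]))
+// below expands to a caffe_cpu_sign(n, x, y) that applies the operation to
+// each of the n elements.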
+#define DEFINE_CAFFE_CPU_UNARY_FUNC(name, operation) \ + template \ + void caffe_cpu_##name(const int n, const Dtype* x, Dtype* y) { \ + CHECK_GT(n, 0); CHECK(x); CHECK(y); \ + for (int i = 0; i < n; ++i) { \ + operation; \ + } \ + } + +// output is 1 for the positives, 0 for zero, and -1 for the negatives +DEFINE_CAFFE_CPU_UNARY_FUNC(sign, y[i] = caffe_sign(x[i])); + +// This returns a nonzero value if the input has its sign bit set. +// The name sngbit is meant to avoid conflicts with std::signbit in the macro. +// The extra parens are needed because CUDA < 6.5 defines signbit as a macro, +// and we don't want that to expand here when CUDA headers are also included. +DEFINE_CAFFE_CPU_UNARY_FUNC(sgnbit, \ + y[i] = static_cast((std::signbit)(x[i]))); + +DEFINE_CAFFE_CPU_UNARY_FUNC(fabs, y[i] = std::fabs(x[i])); + +template +void caffe_cpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y); + +#ifndef CPU_ONLY // GPU + +// Decaf gpu gemm provides an interface that is almost the same as the cpu +// gemm function - following the c convention and calling the fortran-order +// gpu code under the hood. +template +void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, + const Dtype alpha, const Dtype* A, const Dtype* B, const Dtype beta, + Dtype* C); + +template +void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, const int N, + const Dtype alpha, const Dtype* A, const Dtype* x, const Dtype beta, + Dtype* y); + +template +void caffe_gpu_axpy(const int N, const Dtype alpha, const Dtype* X, + Dtype* Y); + +template +void caffe_gpu_axpby(const int N, const Dtype alpha, const Dtype* X, + const Dtype beta, Dtype* Y); + +void caffe_gpu_memcpy(const size_t N, const void *X, void *Y); + +template +void caffe_gpu_set(const int N, const Dtype alpha, Dtype *X); + +inline void caffe_gpu_memset(const size_t N, const int alpha, void* X) { +#ifndef CPU_ONLY + CUDA_CHECK(cudaMemset(X, alpha, N)); // NOLINT(caffe/alt_fn) +#else + NO_GPU; +#endif +} + +template +void caffe_gpu_add_scalar(const int N, const Dtype alpha, Dtype *X); + +template +void caffe_gpu_scal(const int N, const Dtype alpha, Dtype *X); + +template +void caffe_gpu_add(const int N, const Dtype* a, const Dtype* b, Dtype* y); + +template +void caffe_gpu_sub(const int N, const Dtype* a, const Dtype* b, Dtype* y); + +template +void caffe_gpu_mul(const int N, const Dtype* a, const Dtype* b, Dtype* y); + +template +void caffe_gpu_div(const int N, const Dtype* a, const Dtype* b, Dtype* y); + +template +void caffe_gpu_abs(const int n, const Dtype* a, Dtype* y); + +template +void caffe_gpu_exp(const int n, const Dtype* a, Dtype* y); + +template +void caffe_gpu_log(const int n, const Dtype* a, Dtype* y); + +template +void caffe_gpu_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); + +// caffe_gpu_rng_uniform with two arguments generates integers in the range +// [0, UINT_MAX]. +void caffe_gpu_rng_uniform(const int n, unsigned int* r); + +// caffe_gpu_rng_uniform with four arguments generates floats in the range +// (a, b] (strictly greater than a, less than or equal to b) due to the +// specification of curandGenerateUniform. With a = 0, b = 1, just calls +// curandGenerateUniform; with other limits will shift and scale the outputs +// appropriately after calling curandGenerateUniform. 
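+// Concretely, for u drawn from (0, 1] by curandGenerateUniform, the stored
+// value is r = a + (b - a) * u, which lies in (a, b].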
+template +void caffe_gpu_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r); + +template +void caffe_gpu_rng_gaussian(const int n, const Dtype mu, const Dtype sigma, + Dtype* r); + +template +void caffe_gpu_rng_bernoulli(const int n, const Dtype p, int* r); + +template +void caffe_gpu_dot(const int n, const Dtype* x, const Dtype* y, Dtype* out); + +template +uint32_t caffe_gpu_hamming_distance(const int n, const Dtype* x, + const Dtype* y); + +template +void caffe_gpu_asum(const int n, const Dtype* x, Dtype* y); + +template +void caffe_gpu_sign(const int n, const Dtype* x, Dtype* y); + +template +void caffe_gpu_sgnbit(const int n, const Dtype* x, Dtype* y); + +template +void caffe_gpu_fabs(const int n, const Dtype* x, Dtype* y); + +template +void caffe_gpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y); + +#define DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(name, operation) \ +template \ +__global__ void name##_kernel(const int n, const Dtype* x, Dtype* y) { \ + CUDA_KERNEL_LOOP(index, n) { \ + operation; \ + } \ +} \ +template <> \ +void caffe_gpu_##name(const int n, const float* x, float* y) { \ + /* NOLINT_NEXT_LINE(whitespace/operators) */ \ + name##_kernel<<>>( \ + n, x, y); \ +} \ +template <> \ +void caffe_gpu_##name(const int n, const double* x, double* y) { \ + /* NOLINT_NEXT_LINE(whitespace/operators) */ \ + name##_kernel<<>>( \ + n, x, y); \ +} + +#endif // !CPU_ONLY + +} // namespace caffe + +#endif // CAFFE_UTIL_MATH_FUNCTIONS_H_ diff --git a/include/caffe/util/mkl_alternate.hpp b/include/caffe/util/mkl_alternate.hpp new file mode 100755 index 0000000..3355b66 --- /dev/null +++ b/include/caffe/util/mkl_alternate.hpp @@ -0,0 +1,97 @@ +#ifndef CAFFE_UTIL_MKL_ALTERNATE_H_ +#define CAFFE_UTIL_MKL_ALTERNATE_H_ + +#ifdef USE_MKL + +#include + +#else // If use MKL, simply include the MKL header + +extern "C" { +#include +} +#include + +// Functions that caffe uses but are not present if MKL is not linked. + +// A simple way to define the vsl unary functions. The operation should +// be in the form e.g. y[i] = sqrt(a[i]) +#define DEFINE_VSL_UNARY_FUNC(name, operation) \ + template \ + void v##name(const int n, const Dtype* a, Dtype* y) { \ + CHECK_GT(n, 0); CHECK(a); CHECK(y); \ + for (int i = 0; i < n; ++i) { operation; } \ + } \ + inline void vs##name( \ + const int n, const float* a, float* y) { \ + v##name(n, a, y); \ + } \ + inline void vd##name( \ + const int n, const double* a, double* y) { \ + v##name(n, a, y); \ + } + +DEFINE_VSL_UNARY_FUNC(Sqr, y[i] = a[i] * a[i]); +DEFINE_VSL_UNARY_FUNC(Exp, y[i] = exp(a[i])); +DEFINE_VSL_UNARY_FUNC(Ln, y[i] = log(a[i])); +DEFINE_VSL_UNARY_FUNC(Abs, y[i] = fabs(a[i])); + +// A simple way to define the vsl unary functions with singular parameter b. +// The operation should be in the form e.g. y[i] = pow(a[i], b) +#define DEFINE_VSL_UNARY_FUNC_WITH_PARAM(name, operation) \ + template \ + void v##name(const int n, const Dtype* a, const Dtype b, Dtype* y) { \ + CHECK_GT(n, 0); CHECK(a); CHECK(y); \ + for (int i = 0; i < n; ++i) { operation; } \ + } \ + inline void vs##name( \ + const int n, const float* a, const float b, float* y) { \ + v##name(n, a, b, y); \ + } \ + inline void vd##name( \ + const int n, const double* a, const float b, double* y) { \ + v##name(n, a, b, y); \ + } + +DEFINE_VSL_UNARY_FUNC_WITH_PARAM(Powx, y[i] = pow(a[i], b)); + +// A simple way to define the vsl binary functions. The operation should +// be in the form e.g. 
y[i] = a[i] + b[i] +#define DEFINE_VSL_BINARY_FUNC(name, operation) \ + template \ + void v##name(const int n, const Dtype* a, const Dtype* b, Dtype* y) { \ + CHECK_GT(n, 0); CHECK(a); CHECK(b); CHECK(y); \ + for (int i = 0; i < n; ++i) { operation; } \ + } \ + inline void vs##name( \ + const int n, const float* a, const float* b, float* y) { \ + v##name(n, a, b, y); \ + } \ + inline void vd##name( \ + const int n, const double* a, const double* b, double* y) { \ + v##name(n, a, b, y); \ + } + +DEFINE_VSL_BINARY_FUNC(Add, y[i] = a[i] + b[i]); +DEFINE_VSL_BINARY_FUNC(Sub, y[i] = a[i] - b[i]); +DEFINE_VSL_BINARY_FUNC(Mul, y[i] = a[i] * b[i]); +DEFINE_VSL_BINARY_FUNC(Div, y[i] = a[i] / b[i]); + +// In addition, MKL comes with an additional function axpby that is not present +// in standard blas. We will simply use a two-step (inefficient, of course) way +// to mimic that. +inline void cblas_saxpby(const int N, const float alpha, const float* X, + const int incX, const float beta, float* Y, + const int incY) { + cblas_sscal(N, beta, Y, incY); + cblas_saxpy(N, alpha, X, incX, Y, incY); +} +inline void cblas_daxpby(const int N, const double alpha, const double* X, + const int incX, const double beta, double* Y, + const int incY) { + cblas_dscal(N, beta, Y, incY); + cblas_daxpy(N, alpha, X, incX, Y, incY); +} + +#endif // USE_MKL +#endif // CAFFE_UTIL_MKL_ALTERNATE_H_ diff --git a/include/caffe/util/rng.hpp b/include/caffe/util/rng.hpp new file mode 100755 index 0000000..8f1cf0d --- /dev/null +++ b/include/caffe/util/rng.hpp @@ -0,0 +1,43 @@ +#ifndef CAFFE_RNG_CPP_HPP_ +#define CAFFE_RNG_CPP_HPP_ + +#include +#include + +#include "boost/random/mersenne_twister.hpp" +#include "boost/random/uniform_int.hpp" + +#include "caffe/common.hpp" + +namespace caffe { + +typedef boost::mt19937 rng_t; + +inline rng_t* caffe_rng() { + return static_cast(Caffe::rng_stream().generator()); +} + +// Fisher–Yates algorithm +template +inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end, + RandomGenerator* gen) { + typedef typename std::iterator_traits::difference_type + difference_type; + typedef typename boost::uniform_int dist_type; + + difference_type length = std::distance(begin, end); + if (length <= 0) return; + + for (difference_type i = length - 1; i > 0; --i) { + dist_type dist(0, i); + std::iter_swap(begin + i, begin + dist(*gen)); + } +} + +template +inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end) { + shuffle(begin, end, caffe_rng()); +} +} // namespace caffe + +#endif // CAFFE_RNG_HPP_ diff --git a/include/caffe/util/upgrade_proto.hpp b/include/caffe/util/upgrade_proto.hpp new file mode 100755 index 0000000..c1f21a0 --- /dev/null +++ b/include/caffe/util/upgrade_proto.hpp @@ -0,0 +1,64 @@ +#ifndef CAFFE_UTIL_UPGRADE_PROTO_H_ +#define CAFFE_UTIL_UPGRADE_PROTO_H_ + +#include + +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +// Return true iff the net is not the current version. +bool NetNeedsUpgrade(const NetParameter& net_param); + +// Return true iff any layer contains parameters specified using +// deprecated V0LayerParameter. +bool NetNeedsV0ToV1Upgrade(const NetParameter& net_param); + +// Perform all necessary transformations to upgrade a V0NetParameter into a +// NetParameter (including upgrading padding layers and LayerParameters). +bool UpgradeV0Net(const NetParameter& v0_net_param, NetParameter* net_param); + +// Upgrade NetParameter with padding layers to pad-aware conv layers. 
+// For any padding layer, remove it and put its pad parameter in any layers +// taking its top blob as input. +// Error if any of these above layers are not-conv layers. +void UpgradeV0PaddingLayers(const NetParameter& param, + NetParameter* param_upgraded_pad); + +// Upgrade a single V0LayerConnection to the V1LayerParameter format. +bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, + V1LayerParameter* layer_param); + +V1LayerParameter_LayerType UpgradeV0LayerType(const string& type); + +// Return true iff any layer contains deprecated data transformation parameters. +bool NetNeedsDataUpgrade(const NetParameter& net_param); + +// Perform all necessary transformations to upgrade old transformation fields +// into a TransformationParameter. +void UpgradeNetDataTransformation(NetParameter* net_param); + +// Return true iff the Net contains any layers specified as V1LayerParameters. +bool NetNeedsV1ToV2Upgrade(const NetParameter& net_param); + +// Perform all necessary transformations to upgrade a NetParameter with +// deprecated V1LayerParameters. +bool UpgradeV1Net(const NetParameter& v1_net_param, NetParameter* net_param); + +bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param, + LayerParameter* layer_param); + +const char* UpgradeV1LayerType(const V1LayerParameter_LayerType type); + +// Check for deprecations and upgrade the NetParameter as needed. +bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param); + +// Read parameters from a file into a NetParameter proto message. +void ReadNetParamsFromTextFileOrDie(const string& param_file, + NetParameter* param); +void ReadNetParamsFromBinaryFileOrDie(const string& param_file, + NetParameter* param); + +} // namespace caffe + +#endif // CAFFE_UTIL_UPGRADE_PROTO_H_ diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp new file mode 100755 index 0000000..3ff1a71 --- /dev/null +++ b/include/caffe/vision_layers.hpp @@ -0,0 +1,564 @@ +#ifndef CAFFE_VISION_LAYERS_HPP_ +#define CAFFE_VISION_LAYERS_HPP_ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/common_layers.hpp" +#include "caffe/data_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/loss_layers.hpp" +#include "caffe/neuron_layers.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Abstract base class that factors out the BLAS code common to + * ConvolutionLayer and DeconvolutionLayer. + */ +template +class BaseConvolutionLayer : public Layer { + public: + explicit BaseConvolutionLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline int MinBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline bool EqualNumBottomTopBlobs() const { return true; } + + protected: + // Helper functions that abstract away the column buffer and gemm arguments. + // The last argument in forward_cpu_gemm is so that we can skip the im2col if + // we just called weight_cpu_gemm with the same input. 
+ void forward_cpu_gemm(const Dtype* input, const Dtype* weights, + Dtype* output, bool skip_im2col = false); + void forward_cpu_bias(Dtype* output, const Dtype* bias); + void backward_cpu_gemm(const Dtype* input, const Dtype* weights, + Dtype* output); + void weight_cpu_gemm(const Dtype* input, const Dtype* output, Dtype* + weights); + void backward_cpu_bias(Dtype* bias, const Dtype* input); + +#ifndef CPU_ONLY + void forward_gpu_gemm(const Dtype* col_input, const Dtype* weights, + Dtype* output, bool skip_im2col = false); + void forward_gpu_bias(Dtype* output, const Dtype* bias); + void backward_gpu_gemm(const Dtype* input, const Dtype* weights, + Dtype* col_output); + void weight_gpu_gemm(const Dtype* col_input, const Dtype* output, Dtype* + weights); + void backward_gpu_bias(Dtype* bias, const Dtype* input); +#endif + + // reverse_dimensions should return true iff we are implementing deconv, so + // that conv helpers know which dimensions are which. + virtual bool reverse_dimensions() = 0; + // Compute height_out_ and width_out_ from other parameters. + virtual void compute_output_shape() = 0; + + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; + int num_; + int channels_; + int pad_h_, pad_w_; + int height_, width_; + int group_; + int num_output_; + int height_out_, width_out_; + bool bias_term_; + bool is_1x1_; + + private: + // wrap im2col/col2im so we don't have to remember the (long) argument lists + inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) { + im2col_cpu(data, conv_in_channels_, conv_in_height_, conv_in_width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_buff); + } + inline void conv_col2im_cpu(const Dtype* col_buff, Dtype* data) { + col2im_cpu(col_buff, conv_in_channels_, conv_in_height_, conv_in_width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); + } +#ifndef CPU_ONLY + inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) { + im2col_gpu(data, conv_in_channels_, conv_in_height_, conv_in_width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_buff); + } + inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) { + col2im_gpu(col_buff, conv_in_channels_, conv_in_height_, conv_in_width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); + } +#endif + + int conv_out_channels_; + int conv_in_channels_; + int conv_out_spatial_dim_; + int conv_in_height_; + int conv_in_width_; + int kernel_dim_; + int weight_offset_; + int col_offset_; + int output_offset_; + + Blob col_buffer_; + Blob bias_multiplier_; +}; + +/** + * @brief Convolves the input image with a bank of learned filters, + * and (optionally) adds biases. + * + * Caffe convolves by reduction to matrix multiplication. This achieves + * high-throughput and generality of input and filter dimensions but comes at + * the cost of memory for matrices. This makes use of efficiency in BLAS. + * + * The input is "im2col" transformed to a channel K' x H x W data matrix + * for multiplication with the N x K' x H x W filter matrix to yield a + * N' x H x W output matrix that is then "col2im" restored. K' is the + * input channel * kernel height * kernel width dimension of the unrolled + * inputs so that the im2col matrix has a column for each input region to + * be filtered. col2im restores the output spatial structure by rolling up + * the output channel N' columns of the output matrix. 
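+ *
+ * As a concrete example (illustrative numbers): a 3-channel 5x5 input with
+ * 3x3 kernels, stride 1 and no padding gives K' = 3*3*3 = 27 and 3x3 = 9
+ * output positions, so the im2col matrix is 27 x 9, the filter matrix is
+ * N x 27, and their product is the N x 9 output reshaped to N x 3 x 3.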
+ */ +template +class ConvolutionLayer : public BaseConvolutionLayer { + public: + /** + * @param param provides ConvolutionParameter convolution_param, + * with ConvolutionLayer options: + * - num_output. The number of filters. + * - kernel_size / kernel_h / kernel_w. The filter dimensions, given by + * kernel_size for square filters or kernel_h and kernel_w for rectangular + * filters. + * - stride / stride_h / stride_w (\b optional, default 1). The filter + * stride, given by stride_size for equal dimensions or stride_h and stride_w + * for different strides. By default the convolution is dense with stride 1. + * - pad / pad_h / pad_w (\b optional, default 0). The zero-padding for + * convolution, given by pad for equal dimensions or pad_h and pad_w for + * different padding. Input padding is computed implicitly instead of + * actually padding. + * - group (\b optional, default 1). The number of filter groups. Group + * convolution is a method for reducing parameterization by selectively + * connecting input and output channels. The input and output channel dimensions must be divisible + * by the number of groups. For group @f$ \geq 1 @f$, the + * convolutional filters' input and output channels are separated s.t. each + * group takes 1 / group of the input channels and makes 1 / group of the + * output channels. Concretely 4 input channels, 8 output channels, and + * 2 groups separate input channels 1-2 and output channels 1-4 into the + * first group and input channels 3-4 and output channels 5-8 into the second + * group. + * - bias_term (\b optional, default true). Whether to have a bias. + * - engine: convolution has CAFFE (matrix multiplication) and CUDNN (library + * kernels + stream parallelism) engines. + */ + explicit ConvolutionLayer(const LayerParameter& param) + : BaseConvolutionLayer(param) {} + + virtual inline const char* type() const { return "Convolution"; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual inline bool reverse_dimensions() { return false; } + virtual void compute_output_shape(); +}; + +/** + * @brief Convolves the input with a bank of compressed filters, + * and (optionally) adds biases. 
+ */ + +template +class CConvolutionLayer : public BaseConvolutionLayer { + public: + + explicit CConvolutionLayer(const LayerParameter& param) + : BaseConvolutionLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual inline const char* type() const { return "CConvolution"; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual inline bool reverse_dimensions() { return false; } + virtual void compute_output_shape(); + + private: + Blob weight_tmp_; + Blob bias_tmp_; + Blob rand_weight_m_; + Blob rand_bias_m_; + Dtype gamma,power; + Dtype crate; + Dtype mu,std; + int iter_stop_; +}; + + + +/** + * @brief Convolve the input with a bank of learned filters, and (optionally) + * add biases, treating filters and convolution parameters in the + * opposite sense as ConvolutionLayer. + * + * ConvolutionLayer computes each output value by dotting an input window with + * a filter; DeconvolutionLayer multiplies each input value by a filter + * elementwise, and sums over the resulting output windows. In other words, + * DeconvolutionLayer is ConvolutionLayer with the forward and backward passes + * reversed. DeconvolutionLayer reuses ConvolutionParameter for its + * parameters, but they take the opposite sense as in ConvolutionLayer (so + * padding is removed from the output rather than added to the input, and + * stride results in upsampling rather than downsampling). + */ +template +class DeconvolutionLayer : public BaseConvolutionLayer { + public: + explicit DeconvolutionLayer(const LayerParameter& param) + : BaseConvolutionLayer(param) {} + + virtual inline const char* type() const { return "Deconvolution"; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual inline bool reverse_dimensions() { return true; } + virtual void compute_output_shape(); +}; + +#ifdef USE_CUDNN +/* + * @brief cuDNN implementation of ConvolutionLayer. + * Fallback to ConvolutionLayer for CPU mode. + * + * cuDNN accelerates convolution through forward kernels for filtering and bias + * plus backward kernels for the gradient w.r.t. the filters, biases, and + * inputs. Caffe + cuDNN further speeds up the computation through forward + * parallelism across groups and backward parallelism across gradients. + * + * The CUDNN engine does not have memory overhead for matrix buffers. For many + * input and filter regimes the CUDNN engine is faster than the CAFFE engine, + * but for fully-convolutional models and large inputs the CAFFE engine can be + * faster as long as it fits in memory. 
+*/ +template +class CuDNNConvolutionLayer : public ConvolutionLayer { + public: + explicit CuDNNConvolutionLayer(const LayerParameter& param) + : ConvolutionLayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNConvolutionLayer(); + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t* handle_; + cudaStream_t* stream_; + vector bottom_descs_, top_descs_; + cudnnTensorDescriptor_t bias_desc_; + cudnnFilterDescriptor_t filter_desc_; + vector conv_descs_; + int bottom_offset_, top_offset_, weight_offset_, bias_offset_; + size_t workspaceSizeInBytes; + void *workspace; +}; +#endif + +/** + * @brief A helper for image operations that rearranges image regions into + * column vectors. Used by ConvolutionLayer to perform convolution + * by matrix multiplication. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class Im2colLayer : public Layer { + public: + explicit Im2colLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Im2col"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; + int channels_; + int height_, width_; + int pad_h_, pad_w_; +}; + +// Forward declare PoolingLayer and SplitLayer for use in LRNLayer. +template class PoolingLayer; +template class SplitLayer; + +/** + * @brief Normalize the input in a local region across or within feature maps. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. 
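+ *
+ * For ACROSS_CHANNELS normalization this takes the usual LRN form
+ *   top = bottom / (k + (alpha / size) * sum_{local window} bottom^2)^beta,
+ * with size, alpha, beta and k read from LRNParameter.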
+ */ +template +class LRNLayer : public Layer { + public: + explicit LRNLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "LRN"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + virtual void CrossChannelForward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void CrossChannelForward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void WithinChannelForward(const vector*>& bottom, + const vector*>& top); + virtual void CrossChannelBackward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void CrossChannelBackward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void WithinChannelBackward(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int size_; + int pre_pad_; + Dtype alpha_; + Dtype beta_; + Dtype k_; + int num_; + int channels_; + int height_; + int width_; + + // Fields used for normalization ACROSS_CHANNELS + // scale_ stores the intermediate summing results + Blob scale_; + + // Fields used for normalization WITHIN_CHANNEL + shared_ptr > split_layer_; + vector*> split_top_vec_; + shared_ptr > square_layer_; + Blob square_input_; + Blob square_output_; + vector*> square_bottom_vec_; + vector*> square_top_vec_; + shared_ptr > pool_layer_; + Blob pool_output_; + vector*> pool_top_vec_; + shared_ptr > power_layer_; + Blob power_output_; + vector*> power_top_vec_; + shared_ptr > product_layer_; + Blob product_input_; + vector*> product_bottom_vec_; +}; + + +/** + * @brief Pools the input image by taking the max, average, etc. within regions. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class PoolingLayer : public Layer { + public: + explicit PoolingLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Pooling"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + // MAX POOL layers can output an extra top blob for the mask; + // others can only output the pooled inputs. + virtual inline int MaxTopBlobs() const { + return (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_MAX) ? 
2 : 1; + } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; + int pad_h_, pad_w_; + int channels_; + int height_, width_; + int pooled_height_, pooled_width_; + bool global_pooling_; + Blob rand_idx_; + Blob max_idx_; +}; + +#ifdef USE_CUDNN +/* + * @brief cuDNN implementation of PoolingLayer. + * Fallback to PoolingLayer for CPU mode. +*/ +template +class CuDNNPoolingLayer : public PoolingLayer { + public: + explicit CuDNNPoolingLayer(const LayerParameter& param) + : PoolingLayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNPoolingLayer(); + // Currently, cuDNN does not support the extra top blob. + virtual inline int MinTopBlobs() const { return -1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t handle_; + cudnnTensorDescriptor_t bottom_desc_, top_desc_; + cudnnPoolingDescriptor_t pooling_desc_; + cudnnPoolingMode_t mode_; +}; +#endif + +/** + * @brief Does spatial pyramid pooling on the input image + * by taking the max, average, etc. within regions + * so that the result vector of different sized + * images are of the same size. + */ +template +class SPPLayer : public Layer { + public: + explicit SPPLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "SPP"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + // MAX POOL layers can output an extra top blob for the mask; + // others can only output the pooled inputs. + virtual inline int MaxTopBlobs() const { + return (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_MAX) ? 
2 : 1; + } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + // calculates the kernel and stride dimensions for the pooling layer, + // returns a correctly configured LayerParameter for a PoolingLayer + virtual LayerParameter GetPoolingParam(const int pyramid_level, + const int bottom_h, const int bottom_w, const SPPParameter spp_param); + + int pyramid_height_; + int bottom_h_, bottom_w_; + int channels_; + int kernel_h_, kernel_w_; + int pad_h_, pad_w_; + + /// the internal Split layer that feeds the pooling layers + shared_ptr > split_layer_; + /// top vector holder used in call to the underlying SplitLayer::Forward + vector*> split_top_vec_; + /// bottom vector holder used in call to the underlying PoolingLayer::Forward + vector*>*> pooling_bottom_vecs_; + /// the internal Pooling layers of different kernel sizes + vector > > pooling_layers_; + /// top vector holders used in call to the underlying PoolingLayer::Forward + vector*>*> pooling_top_vecs_; + /// pooling_outputs stores the outputs of the PoolingLayers + vector*> pooling_outputs_; + /// the internal Flatten layers that the Pooling layers feed into + vector*> flatten_layers_; + /// top vector holders used in call to the underlying FlattenLayer::Forward + vector*>*> flatten_top_vecs_; + /// flatten_outputs stores the outputs of the FlattenLayers + vector*> flatten_outputs_; + /// bottom vector holder used in call to the underlying ConcatLayer::Forward + vector*> concat_bottom_vec_; + /// the internal Concat layers that the Flatten layers feed into + shared_ptr > concat_layer_; +}; + +} // namespace caffe + +#endif // CAFFE_VISION_LAYERS_HPP_ diff --git a/lcg_random.py b/lcg_random.py new file mode 100755 index 0000000..4588625 --- /dev/null +++ b/lcg_random.py @@ -0,0 +1 @@ 
+lcg_rand=[0.840188,0.394383,0.783099,0.79844,0.911647,0.197551,0.335223,0.76823,0.277775,0.55397,0.477397,0.628871,0.364784,0.513401,0.95223,0.916195,0.635712,0.717297,0.141603,0.606969,0.0163006,0.242887,0.137232,0.804177,0.156679,0.400944,0.12979,0.108809,0.998924,0.218257,0.512932,0.839112,0.61264,0.296032,0.637552,0.524287,0.493583,0.972775,0.292517,0.771358,0.526745,0.769914,0.400229,0.891529,0.283315,0.352458,0.807725,0.919026,0.0697553,0.949327,0.525995,0.0860558,0.192214,0.663227,0.890233,0.348893,0.0641713,0.020023,0.457702,0.0630958,0.23828,0.970634,0.902208,0.85092,0.266666,0.53976,0.375207,0.760249,0.512535,0.667724,0.531606,0.0392803,0.437638,0.931835,0.93081,0.720952,0.284293,0.738534,0.639979,0.354049,0.687861,0.165974,0.440105,0.880075,0.829201,0.330337,0.228968,0.893372,0.35036,0.68667,0.956468,0.58864,0.657304,0.858676,0.43956,0.92397,0.398437,0.814767,0.684219,0.910972,0.482491,0.215825,0.950252,0.920128,0.14766,0.881062,0.641081,0.431953,0.619596,0.281059,0.786002,0.307458,0.447034,0.226107,0.187533,0.276235,0.556444,0.416501,0.169607,0.906804,0.103171,0.126075,0.495444,0.760475,0.984752,0.935004,0.684445,0.383188,0.749771,0.368664,0.29416,0.232262,0.584489,0.244413,0.15239,0.732149,0.125475,0.79347,0.164102,0.745071,0.0745298,0.950104,0.0525293,0.521563,0.176211,0.240062,0.797798,0.732654,0.656564,0.967405,0.639458,0.759735,0.0934805,0.134902,0.52021,0.0782321,0.0699064,0.204655,0.46142,0.819677,0.573319,0.755581,0.0519388,0.157807,0.999994,0.204329,0.889956,0.125468,0.997799,0.0540576,0.87054,0.0723288,0.00416161,0.923069,0.593892,0.180372,0.163132,0.39169,0.913027,0.819695,0.359095,0.552485,0.57943,0.452576,0.687387,0.0996401,0.530808,0.757294,0.304295,0.992228,0.576971,0.877614,0.747809,0.62891,0.0354209,0.747803,0.833239,0.925377,0.873271,0.831038,0.979434,0.743811,0.903366,0.983596,0.66688,0.497259,0.163968,0.830012,0.888949,0.0769947,0.649707,0.248044,0.62948,0.229137,0.70062,0.316867,0.328777,0.231428,0.074161,0.633072,0.223656,0.651132,0.510686,0.971466,0.280042,0.546107,0.719269,0.113281,0.471483,0.59254,0.944318,0.450918,0.336351,0.847684,0.434513,0.00323146,0.344943,0.598481,0.833243,0.233892,0.675476,0.48295,0.481936,0.304956,0.712087,0.182556,0.621823,0.0408643,0.413984,0.695984,0.673936,0.63764,0.347116,0.184622,0.609106,0.627158,0.730729,0.328374,0.740438,0.202213,0.920914,0.684757,0.65313,0.257265,0.532441,0.0876436,0.260497,0.877384,0.686125,0.0937402,0.111276,0.361601,0.57669,0.593211,0.666557,0.288778,0.775767,0.288379,0.329642,0.189751,0.984363,0.00357857,0.827391,0.331479,0.188201,0.436497,0.958637,0.91893,0.764871,0.699075,0.121143,0.685786,0.383832,0.774274,0.943051,0.916273,0.861917,0.203548,0.793657,0.548042,0.297288,0.904932,0.909643,0.873979,0.498144,0.5762,0.162757,0.273911,0.864579,0.492399,0.463662,0.848942,0.495977,0.291053,0.180421,0.684178,0.72755,0.139058,0.603109,0.492422,0.838134,0.724252,0.178208,0.221966,0.498525,0.121259,0.138238,0.360443,0.324807,0.931895,0.908485,0.622095,0.836828,0.818128,0.496074,0.334972,0.394327,0.658831,0.608883,0.258906,0.15123,0.072545,0.107848,0.647207,0.363598,0.28827,0.331386,0.0911486,0.427328,0.934495,0.58357,0.265461,0.658747,0.761778,0.487427,0.157272,0.883037,0.625665,0.517715,0.207844,0.557561,0.426199,0.829939,0.394388,0.244327,0.326013,0.72936,0.638654,0.984845,0.338243,0.89756,0.136075,0.410788,0.00540855,0.783282,0.774386,0.293678,0.114668,0.865535,0.721006,0.0491625,0.449105,0.986467,0.707909,0.210883,0.473894,0.865181,0.0939195,0.0995593,0.382896,0.301763,0.65712,0.809095,0.131702,0.0515083
,0.0534223,0.457716,0.780868,0.692076,0.44256,0.119111,0.589637,0.578635,0.529899,0.595045,0.361917,0.304285,0.888723,0.476585,0.16982,0.609729,0.525747,0.618925,0.596196,0.233656,0.829808,0.0700902,0.0988374,0.923728,0.169649,0.481733,0.225491,0.826769,0.290829,0.357193,0.878278,0.344251,0.814909,0.659146,0.0363274,0.257469,0.778257,0.625964,0.836104,0.308157,0.221009,0.198021,0.612442,0.109733,0.674605,0.782262,0.719462,0.200352,0.401188,0.315658,0.434009,0.230996,0.385748,0.532846,0.154724,0.555398,0.0145793,0.380215,0.382167,0.305408,0.737408,0.260445,0.649659,0.552316,0.919591,0.685986,0.809785,0.697848,0.31195,0.645889,0.00600477,0.53296,0.84391,0.618447,0.642693,0.518515,0.400709,0.362154,0.718867,0.801897,0.677812,0.152876,0.0328927,0.0635606,0.685722,0.187616,0.618958,0.700301,0.567831,0.00112548,0.00570914,0.305239,0.26157,0.655368,0.857555,0.181161,0.341354,0.667341,0.879009,0.653305,0.31323,0.885014,0.186265,0.157139,0.503461,0.828957,0.675654,0.90417,0.191112,0.394521,0.706067,0.868924,0.547397,0.738959,0.932485,0.233119,0.926576,0.551443,0.93342,0.494407,0.552568,0.939129,0.799646,0.814139,0.594497,0.657201,0.9953,0.935852,0.324541,0.874309,0.589157,0.637771,0.759324,0.775421,0.79491,0.262785,0.604379,0.470564,0.166955,0.79549,0.865085,0.873021,0.664414,0.412483,0.611981,0.596899,0.645602,0.538557,0.148342,0.579022,0.0329634,0.70091,0.518151,0.832609,0.515049,0.112648,0.48981,0.510349,0.0484997,0.814351,0.384658,0.637656,0.452122,0.143982,0.413078,0.247033,0.406767,0.0174566,0.717597,0.573721,0.812947,0.582682,0.446743,0.477361,0.995165,0.0587232,0.0742604,0.640766,0.59728,0.222602,0.219788,0.630243,0.923513,0.737939,0.462852,0.438562,0.850586,0.952662,0.948911,0.899086,0.767014,0.333569,0.536743,0.219136,0.477551,0.94982,0.466169,0.884318,0.967277,0.183765,0.458039,0.780224,0.766448,0.904782,0.257585,0.761612,0.963505,0.331846,0.402379,0.560785,0.554448,0.622167,0.191028,0.477961,0.360105,0.65388,0.916523,0.210692,0.606542,0.865434,0.109778,0.373556,0.199003,0.64652,0.592692,0.676554,0.596341,0.0588605,0.560872,0.563617,0.242626,0.0189108,0.343841,0.00907344,0.923692,0.601427,0.770686,0.887197,0.933273,0.173065,0.447982,0.487721,0.795231,0.639009,0.965682,0.155336,0.292889,0.882204,0.366028,0.899431,0.747638,0.475806,0.272987,0.94664,0.122326,0.865679,0.623194,0.718666,0.92454,0.184066,0.282284,0.167165,0.202977,0.626125,0.176239,0.126669,0.227552,0.946925,0.0138663,0.160824,0.119989,0.461848,0.648545,0.915221,0.100857,0.614227,0.070557,0.393746,0.496431,0.436585,0.293177,0.244069,0.912391,0.566164,0.190709,0.0347164,0.431844,0.813904,0.753383,0.356383,0.99797,0.0356664,0.523548,0.200947,0.661792,0.699787,0.327616,0.889343,0.646712,0.341482,0.0501679,0.766701,0.80333,0.698713,0.681922,0.904187,0.31294,0.752479,0.297933,0.809371,0.189064,0.591111,0.0534394,0.101454,0.157275,0.244149,0.136171,0.589119,0.0580523,0.889553,0.945502,0.0560222,0.92522,0.46905,0.256969,0.587011,0.168837,0.584585,0.476355,0.815549,0.926068,0.526523,0.58225,0.729398,0.225236,0.264172,0.633585,0.538175,0.0166506,0.931518,0.347546,0.205714,0.522629,0.400985,0.307168,0.679904,0.645134,0.443339,0.269022,0.703186,0.332892,0.214524,0.759208,0.258112,0.683574,0.0161775,0.845123,0.852411,0.600763,0.321478,0.66796,0.52683,0.848,0.25021,0.256228,0.0732357,0.514382,0.889813,0.611411,0.531033,0.821331,0.958957,0.736747,0.343959,0.359942,0.0439153,0.0238632,0.0050762,0.487254,0.292886,0.708262,0.820146,0.50741,0.467471,0.0782579,0.190984,0.483648,0.923381,0.0433947,0.084411,0.244858,0.711355,0.611241,0.0928584,0.96
1565,0.867469,0.166094,0.475947,0.757282,0.777505,0.00698012,0.578613,0.736462,0.743727,0.922572,0.0964041,0.787642,0.946435,0.10148,0.274897,0.239321,0.809743,0.0950428,0.74673,0.277214,0.173301,0.937714,0.760862,0.0966814,0.981109,0.845273,0.34154,0.692463,0.456514,0.434398,0.654029,0.323983,0.600492,0.129976,0.081265,0.377997,0.136956,0.659878,0.114459,0.880683,0.58245,0.210863,0.668326,0.528885,0.312343,0.943222,0.768206,0.122086,0.0382648,0.514936,0.3993,0.211565,0.45265,0.160162,0.308247,0.433758,0.00543489,0.649787,0.126222,0.461949,0.0841846,0.78025,0.785932,0.684677,0.910227,0.867197,0.0626739,0.0471826,0.527075,0.177133,0.927866,0.109525,0.387996,0.596191,0.638409,0.70034,0.539413,0.406615,0.822426,0.577678,0.921551,0.221726,0.789244,0.374201,0.381888,0.0974906,0.807959,0.387323,0.747277,0.934181,0.849272,0.831462,0.714432,0.635204,0.516139,0.624658,0.502401,0.578813,0.671841,0.0294762,0.755946,0.599707,0.139001,0.143942,0.195898,0.77741,0.844281,0.735311,0.184025,0.666707,0.31299,0.105576,0.888433,0.102233,0.479777,0.270321,0.199724,0.287736,0.657643,0.947001,0.221918,0.506915,0.778463,0.936349,0.142119,0.294601,0.561007,0.64452,0.873414,0.232848,0.673996,0.629359,0.832555,0.812997,0.773301,0.0284525,0.590407,0.617582,0.763764,0.774432,0.284289,0.0767534,0.880009,0.172722,0.178987,0.359786,0.443043,0.37871,0.647522,0.100686,0.325711,0.86944,0.6076,0.104174,0.805789,0.749719,0.398775,0.366796,0.394239,0.272189,0.599644,0.0682348,0.901549,0.432199,0.881232,0.67485,0.460652,0.471639,0.292432,0.224415,0.246071,0.576721,0.301169,0.12608,0.749443,0.480155,0.485866,0.192486,0.858866,0.133388,0.293171,0.184577,0.00282779,0.900772,0.288752,0.808617,0.650491,0.687527,0.175413,0.0447295,0.959716,0.775058,0.112964,0.861265,0.207257,0.994196,0.536115,0.667908,0.465835,0.828546,0.892324,0.711906,0.405267,0.193493,0.837986,0.154711,0.673648,0.323852,0.347196,0.532514,0.45724,0.640368,0.717092,0.460067,0.54114,0.00584319,0.268684,0.19163,0.69337,0.444097,0.23636,0.653087,0.219155,0.349324,0.514352,0.426412,0.34352,0.0504663,0.0943199,0.809355,0.879013,0.986644,0.521261,0.28428,0.180136,0.359247,0.43899,0.853785,0.683098,0.786187,0.386299,0.140338,0.426555,0.10339,0.600405,0.967694,0.109233,0.86909,0.159324,0.802604,0.313187,0.395684,0.45569,0.532342,0.745008,0.970042,0.958753,0.0885283,0.0205084,0.0530733,0.897883,0.899521,0.039717,0.419144,0.183801,0.219853,0.778391,0.622791,0.0736379,0.461489,0.408978,0.459936,0.601827,0.835533,0.563327,0.202232,0.803227,0.67256,0.071322,0.962551,0.475164,0.384509,0.358235,0.930854,0.916851,0.103244,0.900896,0.875604,0.191772,0.921405,0.928678,0.089655,0.820926,0.968395,0.508799,0.00472651,0.188248,0.287189,0.627518,0.261886,0.748678,0.0364959,0.721822,0.350505,0.872028,0.285149,0.552738,0.675255,0.957709,0.62406,0.637806,0.432873,0.00856861,0.996042,0.363727,0.92542,0.0992851,0.264624,0.801024,0.291057,0.186029,0.729702,0.380712,0.00695418,0.698096,0.889511,0.0116807,0.886344,0.1767,0.639199,0.14823,0.925379,0.675694,0.870053,0.275884,0.547723,0.155202,0.828622,0.222978,0.112911,0.452681,0.860784,0.545784,0.46125,0.856826,0.909512,0.386669,0.956111,0.174136,0.187693,0.247168,0.360164,0.917395,0.62788,0.367118,0.615491,0.517391,0.378799,0.501835,0.694091,0.0179977,0.650066,0.61947,0.693692,0.520118,0.895354,0.241415,0.67532,0.723975,0.464393,0.788231,0.176656,0.325177,0.334016,0.637906,0.182003,0.243528,0.0245755,0.138114,0.417663,0.212269,0.385282,0.777828,0.129663,0.0131615,0.144946,0.745154,0.530552,0.523745,0.24699,0.224643,0.541743,0.897055,0.844113,0.235
435,0.417174,0.739467,0.47685,0.0924937,0.463442,0.941243,0.880725,0.640098,0.26642,0.214741,0.278005,0.448423,0.458269,0.30258,0.586537,0.875932,0.514849,0.971818,0.65376,0.644512,0.98498,0.798706,0.389667,0.515532,0.322451,0.636656,0.740175,0.864194,0.533712,0.584288,0.0996293,0.950885,0.323755,0.576479,0.043379,0.787197,0.517722,0.924104,0.427295,0.784142,0.138845,0.7053,0.232565,0.597113,0.00788008,0.819102,0.473045,0.522729,0.79092,0.126805,0.167241,0.775899,0.925511,0.556908,0.291431,0.247962,0.193564,0.0316064,0.112157,0.727276,0.615894,0.211786,0.678161,0.939649,0.788265,0.72154,0.726846,0.305987,0.645644,0.154141,0.0901297,0.784489,0.859441,0.322695,0.381603,0.867321,0.141796,0.854648,0.39005,0.932716,0.981453,0.557291,0.708616,0.906964,0.114199,4.68777e-05,0.154927,0.307763,0.0316532,0.267083,0.0350389,0.647548,0.478869,0.7132,0.587197,0.267134,0.43474,0.314043,0.573122,0.080384,0.468185,0.663252,0.864873,0.327626,0.985946,0.246476,0.194948,0.127743,0.101124,0.584998,0.0604589,0.082577,0.142289,0.769074,0.989541,0.256489,0.769121,0.144468,0.564252,0.800775,0.411551,0.599291,0.448322,0.89042,0.312491,0.0355193,0.157555,0.747231,0.349562,0.730677,0.827615,0.817747,0.393928,0.692488,0.145373,0.379874,0.938963,0.340321,0.507617,0.0400871,0.925318,0.568076,0.122664,0.0676078,0.33715,0.112205,0.324096,0.106272,0.256673,0.888348,0.907046,0.668225,0.487639,0.355369,0.558645,0.800129,0.390888,0.716199,0.54736,0.740451,0.446876,0.374975,0.558198,0.840804,0.0674623,0.703571,0.220679,0.0064256,0.0438913,0.728296,0.0465127,0.96921,0.296372,0.169177,0.0368176,0.633522,0.281382,0.360914,0.739794,0.538055,0.249262,0.64684,0.20628,0.736901,0.00220922,0.764925,0.53703,0.393097,0.481124,0.0843902,0.133548,0.928,0.459365,0.691745,0.768804,0.526827,0.395316,0.989483,0.533253,0.439207,0.717779,0.579765,0.408417,0.0141502,0.748942,0.445235,0.647672,0.030324,0.806149,0.387466,0.568379,0.0554106,0.0343068,0.774659,0.792311,0.036516,0.539584,0.329342,0.429613,0.0207081,0.413732,0.563161,0.948708,0.873097,0.254906,0.717512,0.399924,0.650222,0.706995,0.933176,0.0894297,0.424774,0.512941,0.497847,0.438924,0.261883,0.943081,0.0865962,0.292207,0.74923,0.474063,0.860587,0.804641,0.508369,0.635246,0.596952,0.544885,0.17483,0.926294,0.974499,0.195538,0.340026,0.53766,0.144246,0.213122,0.792566,0.861759,0.613046,0.442789,0.568754,0.546222,0.532218,0.993528,0.0591633,0.0300651,0.432452,0.321047,0.973147,0.519048,0.613254,0.722377,0.99311,0.473841,0.527017,0.50148,0.109087,0.123969,0.0463652,0.283917,0.0502633,0.0208639,0.479455,0.390289,0.558524,0.623701,0.603411,0.35109,0.48546,0.216457,0.793878,0.054214,0.762679,0.326097,0.0477418,0.821842,0.356162,0.480193,0.142889,0.329309,0.999241,0.756143,0.0516852,0.992352,0.229984,0.578702,0.493831,0.339071,0.702672,0.540197,0.622988,0.752935,0.56106,0.102443,0.143224,0.119584,0.726144,0.746635,0.470674,0.211604,0.963092,0.264553,0.265818,0.725771,0.590649,0.31356,0.547613,0.946811,0.793753,0.690502,0.27612,0.792995,0.446645,0.327805,0.785346,0.676628,0.906507,0.279178,0.0156992,0.609179,0.819374,0.638687,0.362115,0.380434,0.74113,0.505339,0.500019,0.467274,0.251974,0.970693,0.678878,0.215066,0.235245,0.944697,0.940837,0.825895,0.258257,0.48845,0.772706,0.0520103,0.178952,0.0488258,0.845005,0.625596,0.376631,0.630351,0.302225,0.283138,0.909529,0.317924,0.892318,0.728903,0.956611,0.254432,0.109337,0.697741,0.759771,0.609356,0.165015,0.0117453,0.580048,0.843894,0.226811,0.815294,0.78859,0.167648,0.641188,0.0468473,0.656098,0.413894,0.0988576,0.83505,0.46272,0.943863,0.460646,0
.839351,0.574214,0.762871,0.122489,0.483742,0.0807953,0.0148069,0.212645,0.0374063,0.269239,0.321982,0.735147,0.0290106,0.931338,0.900163,0.0407559,0.511386,0.744056,0.267567,0.32668,0.532647,0.435216,0.967868,0.579494,0.0913142,0.381762,0.678351,0.926364,0.844482,0.622214,0.387011,0.683833,0.196428,0.149882,0.806322,0.68017,0.230677,0.821129,0.892815,0.268084,0.0903681,0.214797,0.00323105,0.119379,0.146135,0.903394,0.160135,0.657522,0.64745,0.427702,0.984202,0.180096,0.862918,0.95207,0.75959,0.954232,0.333832,0.437942,0.880596,0.178314,0.0601557,0.267607,0.862147,0.256583,0.417489,0.668469,0.936754,0.648167,0.489597,0.829569,0.91625,0.579965,0.0443661,0.919482,0.699344,0.190501,0.822875,0.859479,0.848023,0.470325,0.287181,0.832225,0.650421,0.150099,0.784295,0.410011,0.104331,0.118126,0.847953,0.984927,0.29644,0.908109,0.252535,0.158587,0.164692,0.670024,0.827056,0.101446,0.318191,0.316653,0.931014,0.234441,0.896618,0.97538,0.153923,0.595963,0.165882,0.976798,0.455442,0.0139049,0.447123,0.742623,0.84613,0.0975437,0.892721,0.630424,0.507555,0.997052,0.748551,0.355508,0.98198,0.044991,0.263617,0.234514,0.203578,0.428309,0.904539,0.0306337,0.529754,0.222729,0.347287,0.460768,0.457171,0.243905,0.436149,0.611093,0.839868,0.60203,0.587891,0.295309,0.615935,0.0350138,0.037932,0.462065,0.132558,0.930653,0.0924893,0.640113,0.927706,0.84104,0.99562,0.909686,0.886031,0.259237,0.1442,0.089609,0.687545,0.0487385,0.120243,0.2173,0.271468,0.467529,0.678068,0.728638,0.711434,0.114216,0.339732,0.551302,0.716247,0.927623,0.846611,0.332182,0.962637,0.884543,0.794247,0.0951945,0.815197,0.886736,0.735307,0.742903,0.727776,0.730928,0.652588,0.613807,0.990165,0.796788,0.703416,0.67771,0.845527,0.823659,0.89501,0.116995,0.291188,0.573077,0.845633,0.00262258,0.687294,0.185365,0.553925,0.403541,0.112988,0.400536,0.735723,0.0756249,0.28508,0.52997,0.170819,0.100276,0.416706,0.906126,0.843179,0.144482,0.637054,0.495767,0.75829,0.627218,0.292555,0.461706,0.304929,0.138082,0.285365,0.199938,0.255076,0.576553,0.773016,0.100709,0.579176,0.460309,0.286074,0.1331,0.86385,0.399062,0.533636,0.599573,0.474687,0.818716,0.129542,0.645507,0.918992,0.546248,0.551633,0.762171,0.690731,0.188687,0.257939,0.44902,0.815905,0.550494,0.910726,0.120834,0.688576,0.196091,0.320772,0.943652,0.772644,0.0937877,0.0443619,0.351819,0.554097,0.330436,0.484919,0.417947,0.729499,0.0185558,0.0175198,0.204186,0.837272,0.147062,0.849692,0.756264,0.693311,0.401325,0.518436,0.384041,0.590012,0.776374,0.833062,0.405917,0.326868,0.743788,0.526751,0.0154439,0.939879,0.847523,0.959096,0.712522,0.941311,0.00345808,0.0643416,0.495408,0.333894,0.549261,0.913355,0.0633928,0.567817,0.930875,0.267578,0.405089,0.0779371,0.117271,0.161353,0.771248,0.518596,0.679788,0.155289,0.108608,0.456162,0.988351,0.514525,0.783031,0.732138,0.041276,0.798474,0.672017,0.888799,0.757571,0.384539,0.83011,0.761029,0.448881,0.325518,0.094923,0.998142,0.238873,0.158316,0.565959,0.169748,0.425894,0.971047,0.247685,0.543165,0.1324,0.0189329,0.0617606,0.812189,0.174222,0.170368,0.268351,0.162573,0.684893,0.0513816,0.894711,0.726169,0.849856,0.566728,0.614968,0.607427,0.951267,0.445079,0.368455,0.400148,0.770597,0.463378,0.39829,0.00947013,0.621694,0.964248,0.179218,0.0475882,0.935296,0.426904,0.590753,0.067696,0.445836,0.652514,0.879885,0.620058,0.822882,0.148236,0.782631,0.507775,0.199617,0.677342,0.233944,0.0494732,0.244069,0.848912,0.6569,0.195336,0.293991,0.0253549,0.595484,0.0645876,0.488733,0.993774,0.0740577,0.110427,0.958022,0.253276,0.158015,0.893318,0.68018,0.748768,0.961014,0.
126016,0.401282,0.840899,0.746074,0.224164,0.989134,0.528705,0.731938,0.188752,0.206047,0.965882,0.238225,0.450117,0.814794,0.895125,0.645453,0.108785,0.920479,0.240937,0.173373,0.409213,0.234711,0.247431,0.51964,0.192734,0.500707,0.677655,0.0860517,0.180886,0.426423,0.0470658,0.306902,0.827705,0.887964,0.0529768,0.0518689,0.877099,0.581682,0.783807,0.0658506,0.787729,0.74969,0.304075,0.237846,0.564484,0.1992,0.883299,0.673269,0.119679,0.124236,0.846642,0.528892,0.358948,0.0940727,0.0485317,0.551681,0.594779,0.726187,0.637733,0.775665,0.15261,0.684799,0.0825677,0.980315,0.572763,0.135545,0.0321839,0.449862,0.717227,0.815991,0.515712,0.504956,0.565681,0.819788,0.742802,0.130165,0.0189879,0.626101,0.803434,0.138667,0.750337,0.650077,0.667559,0.109285,0.744149,0.716091,0.660966,0.338929,0.442278,0.298698,0.114594,0.594888,0.983497,0.197162,0.575203,0.55626,0.332706,0.607387,0.00612195,0.0499329,0.423378,0.521834,0.554889,0.989059,0.341622,0.297691,0.119224,0.36061,0.923792,0.922658,0.499278,0.674129,0.572735,0.166837,0.783413,0.316884,0.882928,0.444379,0.655813,0.325206,0.743078,0.770407,0.920094,0.726575,0.967568,0.495297,0.282835,0.300275,0.102683,0.288957,0.350207,0.526061,0.810791,0.905096,0.51512,0.152413,0.202787,0.634344,0.513024,0.126579,0.557002,0.0123013,0.800708,0.129737,0.179138,0.584121,0.446622,0.0620666,0.0285004,0.102434,0.387273,0.771578,0.872841,0.307366,0.498153,0.840409,0.802663,0.780987,0.140684,0.905346,0.0699438,0.490891,0.431407,0.880735,0.395988,0.946527,0.0331482,0.598775,0.580872,0.546172,0.725354,0.137874,0.558473,0.526062,0.267611,0.737611,0.110183,0.714233,0.799678,0.138684,0.816667,0.186951,0.910262,0.689509,0.494317,0.408414,0.529918,0.29698,0.189401,0.670602,0.202326,0.259345,0.161494,0.633734,0.14008,0.557481,0.580261,0.173228,0.156257,0.161133,0.7194,0.881611,0.299007,0.277873,0.407673,0.566618,0.0154844,0.517856,0.280851,0.815162,0.656539,0.0975185,0.00211305,0.566801,0.787027,0.49643,0.975215,0.316945,0.79341,0.164617,0.987547,0.995737,0.423962,0.149041,0.629471,0.564042,0.706522,0.209732,0.73727,0.862779,0.370865,0.45667,0.74439,0.669871,0.734543,0.152062,0.23649,0.750027,0.669918,0.517341,0.565189,0.326458,0.614859,0.567302,0.893259,0.401886,0.0637327,0.868474,0.718831,0.857143,0.0330907,0.706378,0.85288,0.457052,0.855419,0.48235,0.021094,0.561941,0.692082,0.758364,0.42472,0.0629468,0.215033,0.169109,0.732818,0.949576,0.321171,0.969308,0.699603,0.99109,0.486648,0.264793,0.317548,0.101507,0.832095,0.210806,0.503394,0.895828,0.0792805,0.222225,0.752971,0.112371,0.928603,0.60585,0.569424,0.784022,0.0882007,0.590518,0.345963,0.780283,0.348881,0.770682,0.84323,0.563915,0.939791,0.576048,0.513491,0.260963,0.545356,0.213094,0.252053,0.0320041,0.477887,0.5696,0.133512,0.309982,0.780407,0.636905,0.205809,0.859687,0.85913,0.95878,0.972059,0.787733,0.56463,0.541482,0.571755,0.652831,0.132,0.917717,0.433114,0.480881,0.688399,0.276344,0.0447959,0.628191,0.852391,0.558287,0.889154,0.397747,0.771381,0.141207,0.429751,0.249267,0.710807,0.563263,0.559249,0.491214,0.200168,0.765058,0.350901,0.0592977,0.723838,0.32296,0.847031,0.288468,0.864442,0.418785,0.941299,0.996442,0.336502,0.374413,0.477323,0.0249018,0.650757,0.522119,0.653093,0.503148,0.0804055,0.542247,0.900895,0.851786,0.683453,0.330647,0.101054,0.39426,0.893909,0.660302,0.885474,0.0940773,0.425361,0.236375,0.153375,0.149199,0.559335,0.000405591,0.437667,0.423777,0.419191,0.378966,0.420218,0.755693,0.753379,0.897541,0.780595,0.404136,0.41966,0.433687,0.907284,0.500065,0.975934,0.808179,0.351851,0.659387,0.138826,0
.452905,0.0536473,0.0327351,0.113207,0.939121,0.126812,0.538568,0.175496,0.280187,0.687766,0.734831,0.280593,0.125433,0.158607,0.699784,0.504399,0.578825,0.455477,0.257778,0.476366,0.236071,0.661914,0.896026,0.669759,0.569198,0.396091,0.645693,0.377377,0.747942,0.30508,0.516203,0.200847,0.358727,0.548938,0.314054,0.297849,0.67575,0.852622,0.473345,0.955938,0.540389,0.208175,0.236531,0.665822,0.366783,0.936315,0.170221,0.945608,0.391792,0.427999,0.421975,0.627863,0.0899137,0.318,0.297622,0.659112,0.714091,0.943315,0.0364889,0.462033,0.248395,0.552692,0.66288,0.607122,0.10163,0.976935,0.904971,0.77738,0.829557,0.378316,0.733318,0.369946,0.586491,0.969849,0.0357675,0.953274,0.906164,0.205989,0.898882,0.297956,0.633988,0.320857,0.925819,0.723902,0.638857,0.223441,0.383013,0.352948,0.166755,0.419502,0.814982,0.41515,0.972194,0.477862,0.0222729,0.0738238,0.454797,0.927244,0.851204,0.284354,0.30556,0.584522,0.654299,0.892051,0.554371,0.690067,0.845325,0.460535,0.896055,0.744207,0.758491,0.530043,0.0650635,0.684309,0.253945,0.703921,0.90775,0.636959,0.056869,0.0745054,0.0564608,0.871851,0.489656,0.0286548,0.349713,0.511929,0.102479,0.80451,0.439173,0.953683,0.0888636,0.744732,0.538205,0.743163,0.636783,0.092576,0.43323,0.482107,0.553111,0.329285,0.226314,0.311602,0.859329,0.291378,0.995912,0.113274,0.995298,0.903662,0.750232,0.0521674,0.978167,0.806693,0.924018,0.467823,0.835348,0.273731,0.979752,0.937826,0.0782409,0.418925,0.891509,0.167104,0.163657,0.429714,0.910267,0.80044,0.52229,0.343497,0.282547,0.0754011,0.672782,0.508861,0.387003,0.532111,0.800239,0.382915,0.645385,0.795537,0.286577,0.395617,0.847705,0.264745,0.20231,0.771723,0.732568,0.0376578,0.0454541,0.71232,0.975484,0.123695,0.131244,0.866993,0.290799,0.294901,0.296707,0.201067,0.0953408,0.818996,0.544564,0.377888,0.894397,0.217346,0.886749,0.281401,0.749457,0.686988,0.664316,0.394842,0.482525,0.950893,0.790458,0.33023,0.215638,0.992768,0.101953,0.948206,0.0304262,0.147407,0.660526,0.00591033,0.271102,0.79177,0.872903,0.561902,0.0866714,0.16961,0.762968,0.182012,0.988606,0.307532,0.5599,0.883004,0.524878,0.446649,0.164405,0.274335,0.133637,0.828721,0.669177,0.616163,0.779615,0.459635,0.946393,0.995253,0.452404,0.048346,0.943459,0.48283,0.195753,0.603984,0.48874,0.466855,0.395754,0.361644,0.028757,0.482426,0.531254,0.791725,0.664438,0.51986,0.0992575,0.224338,0.402864,0.624136,0.670987,0.567269,0.898471,0.804625,0.39599,0.567648,0.420787,0.175605,0.0272838,0.36718,0.170857,0.479688,0.415526,0.114316,0.962518,0.611279,0.7183,0.451258,0.0781347,0.114054,0.812902,0.106892,0.59648,0.344156,0.898617,0.260918,0.864016,0.997875,0.485257,0.26688,0.622011,0.156244,0.834149,0.520482,0.960868,0.230139,0.0881301,0.381656,0.405744,0.115414,0.748836,0.576601,0.595102,0.164362,0.690917,0.557619,0.775641,0.409217,0.00887705,0.853776,0.523271,0.821779,0.960668,0.119752,0.165934,0.859285,0.38067,0.0299501,0.857159,0.865927,0.29683,0.47917,0.0221705,0.130979,0.999652,0.983039,0.361119,0.0877818,0.364695,0.766863,0.203196,0.11353,0.343464,0.798297,0.277892,0.0343803,0.355916,0.0535336,0.443597,0.364793,0.90731,0.966868,0.186572,0.867977,0.08662,0.352506,0.727262,0.46729,0.382457,0.584421,0.333217,0.679287,0.0635912,0.355387,0.810266,0.0632429,0.338426,0.171385,0.151025,0.703121,0.938247,0.354221,0.816651,0.281711,0.152518,0.0945434,0.316091,0.508434,0.148077,0.759688,0.873228,0.0553866,0.726557,0.0597998,0.923364,0.813177,0.412306,0.650626,0.280467,0.794763,0.235047,0.613683,0.47405,0.298639,0.96907,0.284316,0.361881,0.307496,0.455701,0.512906,0.0106171,0.
393948,0.867127,0.827268,0.675659,0.0196445,0.921812,0.991751,0.528079,0.0698885,0.751439,0.401306,0.125275,0.477996,0.461106,0.048639,0.291173,0.873412,0.699265,0.571639,0.668175,0.934312,0.185323,0.142225,0.232951,0.154393,0.426541,0.594832,0.46189,0.882242,0.107738,0.472507,0.27619,0.974865,0.299775,0.95185,0.99451,0.221587,0.943601,0.522588,0.291475,0.69504,0.923895,0.41675,0.173036,0.385001,0.465389,0.464208,0.258413,0.164654,0.0358477,0.926588,0.0989663,0.221171,0.0688135,0.331917,0.375564,0.495355,0.926749,0.837454,0.377597,0.0344874,0.309961,0.653787,0.00935241,0.609736,0.605637,0.00386199,0.831323,0.549238,0.52645,0.122798,0.244277,0.450345,0.539549,0.417313,0.835346,0.00493794,0.881521,0.0937589,0.169592,0.917369,0.0203474,0.268558,0.138539,0.089161,0.600475,0.514103,0.584516,0.527224,0.351557,0.962112,0.561712,0.661518,0.615899,0.571064,0.271254,0.221536,0.574926,0.102577,0.770774,0.101377,0.225375,0.015051,0.551721,0.764924,0.432364,0.387067,0.769862,0.313885,0.480826,0.939454,0.231253,0.501173,0.208012,0.369793,0.590334,0.808488,0.883896,0.17485,0.335712,0.235453,0.136962,0.897424,0.896971,0.752861,0.468488,0.168225,0.974398,0.0434146,0.270803,0.745172,0.144791,0.496178,0.760223,0.696513,0.261102,0.192586,0.0835797,0.0309635,0.506471,0.564406,0.970417,0.737724,0.0655793,0.17843,0.107517,0.655914,0.986918,0.991413,0.830764,0.32263,0.226866,0.967726,0.220054,0.123838,0.720587,0.688542,0.292063,0.694985,0.731957,0.562866,0.440157,0.876748,0.0590435,0.200379,0.573261,0.320145,0.392965,0.65684,0.351109,0.899437,0.221246,0.321526,0.637161,0.286825,0.499956,0.744678,0.942739,0.486873,0.736091,0.773503,0.809503,0.962958,0.741229,0.0295571,0.0867955,0.461816,0.718099,0.378859,0.156801,0.450056,0.941724,0.596958,0.326804,0.000767693,0.797337,0.900065,0.320913,0.190303,0.556905,0.672021,0.0897392,0.778152,0.993547,0.7269,0.0649769,0.493503,0.471579,0.00771607,0.980376,0.20767,0.781219,0.789879,0.170628,0.522448,0.819436,0.257423,0.984264,0.537536,0.636282,0.141065,0.987592,0.578006,0.738023,0.314397,0.578774,0.53536,0.214462,0.899687,0.725663,0.771367,0.571708,0.815402,0.549519,0.565256,0.542302,0.614496,0.0587587,0.0138811,0.622212,0.0391351,0.221551,0.403431,0.829014,0.392179,0.925878,0.648451,0.649603,0.910142,0.185987,0.285885,0.0512077,0.173579,0.863891,0.789231,0.487976,0.442665,0.324591,0.702437,0.342352,0.0502544,0.473805,0.91406,0.865657,0.0233231,0.479315,0.407959,0.637819,0.538074,0.42184,0.26003,0.577209,0.643391,0.663461,0.406224,0.0355706,0.589339,0.0546747,0.685173,0.499481,0.240662,0.971058,0.550689,0.414241,0.834949,0.33992,0.902216,0.277614,0.664511,0.604654,0.619965,0.714766,0.0784585,0.534025,0.580422,0.101782,0.0133406,0.988382,0.7396,0.551415,0.410222,0.999631,0.128624,0.0536133,0.663091,0.534848,0.0891839,0.25243,0.589522,0.774357,0.751912,0.830184,0.745415,0.302601,0.244425,0.580364,0.642521,0.146641,0.857978,0.307032,0.751295,0.477943,0.0217983,0.829754,0.0119682,0.602221,0.931535,0.0253088,0.590602,0.671135,0.576724,0.000824207,0.670766,0.705347,0.0544375,0.333857,0.240195,0.143621,0.586287,0.829717,0.917979,0.338199,0.659901,0.663394,0.6408,0.904326,0.243758,0.283321,0.0509667,0.101735,0.590354,0.802262,0.579678,0.612152,0.632015,0.591647,0.214373,0.56355,0.616955,0.804975,0.234686,0.193679,0.805799,0.905452,0.899026,0.860237,0.239309,0.139221,0.00385829,0.825596,0.968939,0.921837,0.163796,0.62884,0.58523,0.804596,0.533165,0.828988,0.0879172,0.584132,0.930723,0.678271,0.386394,0.510402,0.290423,0.0184091,0.102048,0.504796,0.58196,0.719004,0.309771,0.816646,0.912
683,0.115571,0.722097,0.811709,0.975807,0.961406,0.95093,0.979666,0.787003,0.919869,0.901503,0.950798,0.548708,0.486733,0.755394,0.0818735,0.315721,0.843312,0.666005,0.246445,0.521583,0.0523992,0.756846,0.812006,0.0708083,0.858895,0.316802,0.652768,0.577898,0.626573,0.469413,0.490581,0.742143,0.191511,0.30229,0.717951,0.152917,0.25322,0.697616,0.93992,0.173089,0.599119,0.890718,0.721797,0.0858521,0.646113,0.80367,0.401573,0.489424,0.469676,0.648018,0.0110069,0.522075,0.404864,0.823013,0.592883,0.263759,0.139814,0.245651,0.841657,0.766387,0.715065,0.332238,0.50853,0.906575,0.634528,0.226481,0.0594924,0.887748,0.924097,0.999412,0.0608362,0.523216,0.89013,0.782633,0.609068,0.536243,0.586303,0.0106416,0.0256669,0.0559791,0.658659,0.0366738,0.578054,0.0635237,0.859686,0.170937,0.327282,0.9995,0.416589,0.16894,0.765887,0.131653,0.501178,0.274417,0.0382287,0.135705,0.500898,0.0977211,0.0234531,0.424995,0.0971331,0.0842894,0.948211,0.987263,0.866922,0.557279,0.523506,0.453226,0.567921,0.549173,0.509205,0.22658,0.585846,0.0872588,0.290104,0.445533,0.258196,0.617387,0.445033,0.674785,0.786326,0.21092,0.806438,0.287504,0.485337,0.844667,0.423209,0.986235,0.942388,0.446662,0.41123,0.039521,0.530952,0.359441,0.0267842,0.397874,0.91672,0.55029,0.8511,0.484641,0.0994628,0.360305,0.711222,0.685309,0.447564,0.00132573,0.130842,0.70576,0.618712,0.575875,0.380544,0.405038,0.786796,0.186983,0.692542,0.272133,0.0316495,0.115752,0.258368,0.974037,0.562414,0.669598,0.0135584,0.0933659,0.0290395,0.0403427,0.49124,0.94576,0.590633,0.34234,0.430401,0.690096,0.702645,0.141623,0.375405,0.150208,0.142949,0.506247,0.855968,0.761661,0.0821224,0.236512,0.166699,0.868918,0.423495,0.859242,0.141051,0.455144,0.974993,0.39942,0.429182,0.537407,0.0690179,0.44274,0.630773,0.0980574,0.483083,0.122013,0.0438174,0.0737159,0.464353,0.474219,0.763811,0.166998,0.615842,0.139216,0.317206,0.75879,0.645464,0.173174,0.520451,0.727586,0.409687,0.687151,0.596504,0.833182,0.546393,0.737555,0.288326,0.521386,0.136975,0.717508,0.0587931,0.205993,0.160248,0.689566,0.30405,0.643331,0.811579,0.347868,0.717047,0.275933,0.822086,0.480859,0.442931,0.437928,0.620075,0.760137,0.196719,0.265539,0.933311,0.71717,0.993125,0.342998,0.404321,0.589629,0.176179,0.950714,0.327184,0.464505,0.472099,0.464159,0.182014,0.530892,0.670152,0.342262,0.220459,0.974202,0.985593,0.0320382,0.32207,0.702641,0.307971,0.144156,0.1835,0.750902,0.582084,0.803575,0.511039,0.778803,0.0691138,0.44435,0.495973,0.0622386,0.787347,0.900294,0.651868,0.963527,0.851007,0.979052,0.428032,0.323107,0.443211,0.610046,0.853999,0.113363,0.952308,0.0744578,0.0875648,0.937901,0.106496,0.409635,0.640542,0.414467,0.553791,0.824041,0.165369,0.135875,0.627616,0.676407,0.914678,0.69673,0.120757,0.410651,0.758969,0.908104,0.310944,0.410836,0.871631,0.161952,0.389888,0.299663,0.485059,0.833099,0.909708,0.339058,0.946461,0.862016,0.413516,0.0340263,0.799917,0.520012,0.443661,0.440458,0.934478,0.997451,0.2645,0.0998469,0.133326,0.892116,0.776254,0.048004,0.588846,0.897011,0.458655,0.347815,0.805115,0.769599,0.758651,0.676745,0.931551,0.148539,0.976408,0.416609,0.981637,0.886116,0.755667,0.928099,0.748131,0.169182,0.962125,0.548048,0.689194,0.405786,0.988506,0.623672,0.403237,0.253006,0.723519,0.536564,0.145122,0.499773,0.584568,0.733968,0.396784,0.0432225,0.0817827,0.201899,0.812821,0.840433,0.878644,0.744372,0.988972,0.855051,0.160982,0.97061,0.741167,0.916649,0.898709,0.489298,0.0858311,0.860834,0.0373466,0.775025,0.26662,0.0258531,0.398698,0.669857,0.278859,0.122217,0.206421,0.423981,0.62199,0.790
989,0.157949,0.0187743,0.834212,0.239732,0.220673,0.647033,0.0801656,0.0993164,0.391405,0.0691378,0.954368,0.552387,0.0397475,0.695535,0.469035,0.938456,0.184833,0.554866,0.79929,0.22218,0.329892,0.0659098,0.248033,0.728589,0.735767,0.526892,0.850806,0.942188,0.950873,0.472796,0.733178,0.108823,0.491571,0.567389,0.348555,0.712244,0.214422,0.42872,0.81156,0.605827,0.497858,0.765928,0.158214,0.537606,0.461462,0.62725,0.476062,0.646295,0.182116,0.275352,0.868475,0.512008,0.341262,0.116508,0.240597,0.0770289,0.6434,0.0914027,0.0192173,0.594273,0.564199,0.752395,0.703096,0.0557698,0.319784,0.0516508,0.768013,0.534206,0.480371,0.579573,0.140033,0.978229,0.345501,0.298248,0.515835,0.806963,0.925497,0.991897,0.453259,0.107613,0.267249,0.321734,0.619621,0.60851,0.438242,0.860217,0.685539,0.0816417,0.95162,0.704757,0.675915,0.515819,0.457151,0.379011,0.571589,0.776935,0.430662,0.339602,0.311141,0.911033,0.919176,0.451175,0.889262,0.264676,0.749422,0.405098,0.0716397,0.674919,0.396995,0.524898,0.782533,0.664243,0.846632,0.402153,0.272754,0.284874,0.262371,0.958293,0.366516,0.213991,0.663049,0.0424307,0.72981,0.120201,0.421442,0.301399,0.897136,0.852104,0.641001,0.208278,0.763137,0.560176,0.659452,0.652399,0.824853,0.408875,0.0574968,0.896493,0.083794,0.454491,0.421391,0.866327,0.118735,0.268023,0.26848,0.391488,0.552897,0.530851,0.349781,0.919412,0.744841,0.0128304,0.961843,0.474651,0.133031,0.383285,0.77605,0.0301674,0.235388,0.417051,0.238445,0.998525,0.977227,0.897897,0.650924,0.80208,0.306772,0.708421,0.698573,0.390566,0.162912,0.119964,0.256893,0.281647,0.387987,0.525373,0.673135,0.940884,0.0562232,0.022916,0.860296,0.801065,0.0357464,0.822139,0.275716,0.168778,0.205424,0.0517661,0.198945,0.440813,0.468817,0.43739,0.439338,0.446045,0.335287,0.0902622,0.248125,0.642059,0.798683,0.946698,0.032625,0.961596,0.0666617,0.289518,0.243243,0.454649,0.81489,0.916378,0.395532,0.871113,0.939294,0.255828,0.672178,0.97504,0.0779679,0.947894,0.143818,0.283392,0.99966,0.342763,0.724205,0.468477,0.780153,0.163543,0.914522,0.11544,0.253805,0.162647,0.7575,0.0524885,0.109345,0.790125,0.0140841,0.176006,0.079642,0.257327,0.630655,0.894532,0.173704,0.0261872,0.765645,0.112998,0.282016,0.437823,0.0880377,0.359984,0.385717,0.231855,0.643376,0.385378,0.574618,0.367581,0.853855,0.354771,0.531124,0.768377,0.470211,0.784929,0.931024,0.227711,0.837417,0.0403683,0.0178355,0.851502,0.216375,0.0974775,0.108828,0.84703,0.99201,0.282532,0.873217,0.757655,0.39553,0.155233,0.195478,0.483568,0.515216,0.581196,0.715423,0.158592,0.966573,0.290041,0.526173,0.820428,0.644812,0.0572967,0.588805,0.115024,0.842226,0.519828,0.342735,0.679643,0.560197,0.36057,0.531144,0.776571,0.458048,0.639973,0.623601,0.450057,0.922505,0.496818,0.207712,0.318035,0.652051,0.403191,0.801603,0.167267,0.984386,0.517026,0.325859,0.950959,0.807067,0.852032,0.771387,0.451879,0.909329,0.360192,0.566903,0.751554,0.88002,0.909638,0.431197,0.440217,0.270209,0.962342,0.216789,0.728256,0.602314,0.84039,0.178314,0.524819,0.337208,0.386026,0.842854,0.989258,0.789217,0.644457,0.156525,0.773603,0.161482,0.482384,0.724562,0.968549,0.334416,0.495949,0.420429,0.243745,0.856141,0.987332,0.995299,0.736162,0.89697,0.426496,0.176379,0.167179,0.388838,0.393167,0.895435,0.991152,0.233557,0.073749,0.515971,0.570765,0.459775,0.358825,0.560023,0.248992,0.00328197,0.716548,0.0225948,0.164764,0.198933,0.747157,0.133314,0.533348,0.243106,0.553743,0.777093,0.0992475,0.541075,0.772392,0.835409,0.438045,0.198887,0.0117881,0.605223,0.587725,0.404956,0.500659,0.578877,0.638513,0.574408,0.094
8478,0.209278,0.0341828,0.453673,0.769301,0.283175,0.456955,0.48585,0.30577,0.621719,0.684782,0.0529265,0.755033,0.218131,0.296033,0.308776,0.995223,0.39528,0.84985,0.767615,0.230689,0.287895,0.966503,0.242477,0.893119,0.554228,0.647433,0.393777,0.133104,0.285946,0.968185,0.227952,0.495224,0.00236773,0.681625,0.264525,0.285543,0.13858,0.750374,0.591312,0.7603,0.435157,0.644239,0.515333,0.653287,0.940271,0.824109,0.648511,0.335551,0.673959,0.416126,0.566241,0.961854,0.382628,0.808718,0.854973,0.936856,0.456151,0.24875,0.0699601,0.742097,0.216935,0.297912,0.237321,0.219303,0.979537,0.501846,0.504845,0.118118,0.252221,0.0961575,0.878417,0.687377,0.740396,0.39375,0.340664,0.680667,0.217858,0.989175,0.0162189,0.891817,0.405301,0.58246,0.853671,0.787929,0.391178,0.708644,0.724785,0.84733,0.957394,0.794745,0.589427,0.17433,0.0926569,0.826748,0.393632,0.0721944,0.328595,0.898478,0.190312,0.580815,0.994635,0.0687289,0.268192,0.735031,0.462479,0.608857,0.415699,0.680337,0.598032,0.431918,0.572154,0.00333222,0.0143773,0.425826,0.791261,0.405555,0.13447,0.516046,0.252885,0.0918645,0.31079,0.842312,0.266194,0.403447,0.66906,0.659826,0.475641,0.997655,0.558304,0.665953,0.57847,0.552939,0.734682,0.846663,0.28797,0.197161,0.455519,0.703669,0.877498,0.0535511,0.135586,0.449652,0.0568833,0.149964,0.875478,0.848144,0.555519,0.00994797,0.36419,0.808404,0.101812,0.67498,0.650716,0.368006,0.078427,0.319776,0.0278327,0.554068,0.317431,0.586137,0.220022,0.895901,0.139076,0.954704,0.742564,0.427046,0.151865,0.198083,0.130715,0.0293628,0.251634,0.266301,0.479015,0.308517,0.416264,0.354493,0.156662,0.971783,0.364441,0.520852,0.780187,0.466253,0.195831,0.430903,0.83426,0.274258,0.750679,0.862092,0.828327,0.0681104,0.448229,0.0483486,0.964012,0.587305,0.00305258,0.706575,0.0143504,0.154917,0.904658,0.145065,0.18428,0.156293,0.411366,0.663295,0.46481,0.82763,0.017788,0.621472,0.799414,0.382229,0.142323,0.579601,0.848482,0.338155,0.0105039,0.682742,0.612413,0.761183,0.544834,0.44074,0.829294,0.993064,0.489089,0.793305,0.580368,0.492141,0.499881,0.594719,0.647059,0.404539,0.739784,0.831339,0.560831,0.151149,0.494634,0.0256415,0.97878,0.512422,0.647113,0.778193,0.894651,0.789437,0.357794,0.743133,0.127591,0.368298,0.425875,0.740005,0.129481,0.97071,0.180745,0.958775,0.963773,0.669833,0.75208,0.544142,0.161975,0.251961,0.13886,0.809034,0.6565,0.878644,0.640373,0.217331,0.0297934,0.135007,0.242973,0.00857304,0.647429,0.890086,0.786766,0.54208,0.679523,0.14456,0.285213,0.807114,0.512858,0.711088,0.547119,0.642339,0.681798,0.727863,0.601114,0.645571,0.397697,0.353195,0.189713,0.559672,0.605156,0.328573,0.368705,0.261656,0.207217,0.00907761,0.478987,0.23701,0.144084,0.72196,0.245583,0.791513,0.612046,0.0323497,0.333593,0.291569,0.17691,0.618806,0.0986827,0.689768,0.329894,0.645801,0.332107,0.0116926,0.373665,0.933221,0.657264,0.771361,0.286416,0.846977,0.331033,0.891572,0.17555,0.699738,0.153227,0.382767,0.708816,0.632214,0.619777,0.8529,0.354174,0.865361,0.644413,0.96622,0.89771,0.978006,0.257789,0.0746203,0.596812,0.356471,0.764388,0.926707,0.00227268,0.0964953,0.938399,0.375937,0.0297167,0.595663,0.147299,0.316133,0.44264,0.478332,0.207704,0.61819,0.17807,0.360932,0.000956713,0.886886,0.993146,0.620734,0.739786,0.34732,0.486095,0.384199,0.31354,0.383805,0.362205,0.571328,0.458426,0.959017,0.9278,0.222814,0.885723,0.930072,0.319309,0.824122,0.30601,0.349026,0.419785,0.453309,0.665159,0.862425,0.93164,0.872863,0.480615,0.10971,0.233795,0.481572,0.996596,0.226941,0.102306,0.736382,0.57426,0.588401,0.12058,0.8878,0.972206,0.4827
85,0.459128,0.430632,0.441802,0.386928,0.653445,0.327525,0.317,0.972754,0.151647,0.62301,0.32178,0.571433,0.0763186,0.986939,0.433858,0.00795897,0.859802,0.914473,0.117669,0.0935964,0.396045,0.114265,0.320537,0.498351,0.850647,0.894797,0.0867517,0.971227,0.782597,0.0589577,0.454012,0.241726,0.489589,0.895813,0.628654,0.143034,0.223338,0.945654,0.115789,0.374985,0.568664,0.437569,0.946418,0.644983,0.424507,0.380276,0.652942,0.284309,0.294749,0.770611,0.377905,0.690794,0.884877,0.698442,0.189145,0.735523,0.59324,0.275897,0.70675,0.375837,0.334854,0.160762,0.617563,0.824444,0.0565758,0.246217,0.967478,0.279914,0.191871,0.0832669,0.654899,0.760536,0.520836,0.601317,0.405519,0.945343,0.981593,0.0584611,0.229652,0.276343,0.829072,0.607557,0.967137,0.713949,0.306,0.156282,0.449472,0.899239,0.432179,0.156223,0.275076,0.767033,0.316985,0.892639,0.591477,0.373561,0.138856,0.558955,0.653475,0.330728,0.642222,0.308374,0.0912634,0.163057,0.909692,0.496782,0.1084,0.891285,0.555243,0.338052,0.167628,0.384316,0.94561,0.134765,0.0982649,0.251609,0.291046,0.547737,0.150848,0.723225,0.70396,0.425925,0.490258,0.0209454,0.318564,0.0817345,0.394506,0.45742,0.640689,0.0479816,0.788148,0.282911,0.356356,0.879411,0.445968,0.266048,0.376193,0.554369,0.157333,0.931437,0.892421,0.324961,0.315753,0.83803,0.459726,0.414018,0.0896395,0.750772,0.961755,0.240488,0.473997,0.665715,0.666412,0.964255,0.686661,0.984976,0.0459892,0.0811671,0.442396,0.686678,0.129149,0.230544,0.969589,0.485505,0.109955,0.415558,0.751553,0.486148,0.969926,0.908886,0.417585,0.862347,0.233847,0.733338,0.700378,0.693572,0.147356,0.790017,0.444344,0.109111,0.0305051,0.918341,0.774826,0.696918,0.882596,0.461486,0.681894,0.928585,0.542654,0.12429,0.615263,0.671802,0.354834,0.584853,0.157307,0.464789,0.000410601,0.90886,0.950937,0.970337,0.817746,0.368523,0.832684,0.0515924,0.101861,0.533062,0.745165,0.249216,0.323079,0.189509,0.358327,0.353584,0.10785,0.133153,0.0505019,0.990446,0.594639,0.732396,0.919031,0.137293,0.856686,0.534294,0.809095,0.21152,0.119147,0.966402,0.676309,0.119558,0.875261,0.627247,0.0898947,0.693007,0.995769,0.922579,0.744599,0.0976298,0.455641,0.489764,0.346846,0.77872,0.679273,0.705173,0.132305,0.787124,0.838325,0.182807,0.77757,0.432964,0.915202,0.6966,0.570257,0.771888,0.230895,0.379351,0.983408,0.350042,0.345753,0.659718,0.4696,0.221014,0.286964,0.559494,0.914021,0.282733,0.482073,0.65862,0.380363,0.937715,0.148384,0.727209,0.716435,0.827658,0.432382,0.848739,0.614781,0.270707,0.031546,0.392351,0.703671,0.946748,0.0889511,0.273928,0.718636,0.319846,0.65328,0.702045,0.669888,0.999033,0.361762,0.139488,0.220047,0.648727,0.698982,0.134069,0.93146,0.181056,0.792689,0.311823,0.11877,0.941074,0.0390324,0.835205,0.768731,0.471414,0.683944,0.383512,0.742122,0.71549,0.775863,0.445793,0.662239,0.864814,0.719721,0.380875,0.18466,0.373001,0.0829198,0.854548,0.372034,0.444682,0.994035,0.592081,0.0934087,0.693017,0.72615,0.0248686,0.874073,0.518839,0.336692,0.992843,0.459913,0.375724,0.828048,0.228644,0.847139,0.511992,0.612156,0.58926,0.227483,0.388019,0.0350534,0.889721,0.252833,0.754775,0.270596,0.437493,0.127776,0.353516,0.292041,0.49981,0.798198,0.286077,0.0918911,0.891607,0.979094,0.818041,0.916476,0.853167,0.33688,0.253167,0.84601,0.796793,0.628892,0.674058,0.0254371,0.47603,0.18605,0.637593,0.0652907,0.413533,0.0256125,0.100344,0.303254,0.278446,0.855119,0.573851,0.715939,0.982895,0.927367,0.00798011,0.482704,0.725565,0.294057,0.574596,0.617172,0.273151,0.392637,0.533647,0.126318,0.729517,0.786815,0.972328,0.52631,0.415707,0.646386,
0.551747,0.891737,0.832436,0.189341,0.957028,0.245969,0.214953,0.0573717,0.549223,0.493399,0.91249,0.123074,0.209338,0.895385,0.0504407,0.217318,0.378089,0.776006,0.511375,0.952685,0.393178,0.784525,0.345322,0.926825,0.910843,0.0748388,0.71364,0.883171,0.601149,0.129347,0.529557,0.152896,0.0210835,0.361993,0.342237,0.978111,0.607963,0.55719,0.0354828,0.157186,0.0505891,0.947973,0.28026,0.259927,0.843358,0.330701,0.477245,0.221448,0.106707,0.98862,0.174133,0.499884,0.773145,0.519454,0.426709,0.683988,0.594293,0.140349,0.56716,0.195442,0.269696,0.0967167,0.348338,0.290779,0.45871,0.690575,0.26889,0.0666728,0.247766,0.304373,0.223859,0.298355,0.252346,0.504119,0.558282,0.0957045,0.83482,0.0355268,0.317152,0.941527,0.0241466,0.491285,0.441411,0.797292,0.0107392,0.86812,0.48128,0.605032,0.00846868,0.0484399,0.800474,0.278164,0.145157,0.148813,0.568943,0.603867,0.839388,0.837833,0.670539,0.0871539,0.142206,0.894399,0.385509,0.394553,0.398518,0.94379,0.490257,0.233338,0.979317,0.807409,0.174864,0.00346381,0.298694,0.616275,0.800756,0.309433,0.484394,0.282036,0.914465,0.492863,0.330476,0.71494,0.771027,0.475632,0.863753,0.339971,0.0794991,0.703141,0.177804,0.750039,0.790295,0.320011,0.644437,0.175803,0.714563,0.0429548,0.119594,0.20482,0.276292,0.098911,0.0122294,0.451157,0.102375,0.310923,0.0674315,0.90313,0.620357,0.551826,0.185166,0.534822,0.0446891,0.515642,0.249762,0.815717,0.991274,0.113514,0.155687,0.0707733,0.816655,0.333491,0.820812,0.60695,0.653502,0.465249,0.782753,0.368065,0.508204,0.902347,0.572885,0.784496,0.00125782,0.585115,0.235653,0.103633,0.896038,0.303084,0.00676295,0.516395,0.85491,0.191929,0.0512168,0.899599,0.707571,0.300979,0.715316,0.698845,0.414493,0.871003,0.769619,0.231148,0.204495,0.59043,0.838098,0.857997,0.0556795,0.620851,0.226062,0.563883,0.523198,0.798947,0.348379,0.524456,0.384062,0.584032,0.628089,0.2801,0.887117,0.634852,0.796494,0.742027,0.826781,0.847711,0.641626,0.534352,0.14869,0.356942,0.233197,0.563183,0.227945,0.00281557,0.794331,0.43244,0.593246,0.632429,0.290437,0.648926,0.25328,0.516498,0.212809,0.776479,0.315445,0.561188,0.300935,0.699507,0.145221,0.929023,0.979606,0.0323375,0.563875,0.7761,0.774365,0.390655,0.623811,0.415991,0.925007,0.772501,0.772933,0.158204,0.335684,0.000878856,0.161019,0.130015,0.433319,0.754266,0.762444,0.723755,0.403191,0.0157245,0.240253,0.616,0.792203,0.555699,0.177188,0.0931377,0.255205,0.322409,0.0221609,0.234811,0.354747,0.586035,0.0109114,0.129111,0.976691,0.634723,0.545102,0.901698,0.407224,0.318036,0.0599016,0.742907,0.318915,0.220921,0.872922,0.752233,0.975187,0.635366,0.475989,0.378378,0.651091,0.716242,0.994378,0.443294,0.271941,0.171566,0.536432,0.527146,0.493976,0.558592,0.761957,0.848722,0.144628,0.772868,0.977834,0.121319,0.407591,0.522936,0.0230164,0.814815,0.840972,0.082918,0.557722,0.159886,0.303839,0.430644,0.912119,0.279026,0.0660107,0.388108,0.657403,0.717102,0.10435,0.651781,0.160395,0.376291,0.823348,0.696827,0.903437,0.317323,0.25542,0.665394,0.166045,0.400048,0.438262,0.143879,0.521366,0.845853,0.666815,0.544383,0.660668,0.507786,0.627301,0.21839,0.667672,0.93114,0.649034,0.579792,0.210166,0.715045,0.9679,0.867569,0.432147,0.0722504,0.51935,0.592542,0.448542,0.342698,0.289369,0.351978,0.660021,0.544789,0.0173722,0.826066,0.944836,0.455634,0.969945,0.466203,0.301487,0.63676,0.0105854,0.962155,0.144546,0.637886,0.180545,0.812218,0.569026,0.829579,0.39201,0.779191,0.544624,0.35991,0.64676,0.976771,0.43216,0.166111,0.569313,0.880702,0.508808,0.858682,0.23268,0.168829,0.403471,0.250052,0.994895,0.348307,0.70
5687,0.96484,0.81451,0.00717383,0.601599,0.825095,0.969329,0.746145,0.462981,0.149874,0.558363,0.0320072,0.979453,0.950373,0.811199,0.524077,0.310283,0.457959,0.500848,0.742443,0.62407,0.0701613,0.623145,0.132878,0.928843,0.855825,0.301707,0.332314,0.105877,0.296602,0.680621,0.811564,0.261442,0.495131,0.818737,0.863042,0.320226,0.788066,0.609187,0.783207,0.93794,0.16755,0.815214,0.917393,0.117923,0.626413,0.441471,0.428205,0.0843719,0.942319,0.170648,0.708442,0.0124802,0.793793,0.84132,0.941324,0.649617,0.143027,0.273638,0.755494,0.439629,0.954259,0.567058,0.701071,0.44939,0.385795,0.564112,0.769615,0.173862,0.173299,0.552822,0.111802,0.340849,0.368037,0.0291947,0.458772,0.994449,0.470665,0.886977,0.0788214,0.412984,0.0576252,0.787263,0.425464,0.851418,0.628583,0.366788,0.501035,0.771609,0.640426,0.256529,0.211238,0.594684,0.823587,0.912309,0.0440741,0.209382,0.476422,0.81369,0.383244,0.649721,0.366512,0.495046,0.99057,0.734549,0.52424,0.449341,0.728998,0.994905,0.336318,0.80782,0.40789,0.393943,0.595083,0.833354,0.245361,0.223665,0.200142,0.746396,0.995275,0.840567,0.00292566,0.206513,0.435252,0.826513,0.118822,0.479326,0.0358954,0.595243,0.293015,0.419139,0.244964,0.659527,0.914185,0.235534,0.394076,0.438425,0.684875,0.123074,0.433331,0.0211931,0.930894,0.84122,0.415137,0.525976,0.674574,0.660498,0.749642,0.874716,0.406894,0.744916,0.715283,0.40982,0.951429,0.150535,0.236333,0.0702509,0.629861,0.272228,0.665494,0.922876,0.691367,0.910459,0.582403,0.605552,0.145992,0.976479,0.0439776,0.830867,0.0995537,0.477308,0.85206,0.0304474,0.318529,0.267197,0.556424,0.993102,0.927695,0.306065,0.867818,0.334589,0.0509814,0.583101,0.744408,0.00241037,0.733636,0.980741,0.0726613,0.363496,0.252969,0.738156,0.286372,0.944336,0.648614,0.868776,0.549889,0.794606,0.845255,0.593867,0.625473,0.944809,0.0711748,0.477534,0.975256,0.389703,0.74473,0.53168,0.382806,0.672425,0.837745,0.250624,0.00701368,0.888727,0.833725,0.751422,0.891137,0.567361,0.732163,0.963799,0.930858,0.985132,0.701954,0.21723,0.929469,0.350568,0.0860059,0.479358,0.145175,0.931261,0.073224,0.770648,0.87607,0.144399,0.248182,0.851327,0.534102,0.992912,0.383007,0.916908,0.665337,0.220752,0.167532,0.672351,0.109479,0.00125721,0.423773,0.000616314,0.568618,0.155936,0.964415,0.499476,0.141068,0.666369,0.716706,0.0705368,0.0169372,0.802712,0.549894,0.162112,0.733973,0.623118,0.93276,0.610043,0.767517,0.180941,0.46137,0.301619,0.173854,0.844377,0.218527,0.839191,0.0651288,0.386059,0.511541,0.174608,0.387317,0.935314,0.175224,0.955935,0.0912503,0.139639,0.455411,0.232319,0.806008,0.172117,0.302855,0.822945,0.974829,0.85275,0.985057,0.708802,0.475868,0.917817,0.318845,0.243385,0.0987583,0.780215,0.545005,0.272612,0.624592,0.763532,0.111803,0.68972,0.149591,0.623344,0.864328,0.536908,0.558658,0.0395525,0.492843,0.649909,0.179192,0.948254,0.882227,0.9852,0.120371,0.185082,0.808145,0.0951994,0.0378321,0.793202,0.804001,0.5137,0.711019,0.122846,0.757085,0.809777,0.903061,0.30209,0.0823889,0.527653,0.0656218,0.194191,0.217373,0.215213,0.817535,0.0817017,0.752121,0.376194,0.121254,0.244964,0.0261023,0.300446,0.193217,0.908329,0.285645,0.313588,0.0934119,0.0937902,0.408787,0.131244,0.886992,0.212788,0.644944,0.598011,0.335635,0.402029,0.407788,0.238696,0.704119,0.490177,0.766349,0.769741,0.684368,0.983723,0.984954,0.501904,0.0654244,0.737075,0.878097,0.186679,0.982038,0.9042,0.487124,0.175256,0.812529,0.77277,0.488844,0.905941,0.86656,0.897631,0.037185,0.753552,0.110419,0.682129,0.351563,0.446054,0.0841583,0.759351,0.68475,0.788277,0.249527,0.4511,0.558018,0
.933895,0.434822,0.542972,0.435799,0.500247,0.280047,0.313896,0.686925,0.262086,0.218096,0.17405,0.437341,0.0306254,0.946819,0.926185,0.936566,0.81338,0.823816,0.973751,0.566932,0.934235,0.655881,0.918494,0.380289,0.740039,0.677845,0.0650395,0.528316,0.927372,0.516139,0.0863347,0.861267,0.950961,0.629307,0.297067,0.451208,0.909354,0.610963,0.138133,0.17144,0.829059,0.312183,0.608781,0.859685,0.259002,0.534966,0.796251,0.0723815,0.358782,0.770003,0.639313,0.293017,0.425883,0.557807,0.673306,0.165922,0.235652,0.738345,0.694238,0.163024,0.254484,0.780573,0.0242916,0.205445,0.40988,0.321358,0.656653,0.319234,0.932321,0.794786,0.490673,0.76138,0.106969,0.0994544,0.621065,0.365971,0.63442,0.417316,0.438352,0.993202,0.187319,0.0776651,0.286219,0.613202,0.635472,0.959525,0.779124,0.871125,0.69787,0.473362,0.0341488,0.952354,0.253935,0.0584404,0.1578,0.663814,0.379799,0.814453,0.983048,0.31212,0.609239,0.473722,0.0735002,0.716208,0.573176,0.694565,0.0821781,0.207597,0.111881,0.52053,0.200799,0.2992,0.598195,0.487018,0.912402,0.233668,0.446542,0.691525,0.104792,0.144412,0.164887,0.138941,0.0967668,0.418822,0.197382,0.254567,0.0826362,0.57718,0.0690197,0.0656846,0.8893,0.678259,0.539406,0.9628,0.394466,0.112583,0.657366,0.476645,0.320179,0.769247,0.997175,0.520978,0.0684469,0.59537,0.00799569,0.980849,0.829038,0.454538,0.672374,0.93383,0.59895,0.837261,0.0727716,0.695717,0.256083,0.270153,0.950284,0.338719,0.847334,0.0193035,0.404403,0.736634,0.697562,0.94381,0.699434,0.0920286,0.0563925,0.3568,0.568673,0.376572,0.126047,0.565848,0.89755,0.194494,0.161218,0.905546,0.175342,0.990256,0.360084,0.847716,0.924086,0.959034,0.684977,0.996858,0.654751,0.94106,0.267011,0.605035,0.279778,0.114345,0.624338,0.684182,0.850978,0.321901,0.627991,0.550413,0.413929,0.684384,0.907212,0.982602,0.0609558,0.0332592,0.54845,0.958506,0.227753,0.709668,0.864051,0.403095,0.699924,0.224135,0.250811,0.62401,0.183169,0.935788,0.620868,0.83792,0.876848,0.887879,0.442955,0.156626,0.00222359,0.0672935,0.840808,0.853202,0.389194,0.468799,0.403615,0.803123,0.153183,0.310827,0.785726,0.214139,0.344086,0.334176,0.172645,0.571839,0.0438439,0.0366962,0.974934,0.743768,0.260831,0.225746,0.367778,0.444,0.161534,0.988646,0.28192,0.0383814,0.876525,0.724876,0.195007,0.878748,0.792169,0.0358152,0.73195,0.181363,0.504614,0.135565,0.984486,0.657798,0.446392,0.770212,0.871937,0.790478,0.104388,0.0445814,0.362317,0.148232,0.0812776,0.337251,0.891999,0.342109,0.562997,0.259777,0.786109,0.724531,0.248423,0.0680296,0.762912,0.124948,0.792905,0.957919,0.00369629,0.585074,0.993735,0.735647,0.766437,0.498349,0.871212,0.750923,0.156147,0.317604,0.521135,0.0280833,0.108082,0.625523,0.0726647,0.470399,0.773755,0.153942,0.80765,0.665754,0.496051,0.370647,0.925532,0.28216,0.095178,0.173955,0.35019,0.85809,0.298903,0.143095,0.81601,0.302599,0.728169,0.809744,0.0382457,0.494606,0.308093,0.909457,0.24553,0.46424,0.227061,0.766665,0.492323,0.335143,0.392188,0.564988,0.805541,0.165943,0.71893,0.613192,0.831697,0.214982,0.983839,0.757229,0.497142,0.0790171,0.931183,0.847332,0.937107,0.230086,0.990427,0.753117,0.532685,0.718596,0.562861,0.570931,0.213202,0.870954,0.480388,0.458732,0.335194,0.707449,0.225397,0.827517,0.0425917,0.617585,0.392505,0.848133,0.783528,0.111436,0.461325,0.615225,0.326417,0.445164,0.372454,0.823559,0.524181,0.303637,0.670891,0.461288,0.533723,0.661318,0.214405,0.0664086,0.379914,0.777266,0.63734,0.593116,0.64822,0.117728,0.0518483,0.983414,0.825177,0.277245,0.810932,0.867769,0.894831,0.203437,0.715902,0.678359,0.314873,0.177227,0.293584,0.
[... comma-separated floating-point values from a data file added by this patch; the raw numeric contents carry no readable information and are omitted here ...]
34601,0.631682,0.914121,0.0510691,0.175359,0.632378,0.282355,0.963765,0.584449,0.581098,0.0432139,0.0787204,0.882136,0.822769,0.996812,0.80934,0.0597395,0.0691454,0.334632,0.583839,0.353377,0.89887,0.240076,0.13714,0.288559,0.597568,0.0919349,0.0319279,0.635262,0.244259,0.6084,0.269864,0.875941,0.522521,0.320933,0.0512994,0.154898,0.603287,0.0150647,0.739347,0.184385,0.0582785,0.818068,0.0665209,0.881048,0.81488,0.875861,0.940787,0.884025,0.210493,0.524627,0.237402,0.109363,0.764703,0.374542,0.397922,0.362271,0.466477,0.42985,0.997533,0.710736,0.0382495,0.267396,0.586677,0.56077,0.588329,0.637976,0.715668,0.191616,0.653041,0.455015,0.376001,0.711319,0.273083,0.442522,0.592367,0.0879628,0.318384,0.533154,0.971988,0.528877,0.057781,0.209389,0.638239,0.822484,0.583931,0.0361611,0.184755,0.050408,0.466011,0.182287,0.761144,0.50426,0.449684,0.34782,0.0650303,0.0380131,0.985796,0.780698,0.22963,0.638837,0.235714,0.605631,0.350156,0.508797,0.048153,0.942523,0.59676,0.366536,0.475677,0.568748,0.895413,0.533458,0.778137,0.533652,0.355942,0.362068,0.569814,0.540697,0.412476,0.0358243,0.722984,0.17362,0.540084,0.172668,0.52144,0.605115,0.210681,0.507237,0.385813,0.440311,0.146074,0.621527,0.0459418,0.49623,0.130324,0.0940948,0.438753,0.727084,0.460631,0.91443,0.295831,0.356044,0.447889,0.0739682,0.889697,0.803831,0.436036,0.45951,0.344528,0.848512,0.495335,0.0675123,0.022132,0.0354191,0.240181,0.543572,0.640534,0.450862,0.0508088,0.026347,0.891173,0.196882,0.647874,0.937115,0.693112,0.778198,0.0312097,0.131865,0.505282,0.491841,0.0462948,0.801113,0.847885,0.494183,0.875081,0.737582,0.298014,0.311117,0.197092,0.642542,0.15963,0.692427,0.710054,0.181762,0.727846,0.950235,0.725334,0.36838,0.401097,0.776143,0.394727,0.29227,0.973025,0.0426006,0.229385,0.666137,0.820798,0.260595,0.798002,0.32608,0.752436,0.844297,0.127193,0.600321,0.33848,0.00227431,0.337903,0.636494,0.313392,0.534996,0.279037,0.473021,0.227422,0.989091,0.654783,0.955268,0.939326,0.380117,0.323648,0.340423,0.15626,0.718375,0.632694,0.129285,0.760976,0.862079,0.795422,0.581774,0.122674,0.593424,0.907854,0.875111,0.437721,0.0350472,0.475432,0.776201,0.0373215,0.813335,0.412695,0.350713,0.348331,0.691732,0.823735,0.575753,0.680823,0.478518,0.531021,0.620149,0.858635,0.85467,0.960573,0.0148949,0.573044,0.593267,0.14418,0.33402,0.455346,0.939602,0.915794,0.57802,0.533026,0.823648,0.453131,0.970747,0.858695,0.928563,0.746948,0.896017,0.741898,0.159643,0.24673,0.0902281,0.851375,0.070465,0.665981,0.532198,0.548983,0.197002,0.152347,0.407618,0.0516718,0.112919,0.422513,0.624716,0.706186,0.566693,0.958736,0.161532,0.506295,0.87453,0.739552,0.0393205,0.698179,0.192683,0.0100672,0.556874,0.121245,0.757015,0.452891,0.863143,0.916658,0.699621,0.953371,0.768032,0.770086,0.619352,0.30023,0.319069,0.816354,0.452576,0.726687,0.868026,0.565495,0.1492,0.492743,0.271681,0.715892,0.451479,0.433213,0.222187,0.326009,0.172765,0.261507,0.0241882,0.365447,0.271575,0.581062,0.486693,0.0285895,0.0339535,0.349836,0.945247,0.733575,0.303207,0.713279,0.503661,0.922559,0.0135092,0.82273,0.738913,0.466086,0.549417,0.606939,0.031581,0.698617,0.099682,0.303262,0.414509,0.551161,0.736475,0.636696,0.87717,0.909239,0.898204,0.901359,0.274687,0.169778,0.482421,0.761379,0.198368,0.516374,0.111215,0.143615,0.249949,0.414422,0.856894,0.75361,0.336981,0.870404,0.57634,0.0758939,0.336489,0.125757,0.682833,0.36807,0.824374,0.782515,0.671332,0.238884,0.333676,0.407807,0.87558,0.210847,0.317046,0.773784,0.112205,0.591733,0.943562,0.594626,0.353112,0.14193,0.111001,0.464327,0.285545,0.
36095,0.878749,0.14244,0.11456,0.21573,0.0128433,0.690901,0.291624,0.349333,0.816658,0.974457,0.717403,0.641032,0.756973,0.388735,0.879916,0.090649,0.796542,0.755496,0.301496,0.113589,0.529279,0.413701,0.705322,0.472841,0.00832782,0.0584341,0.614771,0.119329,0.522762,0.900316,0.480279,0.401511,0.0427559,0.594839,0.617241,0.0555992,0.28574,0.908864,0.404932,0.102398,0.883322,0.122334,0.74343,0.640294,0.51107,0.623346,0.730943,0.307612,0.378841,0.032439,0.421201,0.908121,0.44614,0.126522,0.380962,0.454468,0.184956,0.995733,0.573797,0.707718,0.896049,0.0540758,0.109229,0.938805,0.648915,0.726469,0.994405,0.934655,0.635334,0.399336,0.0370525,0.518655,0.521671,0.780482,0.158949,0.0327404,0.403828,0.889892,0.340352,0.78267,0.922331,0.761553,0.69079,0.368472,0.888075,0.071752,0.82294,0.0730314,0.0674851,0.396737,0.780749,0.963535,0.450812,0.889978,0.90234,0.0997274,0.616447,0.896745,0.0343822,0.251781,0.296081,0.0714346,0.770436,0.817752,0.851917,0.929385,0.850492,0.255745,0.819278,0.190845,0.0384147,0.741609,0.952397,0.729205,0.110081,0.840472,0.800957,0.93302,0.913504,0.868442,0.329757,0.694253,0.831976,0.780569,0.584231,0.734316,0.880297,0.200679,0.631061,0.914679,0.45246,0.927142,0.986114,0.222896,0.744894,0.838031,0.152281,0.595386,0.0937758,0.971559,0.786231,0.13219,0.713168,0.738628,0.861395,0.823248,0.5791,0.662352,0.756269,0.492604,0.530794,0.0860255,0.186857,0.36277,0.866595,0.771089,0.0970869,0.746892,0.971767,0.728148,0.66157,0.424227,0.65529,0.647684,0.647122,0.400184,0.485715,0.799403,0.99557,0.57949,0.770962,0.781801,0.711681,0.48413,0.520429,0.573076,0.307378,0.0995295,0.235428,0.0636465,0.592134,0.766222,0.149672,0.778991,0.128992,0.0162668,0.55008,0.226079,0.763158,0.521847,0.954227,0.424729,0.946074,0.609517,0.0724128,0.593196,0.00970153,0.558127,0.3926,0.00527177,0.137618,0.163562,0.787073,0.849298,0.647691,0.307502,0.422375,0.955069,0.407031,0.657803,0.0187159,0.999165,0.424025,0.168388,0.778157,0.553017,0.184655,0.328237,0.779096,0.947813,0.850084,0.733324,0.372542,0.796158,0.342841,0.444954,0.389354,0.352543,0.00308174,0.781954,0.357815,0.140699,0.945515,0.144887,0.989998,0.593207,0.452389,0.412372,0.548276,0.85942,0.0701751,0.566992,0.858586,0.4942,0.73538,0.636742,0.0472169,0.920034,0.964979,0.826313,0.867847,0.815063,0.559637,0.240389,0.61122,0.902478,0.685343,0.000574419,0.255021,0.688425,0.782528,0.612836,0.829125,0.728044,0.757723,0.819122,0.32125,0.210112,0.231495,0.869527,0.0695327,0.30167,0.436519,0.928118,0.79587,0.171898,0.564861,0.843087,0.0919328,0.529839,0.6694,0.95978,0.344902,0.229037,0.200169,0.956122,0.131516,0.885512,0.956697,0.386537,0.573938,0.739225,0.999373,0.403062,0.467268,0.757096,0.222185,0.788519,0.967208,0.453679,0.658045,0.036741,0.75535,0.0945637,0.964859,0.551219,0.266462,0.52972,0.394306,0.358395,0.0595592,0.0637061,0.318175,0.404461,0.292743,0.518344,0.360583,0.424259,0.403857,0.31728,0.810796,0.977794,0.0565048,0.810169,0.380856,0.523773,0.567265,0.603041,0.312292,0.534473,0.0567204,0.970337,0.571214,0.81207,0.0649007,0.536074,0.363289,0.331363,0.0657937,0.757595,0.689758,0.125353,0.821301,0.00793308,0.529814,0.114045,0.526277,0.890397,0.538304,0.930134,0.207677,0.3491,0.907929,0.264182,0.159269,0.288785,0.787955,0.726534,0.891826,0.100247,0.261008,0.948546,0.070584,0.832222,0.760616,0.135485,0.368296,0.123906,0.466848,0.43409,0.881501,0.156606,0.559442,0.702802,0.164539,0.0892564,0.816847,0.690816,0.979654,0.355151,0.62095,0.187331,0.704252,0.528879,0.451513,0.863521,0.817664,0.239468,0.590055,0.70949,0.339715,0.851063,0.658036,0.410299,0.
683285,0.418653,0.545784,0.051581,0.542558,0.0126317,0.485671,0.424059,0.169237,0.0451129,0.126861,0.333776,0.134369,0.943709,0.0245919,0.114023,0.29886,0.645542,0.301354,0.00311138,0.174421,0.752867,0.866632,0.992085,0.992336,0.456687,0.701575,0.332051,0.30775,0.359611,0.74235,0.991036,0.778263,0.288134,0.0426166,0.320821,0.300766,0.528287,0.74488,0.470003,0.5734,0.871742,0.803779,0.707769,0.81545,0.828371,0.821792,0.11431,0.473913,0.123146,0.117422,0.648334,0.876014,0.984054,0.640419,0.868349,0.440741,0.341994,0.2004,0.748492,0.701604,0.94275,0.739527,0.479867,0.230885,0.782144,0.800689,0.53165,0.310431,0.545569,0.00165374,0.883831,0.417311,0.805433,0.5916,0.232761,0.633804,0.413393,0.347072,0.107717,0.536539,0.464493,0.756052,0.412553,0.448547,0.396471,0.280902,0.889288,0.738464,0.481302,0.63778,0.440068,0.424052,0.377308,0.919936,0.654937,0.159452,0.720625,0.186587,0.469883,0.266194,0.188241,0.353714,0.683505,0.993674,0.945314,0.916266,0.627478,0.358707,0.263338,0.735195,0.895246,0.727831,0.491247,0.307799,0.176378,0.887717,0.588701,0.0656667,0.626181,0.0700032,0.703447,0.0662498,0.494056,0.0807547,0.986185,0.148992,0.240206,0.70681,0.33558,0.710089,0.973004,0.523821,0.0638026,0.656509,0.517495,0.00911667,0.572775,0.144973,0.367824,0.836113,0.880168,0.26307,0.563944,0.371415,0.570869,0.740322,0.259133,0.15957,0.805989,0.885314,0.229573,0.509436,0.951564,0.723628,0.59019,0.937749,0.872621,0.830397,0.644559,0.2082,0.540486,0.617563,0.732021,0.604288,0.274072,0.249516,0.613405,0.846846,0.394489,0.981228,0.682959,0.274658,0.244298,0.246903,0.646073,0.815166,0.987225,0.905205,0.974736,0.793213,0.79052,0.204309,0.302649,0.742083,0.927937,0.892839,0.679833,0.800558,0.723236,0.324392,0.00875828,0.263721,0.941956,0.74078,0.86801,0.216027,0.990296,0.481414,0.0628737,0.384785,0.462643,0.745833,0.659443,0.70694,0.992735,0.305516,0.522107,0.97996,0.210721,0.496843,0.773173,0.0012406,0.701152,0.0758222,0.743324,0.629089,0.968662,0.423157,0.429646,0.691898,0.747549,0.438405,0.955619,0.689505,0.179184,0.823628,0.905532,0.16948,0.305043,0.968406,0.554266,0.767685,0.714238,0.213709,0.474626,0.706974,0.519224,0.996732,0.686934,0.729945,0.493575,0.460107,0.731186,0.194727,0.535929,0.47451,0.823816,0.504591,0.897667,0.253462,0.196488,0.645216,0.691867,0.152107,0.334721,0.871051,0.975736,0.240253,0.0405315,0.280778,0.208659,0.594797,0.0484638,0.922898,0.808506,0.523089,0.629871,0.32773,0.519822,0.316805,0.0576751,0.0133971,0.776912,0.788861,0.208124,0.312841,0.263371,0.0319395,0.817432,0.161038,0.285401,0.01392,0.806254,0.977268,0.166027,0.140975,0.848319,0.141763,0.381228,0.888851,0.422541,0.589888,0.483648,0.471005,0.512785,0.292154,0.994095,0.142657,0.619884,0.513917,0.459462,0.677559,0.527314,0.236374,0.466419,0.735438,0.549215,0.72979,0.767377,0.366646,0.890828,0.0527787,0.380567,0.697082,0.0300469,0.546594,0.838057,0.878366,0.688357,0.219286,0.767217,0.110898,0.809173,0.250865,0.581904,0.321958,0.543019,0.575998,0.464615,0.162902,0.0899148,0.924077,0.840461,0.617229,0.16045,0.30688,0.352666,0.709665,0.0366705,0.120043,0.0763116,0.927498,0.172822,0.456878,0.62458,0.202869,0.0034719,0.462637,0.0812354,0.691829,0.681923,0.848452,0.802727,0.491096,0.0993174,0.384631,0.813055,0.642336,0.960629,0.27767,0.805238,0.0505436,0.201746,0.645699,0.667772,0.362197,0.952579,0.0204384,0.071862,0.98925,0.140482,0.148174,0.916748,0.313304,0.605052,0.541328,0.516173,0.608524,0.00396585,0.597409,0.300352,0.685889,0.445861,0.103079,0.176985,0.545178,0.48771,0.99004,0.187514,0.448339,0.267709,0.992753,0.498882,0.469456,0.63
8452,0.166654,0.831653,0.591031,0.187093,0.903515,0.58028,0.327575,0.0516883,0.497029,0.640879,0.65674,0.038357,0.157052,0.265264,0.0423229,0.75446,0.565616,0.728212,0.200321,0.668695,0.905197,0.7455,0.156405,0.895236,0.933014,0.604744,0.162946,0.925767,0.103626,0.632402,0.564218,0.27028,0.464054,0.155249,0.457373,0.367569,0.73553,0.784948,0.419257,0.232558,0.425826,0.0759971,0.270915,0.582878,0.341261,0.313238,0.337339,0.906877,0.0414497,0.53766,0.575572,0.946646,0.28316,0.731977,0.841883,0.216174,0.33672,0.00482862,0.141941,0.440346,0.63723,0.706159,0.710626,0.101284,0.861408,0.167999,0.468853,0.596937,0.952947,0.88811,0.829496,0.378773,0.964108,0.100411,0.961652,0.305368,0.413649,0.29899,0.212245,0.455098,0.83665,0.787817,0.401745,0.11981,0.519794,0.243628,0.335984,0.856514,0.248456,0.477925,0.296861,0.885687,0.184084,0.0074871,0.986971,0.0454915,0.175486,0.455824,0.642429,0.128433,0.343935,0.471924,0.507207,0.308042,0.572335,0.468858,0.613411,0.985984,0.767849,0.825656,0.441082,0.604499,0.613473,0.842827,0.724309,0.133267,0.086455,0.0602933,0.989781,0.334911,0.538218,0.286642,0.220598,0.722302,0.294129,0.207569,0.767793,0.469615,0.663393,0.410222,0.598049,0.00732811,0.882146,0.105256,0.31537,0.454482,0.574114,0.928781,0.440465,0.341962,0.754437,0.881548,0.946461,0.36791,0.724375,0.670771,0.501176,0.81083,0.731064,0.490957,0.145741,0.269282,0.777599,0.366339,0.991583,0.0717282,0.573908,0.759377,0.541344,0.237302,0.169599,0.139392,0.24463,0.0517449,0.244648,0.56,0.506226,0.818762,0.488781,0.946692,0.160724,0.243218,0.82824,0.107185,0.611128,0.552615,0.777956,0.112304,0.363444,0.50902,0.603262,0.509186,0.778302,0.380861,0.875525,0.769885,0.452589,0.449434,0.529262,0.993933,0.686735,0.69886,0.133325,0.931365,0.750605,0.377973,0.491366,0.256832,0.196735,0.980147,0.203523,0.357459,0.223365,0.0317631,0.464644,0.834493,0.584378,0.2426,0.946797,0.947822,0.75162,0.550059,0.457008,0.529922,0.93092,0.332533,0.299807,0.383509,0.781967,0.829068,0.377442,0.468702,0.527929,0.510767,0.400067,0.278534,0.88874,0.891433,0.535365,0.0854747,0.87158,0.738889,0.442934,0.0949454,0.770652,0.907578,0.929439,0.355029,0.150178,0.876236,0.302851,0.901798,0.426295,0.759859,0.43172,0.357215,0.0923923,0.731527,0.740724,0.874359,0.560595,0.118166,0.343061,0.088524,0.628933,0.743128,0.367058,0.517673,0.634561,0.902423,0.603148,0.506142,0.641312,0.0460813,0.601087,0.411963,0.953659,0.530526,0.766993,0.103837,0.406762,0.0698441,0.00563576,0.833057,0.829703,0.437356,0.190272,0.922096,0.168883,0.930996,0.796455,0.729478,0.0491619,0.139516,0.818002,0.678095,0.882644,0.18506,0.195768,0.517206,0.0874828,0.798916,0.0233471,0.728794,0.844997,0.624434,0.140758,0.798656,0.15496,0.907751,0.902494,0.561721,0.977595,0.90813,0.394778,0.807298,0.345485,0.585049,0.729393,0.514368,0.516045,0.525848,0.243846,0.565207,0.665364,0.0618483,0.243302,0.548008,0.246908,0.43907,0.0652133,0.334391,0.237986,0.0885604,0.0631854,0.0829829,0.712994,0.203943,0.881639,0.867954,0.111694,0.784133,0.429675,0.0892884,0.692262,0.824453,0.896586,0.0377478,0.409502,0.62598,0.552116,0.925547,0.151828,0.795962,0.490754,0.817191,0.85781,0.734057,0.365199,0.104718,0.173127,0.430412,0.439109,0.411113,0.518973,0.502295,0.494095,0.231967,0.706238,0.375735,0.099921,0.817932,0.159868,0.529596,0.90722,0.85213,0.354049,0.803806,0.889878,0.763551,0.429786,0.441994,0.689098,0.581614,0.237956,0.179852,0.398805,0.0957659,0.913909,0.764004,0.200484,0.0870359,0.194417,0.639594,0.498148,0.71339,0.141889,0.992244,0.945357,0.848127,0.367979,0.0452778,0.666058,0.527846,0.574874,0.5
73279,0.379976,0.928922,0.377085,0.269854,0.692473,0.806871,0.711848,0.381571,0.388485,0.949804,0.561424,0.787291,0.0455695,0.475333,0.551295,0.246054,0.562369,0.745712,0.885648,0.0605172,0.459101,0.0275362,0.0527611,0.404458,0.875663,0.42074,0.449736,0.541721,0.948586,0.0246098,0.115,0.328562,0.953532,0.492085,0.598417,0.646006,0.298956,0.310264,0.0275767,0.687442,0.260068,0.589,0.474732,0.305638,0.0643332,0.026027,0.551691,0.626702,0.771739,0.437339,0.687219,0.23084,0.464875,0.73998,0.635298,0.340538,0.16072,0.0850344,0.882259,0.109306,0.109644,0.997259,0.437868,0.0631765,0.489344,0.0362849,0.709182,0.788301,0.346549,0.736759,0.475742,0.606617,0.325759,0.950474,0.912255,0.390092,0.976501,0.463946,0.0167942,0.74824,0.901285,0.704013,0.97908,0.366161,0.443993,0.614379,0.706699,0.604713,0.699413,0.588958,0.714019,0.809057,0.586218,0.151888,0.872234,0.075562,0.188172,0.581416,0.863863,0.534722,0.318175,0.339605,0.141339,0.643934,0.29008,0.0535944,0.0340261,0.266581,0.517541,0.0508203,0.014821,0.418826,0.754833,0.993901,0.784987,0.198827,0.60828,0.491686,0.80354,0.307693,0.0806439,0.517559,0.11675,0.666861,0.669447,0.988984,0.742423,0.857619,0.5704,0.606286,0.392341,0.888574,0.945891,0.53368,0.532508,0.235971,0.587275,0.566534,0.502552,0.104816,0.617354,0.517373,0.523642,0.372188,0.511274,0.308629,0.571014,0.119554,0.800315,0.374554,0.427247,0.880959,0.892114,0.543997,0.54782,0.56156,0.532981,0.290244,0.41918,0.10338,0.89653,0.811521,0.991954,0.842421,0.345201,0.524462,0.0783924,0.932476,0.0909957,0.580944,0.0372921,0.70835,0.098317,0.560934,0.0805372,0.609591,0.869563,0.651552,0.729145,0.669878,0.026106,0.156392,0.550837,0.91822,0.700389,0.098657,0.47978,0.233369,0.388901,0.89896,0.336749,0.285431,0.710481,0.328703,0.127852,0.0556819,0.853165,0.206245,0.988158,0.944161,0.787189,0.0254502,0.652511,0.885506,0.586384,0.733048,0.495097,0.455948,0.384599,0.224242,0.125826,0.410705,0.380634,0.676662,0.328925,0.0810222,0.775319,0.808705,0.314391,0.16422,0.707665,0.651141,0.449651,0.418145,0.979844,0.577503,0.473827,0.833009,0.783748,0.461985,0.77717,0.570937,0.487435,0.42968,0.456443,0.0738198,0.162728,0.95154,0.529767,0.547327,0.175782,0.655593,0.958033,0.556416,0.332255,0.286957,0.637438,0.107575,0.0956624,0.951829,0.271795,0.803327,0.60297,0.721446,0.221472,0.582813,0.298949,0.695299,0.415822,0.0826969,0.157284,0.192992,0.653634,0.64472,0.622672,0.110077,0.71854,0.7854,0.0616166,0.248307,0.332728,0.237398,0.9039,0.29076,0.793814,0.236155,0.577718,0.431252,0.34373,0.67338,0.383081,0.615525,0.476707,0.986051,0.33697,0.698179,0.568864,0.635919,0.393478,0.984687,0.718616,0.550763,0.177679,0.37225,0.195482,0.800351,0.482327,0.914022,0.585751,0.543943,0.162329,0.918479,0.781342,0.0662287,0.209239,0.575156,0.302384,0.786957,0.00640772,0.646114,0.460337,0.389489,0.261638,0.937044,0.37554,0.598608,0.635224,0.944404,0.234528,0.0287018,0.929091,0.953144,0.579464,0.106769,0.325394,0.774947,0.90712,0.807721,0.688969,0.492872,0.351664,0.851298,0.411351,0.133005,0.917526,0.62059,0.708161,0.21991,0.407547,0.714569,0.866024,0.867885,0.104058,0.127662,0.804929,0.479597,0.72627,0.440153,0.424001,0.960798,0.468855,0.353092,0.913942,0.0483191,0.459861,0.239336,0.823266,0.366982,0.0470564,0.512235,0.859853,0.39872,0.363532,0.271204,0.531726,0.281059,0.891795,0.239887,0.500969,0.299342,0.954456,0.366993,0.167227,0.0585138,0.494655,0.972156,0.538111,0.220925,0.412309,0.962113,0.181723,0.881163,0.315205,0.0956647,0.929483,0.775066,0.335,0.752748,0.142047,0.382057,0.264983,0.0019008,0.780777,0.628515,0.273105,0.312503,0.9
09574,0.1649,0.55239,0.410543,0.464242,0.506846,0.777535,0.631469,0.56536,0.27219,0.603625,0.103471,0.493115,0.0159334,0.0655837,0.674837,0.897097,0.380788,0.770502,0.826579,0.155854,0.105503,0.579328,0.297902,0.487559,0.844311,0.299803,0.268337,0.472826,0.572908,0.58084,0.3824,0.737807,0.133229,0.792943,0.202049,0.640075,0.570478,0.833517,0.205435,0.842668,0.437142,0.308906,0.335783,0.453075,0.37449,0.0106201,0.350172,0.755278,0.781122,0.176752,0.911133,0.886625,0.756079,0.209034,0.374184,0.60039,0.508837,0.642521,0.0732163,0.0817444,0.223361,0.455616,0.819552,0.35659,0.248559,0.0216005,0.996665,0.819037,0.855118,0.202101,0.661705,0.29226,0.511007,0.997488,0.745335,0.885496,0.00810804,0.0955074,0.640775,0.78923,0.272259,0.551907,0.675855,0.0283385,0.760942,0.0500392,0.628729,0.269778,0.69256,0.701945,0.351523,0.915921,0.157561,0.171074,0.272511,0.406121,0.192675,0.269176,0.225158,0.0477927,0.471277,0.886863,0.340052,0.982283,0.884351,0.0853876,0.86778,0.892459,0.180895,0.508554,0.681689,0.453154,0.0604617,0.357544,0.481492,0.821403,0.407583,0.110221,0.0911817,0.100144,0.812166,0.442704,0.0160642,0.969728,0.613779,0.288575,0.375848,0.806454,0.557751,0.601006,0.854246,0.0290276,0.487869,0.194299,0.0113108,0.37222,0.279686,0.87909,0.264679,0.460581,0.387645,0.946368,0.913735,0.448107,0.303912,0.395228,0.26951,0.711496,0.505449,0.360692,0.811639,0.317615,0.803396,0.827703,0.287343,0.417175,0.116278,0.663191,0.223629,0.674029,0.264197,0.0778751,0.703057,0.752066,0.272174,0.714368,0.124286,0.55186,0.593458,0.388965,0.0124416,0.981103,0.335334,0.926177,0.42921,0.639246,0.321405,0.69872,0.350742,0.826854,0.0594115,0.162381,0.144469,0.862808,0.990084,0.431812,0.279983,0.106363,0.0950039,0.503611,0.780392,0.359201,0.581487,0.483449,0.111268,0.85366,0.197817,0.235554,0.405521,0.791275,0.62452,0.417962,0.772378,0.959853,0.344139,0.201588,0.599099,0.665544,0.900307,0.949841,0.492398,0.959719,0.112222,0.636867,0.822527,0.102307,0.0686799,0.102509,0.208669,0.163684,0.606121,0.989062,0.522885,0.187607,0.472511,0.634153,0.0412676,0.670327,0.869707,0.446788,0.461602,0.494227,0.864751,0.23398,0.45408,0.20889,0.435568,0.0531799,0.874434,0.335875,0.00302104,0.366832,0.295594,0.115243,0.00369965,0.11812,0.21755,0.0723796,0.220629,0.426219,0.236063,0.82675,0.415281,0.758949,0.0143573,0.887792,0.393102,0.0556249,0.558119,0.262809,0.502413,0.0197209,0.757036,0.367164,0.253701,0.211116,0.576054,0.689268,0.264296,0.450488,0.0251434,0.267317,0.81732,0.320737,0.38256,0.82102,0.438858,0.60011,0.893399,0.659487,0.0263297,0.129463,0.486237,0.441611,0.888411,0.500594,0.329402,0.281513,0.556219,0.887521,0.544322,0.0586325,0.907242,0.301358,0.425796,0.160943,0.512474,0.00184986,0.850211,0.77677,0.452338,0.875355,0.0440874,0.269657,0.196092,0.426648,0.090677,0.634949,0.0267583,0.984076,0.294436,0.053088,0.113539,0.780673,0.494699,0.00194985,0.281268,0.824101,0.283463,0.837487,0.711622,0.827784,0.89612,0.618864,0.129142,0.321916,0.779806,0.641616,0.323766,0.630017,0.418386,0.776103,0.505372,0.462473,0.0457608,0.701464,0.889121,0.136438,0.336413,0.91588,0.120514,0.630849,0.968968,0.234053,0.411523,0.463666,0.236002,0.69279,0.287767,0.519465,0.530278,0.999389,0.347249,0.426397,0.618252,0.476391,0.748313,0.398059,0.118007,0.0720789,0.0280759,0.536393,0.848182,0.533448,0.998866,0.893943,0.234911,0.887987,0.0303808,0.571324,0.803867,0.150895,0.202173,0.772834,0.384947,0.613696,0.236501,0.62095,0.306487,0.524268,0.140415,0.836764,0.523656,0.487664,0.263161,0.141909,0.964055,0.0114746,0.539967,0.0820619,0.0835536,0.568043,0.618455,0
.931736,0.101491,0.61732,0.825679,0.336402,0.505307,0.85606,0.907727,0.309174,0.00695434,0.1099,0.0820084,0.391902,0.723596,0.318509,0.0128515,0.0300827,0.842777,0.153266,0.866847,0.366433,0.640931,0.130008,0.508342,0.604986,0.141483,0.0483091,0.687048,0.225036,0.616352,0.305502,0.156772,0.717843,0.922823,0.982451,0.0542456,0.42813,0.838511,0.961972,0.737304,0.845465,0.0718723,0.819313,0.237367,0.795468,0.137822,0.250218,0.825551,0.980598,0.403485,0.692398,0.347031,0.0444151,0.822406,0.855373,0.649401,0.963889,0.903682,0.336449,0.188925,0.520034,0.641951,0.345698,0.237877,0.564773,0.328149,0.292123,0.992903,0.166659,0.254095,0.730208,0.0121244,0.325968,0.54952,0.249491,0.121436,0.687342,0.499709,0.946987,0.66794,0.903194,0.639385,0.0149709,0.947609,0.461791,0.870344,0.59701,0.42568,0.774026,0.933458,0.614606,0.29406,0.575409,0.960303,0.531937,0.140183,0.288452,0.82406,0.133086,0.455112,0.0781554,0.863294,0.467236,0.404123,0.412814,0.716727,0.525559,0.100156,0.216436,0.472546,0.768096,0.11963,0.111931,0.783067,0.0672391,0.573722,0.65341,0.664249,0.999402,0.427436,0.597707,0.614008,0.721496,0.173117,0.574312,0.253433,0.313299,0.862764,0.0774928,0.446385,0.317875,0.155648,0.309679,0.785111,0.559771,0.722493,0.501838,0.08533,0.822649,0.718275,0.557876,0.590745,0.837905,0.669807,0.373811,0.905144,0.243529,0.0272214,0.569393,0.242931,0.454657,0.1671,0.85694,0.176153,0.340217,0.431251,0.429585,0.653516,0.294015,0.507078,0.099901,0.61189,0.662726,0.40958,0.397001,0.222497,0.132073,0.89884,0.307827,0.954722,0.617115,0.865703,0.545467,0.455019,0.53551,0.919278,0.360163,0.779039,0.946499,0.929556,0.0219707,0.401156,0.0966562,0.87891,0.577309,0.436873,0.310161,0.00689455,0.0903886,0.604176,0.513973,0.19029,0.216066,0.176699,0.59987,0.613068,0.399197,0.731943,0.511908,0.707024,0.686665,0.129022,0.572727,0.232131,0.584041,0.108238,0.151409,0.944205,0.887277,0.097908,0.873761,0.909248,0.499064,0.970417,0.788158,0.0763735,0.40729,0.0983192,0.083268,0.497679,0.702495,0.597241,0.687968,0.918562,0.77394,0.287838,0.531629,0.173136,0.0197806,0.0435367,0.88016,0.706445,0.172559,0.452888,0.938577,0.7566,0.561125,0.0899855,0.700805,0.448402,0.187893,0.574566,0.35765,0.686958,0.544983,0.145808,0.763331,0.952273,0.244127,0.846599,0.449951,0.946622,0.44384,0.13792,0.865184,0.21778,0.425757,0.396813,0.390916,0.445538,0.44035,0.271077,0.151983,0.612909,0.723965,0.09056,0.369509,0.28509,0.180545,0.0703137,0.733492,0.368439,0.644879,0.0911423,0.0553967,0.189862,0.23695,0.818728,0.142135,0.481077,0.665327,0.592087,0.427699,0.109167,0.730006,0.292883,0.326947,0.155764,0.689696,0.717864,0.601302,0.130046,0.988941,0.753285,0.742955,0.712906,0.843845,0.112464,0.997996,0.0243905,0.182777,0.731488,0.392829,0.827657,0.822631,0.448226,0.017519,0.0595806,0.266954,0.159654,0.540658,0.932281,0.751741,0.968357,0.0414489,0.481747,0.26124,0.368396,0.637511,0.950937,0.0862603,0.238812,0.0809829,0.0752011,0.992097,0.823938,0.788107,0.835942,0.936401,0.786103,0.860333,0.119178,0.517591,0.253162,0.946835,0.340221,0.701388,0.964354,0.399802,0.968342,0.124008,0.94046,0.900624,0.875749,0.908817,0.942073,0.357496,0.170057,0.310469,0.995006,0.120994,0.396729,0.233819,0.201977,0.471931,0.225916,0.0259147,0.260037,0.0618584,0.962316,0.0461399,0.922191,0.0814942,0.563731,0.175354,0.0283292,0.903952,0.876742,0.992683,0.303754,0.845084,0.116691,0.244214,0.745708,0.99244,0.153031,0.687781,0.349936,0.323088,0.99825,0.344942,0.444082,0.394979,0.578761,0.646059,0.86691,0.804677,0.671974,0.126947,0.866536,0.63429,0.173087,0.788727,0.715784,0.736818,0.964
081,0.744113,0.64077,0.840822,0.736796,0.944524,0.685907,0.853487,0.188738,0.431615,0.845928,0.341769,0.119396,0.195864,0.664857,0.117646,0.540806,0.108939,0.512625,0.119567,0.754998,0.379535,0.924245,0.426972,0.506482,0.79078,0.0612611,0.679569,0.579507,0.777045,0.416387,0.543588,0.521158,0.0571573,0.38441,0.257954,0.00168147,0.0703166,0.111441,0.19042,0.501931,0.957369,0.532188,0.621327,0.153233,0.197045,0.738973,0.694039,0.305984,0.251598,0.813606,0.0609821,0.631133,0.737851,0.487954,0.137615,0.528631,0.549215,0.817185,0.108138,0.32626,0.233572,0.651726,0.847417,0.29073,0.0361357,0.105371,0.292411,0.106452,0.216813,0.482831,0.608384,0.174182,0.0150189,0.229711,0.327414,0.212064,0.968683,0.021453,0.518048,0.220281,0.835059,0.57903,0.851414,0.57291,0.066984,0.98903,0.10154,0.616199,0.806215,0.209678,0.942458,0.039787,0.861404,0.789876,0.330517,0.89754,0.895247,0.622928,0.003992,0.11206,0.105758,0.612376,0.286242,0.120777,0.842086,0.613656,0.332841,0.810769,0.635109,0.85089,0.0310508,0.470168,0.42992,0.882465,0.0430778,0.496904,0.871495,0.144618,0.113103,0.67771,0.354297,0.0555613,0.717497,0.215701,0.845437,0.0480135,0.11324,0.740685,0.670941,0.117232,0.852745,0.7767,0.729608,0.138987,0.897477,0.571694,0.752643,0.230318,0.382464,0.387752,0.0812081,0.413514,0.85792,0.511128,0.29598,0.900998,0.00803216,0.167475,0.0456161,0.121135,0.845184,0.399913,0.176696,0.562681,0.615613,0.0221336,0.610695,0.728854,0.762818,0.281636,0.846086,0.615563,0.0583354,0.575694,0.75455,0.955812,0.147389,0.507192,0.186131,0.529852,0.894944,0.267339,0.943367,0.752864,0.778467,0.239346,0.653862,0.786499,0.406821,0.699478,0.907634,0.252005,0.099391,0.0843304,0.814686,0.715004,0.106464,0.425381,0.443858,0.869282,0.707017,0.289945,0.484845,0.765352,0.865639,0.239395,0.721164,0.0130276,0.746587,0.907295,0.54288,0.641532,0.174634,0.486247,0.394396,0.953101,0.725593,0.0482583,0.7396,0.132414,0.747737,0.647234,0.384419,0.847127,0.731565,0.199105,0.562132,0.838029,0.624486,0.00599016,0.707311,0.331502,0.295935,0.192156,0.0968545,0.161574,0.431551,0.818019,0.174602,0.178139,0.725314,0.717481,0.81967,0.899948,0.203728,0.214066,0.853049,0.929321,0.262325,0.592649,0.0617342,0.0100612,0.239884,0.446153,0.857189,0.971448,0.645258,0.419321,0.809477,0.269744,0.425311,0.516788,0.601246,0.721246,0.708944,0.698101,0.882819,0.140496,0.51612,0.057421,0.318634,0.241434,0.774902,0.138305,0.141382,0.97863,0.352371,0.994431,0.907951,0.614696,0.587081,0.969685,0.624757,0.826964,0.415838,0.481946,0.798413,0.061096,0.901267,0.60789,0.33084,0.326577,0.124678,0.932086,0.0478229,0.833622,0.630186,0.930642,0.974118,0.146306,0.988063,0.292752,0.38774,0.762966,0.431057,0.529122,0.741596,0.783428,0.523553,0.649547,0.398124,0.110633,0.619232,0.0228814,0.937598,0.0350704,0.504827,0.73601,0.0961664,0.406094,0.3439,0.427006,0.732671,0.468578,0.359092,0.780494,0.3022,0.989278,0.711136,0.276318,0.135584,0.6992,0.56907,0.523323,0.462166,0.000126784,0.052445,0.203762,0.783555,0.575998,0.853309,0.181679,0.686631,0.472541,0.204561,0.624229,0.507611,0.709388,0.360239,0.603778,0.115482,0.704139,0.0307838,0.848153,0.172717,0.389876,0.628647,0.474917,0.379154,0.339784,0.751234,0.514737,0.0389835,0.320304,0.0380609,0.501149,0.320431,0.0905059,0.704911,0.103986,0.666504,0.558219,0.285665,0.353135,0.0307602,0.490226,0.977364,0.538372,0.199614,0.337603,0.142149,0.315096,0.0417422,0.172933,0.163249,0.214459,0.562809,0.791896,0.689376,0.941962,0.131679,0.44061,0.4567,0.170663,0.760914,0.494761,0.671812,0.0813455,0.585266,0.376723,0.185331,0.25177,0.934942,0.470997,0.60490
5,0.965702,0.961222,0.582269,0.504074,0.160836,0.919872,0.646223,0.475932,0.961614,0.819156,0.63918,0.176074,0.381965,0.431076,0.865449,0.323927,0.562755,0.30606,0.780626,0.733418,0.0669741,0.275387,0.40523,0.14832,0.860654,0.781953,0.333651,0.112424,0.716895,0.804648,0.717329,0.682597,0.76587,0.299598,0.18667,0.926706,0.21947,0.832893,0.402638,0.181085,0.652049,0.041818,0.357158,0.0340134,0.472894,0.222608,0.35794,0.0356493,0.528668,0.138567,0.769068,0.595642,0.413954,0.174298,0.743961,0.274607,0.95625,0.0776122,0.387031,0.673145,0.88226,0.10436,0.355742,0.64813,0.403958,0.542412,0.574836,0.623429,0.375305,0.977474,0.804514,0.0273534,0.0192923,0.161672,0.0613668,0.492186,0.38428,0.419307,0.527835,0.912947,0.557874,0.296903,0.508589,0.971828,0.471201,0.25255,0.246435,0.427451,0.330162,0.633466,0.100596,0.212422,0.737826,0.456338,0.860553,0.141785,0.998749,0.435389,0.765213,0.374054,0.412863,0.569727,0.401407,0.432155,0.731399,0.462774,0.924342,0.115679,0.882081,0.452177,0.0286266,0.439955,0.74908,0.537216,0.411782,0.220281,0.789766,0.658217,0.647732,0.119928,0.291683,0.748328,0.332351,0.0295096,0.204665,0.192903,0.171294,0.203415,0.628292,0.936508,0.577468,0.0411554,0.506235,0.978876,0.473311,0.237634,0.44165,0.397653,0.353313,0.323731,0.84983,0.38194,0.763686,0.59891,0.919155,0.175468,0.819191,0.708921,0.833685,0.466922,0.828849,0.125369,0.21525,0.1612,0.154878,0.419916,0.354103,0.326173,0.62333,0.982395,0.26268,0.200799,0.0235508,0.768915,0.179674,0.496862,0.00654862,0.621324,0.894514,0.359862,0.945055,0.744344,0.741801,0.70874,0.343254,0.660956,0.884208,0.162444,0.369877,0.717894,0.629366,0.198726,0.843263,0.844617,0.359926,0.998141,0.264532,0.714029,0.324314,0.887862,0.696425,0.586994,0.088661,0.719976,0.355909,0.268335,0.216837,0.362457,0.889659,0.111352,0.722319,0.834714,0.855696,0.46412,0.543454,0.198949,0.125076,0.427663,0.361393,0.494953,0.145556,0.99076,0.69368,0.988819,0.835377,0.0536058,0.98696,0.0999087,0.767635,0.311274,0.987771,0.46406,0.898268,0.076432,0.184036,0.254176,0.344767,0.400873,0.616634,0.234426,0.512225,0.338953,0.0691403,0.36792,0.803073,0.612594,0.56687,0.928149,0.0402571,0.928263,0.423102,0.185814,0.919023,0.116782,0.174633,0.754399,0.170387,0.161593,0.854308,0.938022,0.472867,0.842079,0.402083,0.371134,0.918511,0.586118,0.625311,0.263278,0.986991,0.241945,0.497705,0.499216,0.580897,0.566845,0.867137,0.38397,0.179439,0.434006,0.312119,0.219697,0.362269,0.735221,0.40551,0.281292,0.852002,0.580143,0.035691,0.0223895,0.741736,0.889999,0.960412,0.214603,0.732078,0.362494,0.585737,0.650589,0.948613,0.211048,0.913867,0.935604,0.452993,0.411572,0.43482,0.0338902,0.978417,0.301957,0.41786,0.157856,0.735963,0.729979,0.377553,0.0982322,0.4652,0.783063,0.379524,0.317202,0.363206,0.415215,0.339591,0.104941,0.305214,0.300003,0.319544,0.0372916,0.662498,0.905281,0.68788,0.611111,0.116329,0.601747,0.546715,0.569322,0.0133191,0.981535,0.603212,0.991736,0.283492,0.0210722,0.149592,0.0194553,0.751051,0.527144,0.117688,0.216251,0.310207,0.497212,0.533453,0.673412,0.912427,0.873044,0.778354,0.21764,0.173047,0.0978977,0.254932,0.835545,0.00317871,0.942812,0.446656,0.119508,0.54456,0.99337,0.68883,0.557879,0.974905,0.292042,0.549614,0.258398,0.313114,0.699206,0.277853,0.0641649,0.22635,0.395541,0.280416,0.536557,0.892752,0.813868,0.209969,0.805179,0.686912,0.988323,0.0228189,0.859959,0.0862207,0.277751,0.695504,0.0893994,0.220563,0.14216,0.208907,0.765123,0.13553,0.897737,0.323001,0.110436,0.189778,0.872616,0.368833,0.502892,0.571822,0.646686,0.567057,0.798172,0.0422267,0.847473,0.3
34729,0.934979,0.661341,0.544698,0.740157,0.348253,0.533021,0.762976,0.208213,0.619242,0.0407269,0.903717,0.708641,0.26129,0.0458772,0.917548,0.0264127,0.181408,0.815285,0.349414,0.291843,0.00506346,0.22203,0.660677,0.507956,0.793852,0.307363,0.0750126,0.592024,0.34959,0.922485,0.926753,0.284568,0.583826,0.471451,0.0247257,0.93208,0.00447183,0.787702,0.140292,0.623714,0.828429,0.0440095,0.332355,0.0897188,0.0898867,0.249903,0.116131,0.271294,0.0651884,0.465546,0.563137,0.0702519,0.687575,0.223814,0.578207,0.481427,0.531177,0.65322,0.0734509,0.880766,0.575705,0.00020338,0.165335,0.159532,0.471654,0.19006,0.0916112,0.476126,0.977762,0.231903,0.0998396,0.806191,0.275913,0.432194,0.89591,0.3658,0.682098,0.0120413,0.637094,0.747286,0.477587,0.200231,0.817538,0.165162,0.424046,0.395746,0.64659,0.955222,0.0489656,0.72004,0.835989,0.624671,0.720244,0.00132339,0.784203,0.191898,0.191384,0.875814,0.668024,0.169146,0.107717,0.767863,0.975337,0.38363,0.200058,0.871247,0.74943,0.882155,0.883288,0.386524,0.629442,0.360875,0.586755,0.44698,0.526037,0.0108008,0.842725,0.172627,0.966023,0.891691,0.892667,0.802012,0.516362,0.612911,0.803335,0.300565,0.804809,0.994719,0.176378,0.472832,0.163865,0.284096,0.240696,0.139202,0.667726,0.440753,0.0104488,0.417156,0.322909,0.893737,0.803679,0.95235,0.254612,0.390435,0.39933,0.780649,0.401236,0.242056,0.953276,0.367259,0.133747,0.845943,0.169271,0.650109,0.458854,0.972606,0.950673,0.263663,0.967325,0.127051,0.736495,0.13119,0.411147,0.977191,0.270392,0.0788728,0.417944,0.280841,0.496028,0.740853,0.174577,0.299708,0.693203,0.429189,0.690143,0.0925331,0.209839,0.0913783,0.334589,0.163115,0.458637,0.468335,0.00905763,0.627908,0.118444,0.467912,0.600514,0.0691169,0.731574,0.567838,0.196168,0.468069,0.699028,0.607315,0.44526,0.96942,0.686188,0.863204,0.25026,0.182217,0.604056,0.424838,0.481924,0.297259,0.854027,0.172067,0.389792,0.0638657,0.263445,0.724381,0.22698,0.722083,0.192716,0.236038,0.34999,0.31116,0.703949,0.950504,0.380277,0.435524,0.518342,0.576445,0.903593,0.21737,0.18376,0.348853,0.18679,0.869948,0.212057,0.43705,0.052165,0.816113,0.861888,0.534089,0.113372,0.715915,0.706157,0.503164,0.779781,0.969602,0.227545,0.00676118,0.691685,0.420261,0.242799,0.0416748,0.731421,0.946748,0.992179,0.111698,0.382272,0.510521,0.688143,0.285865,0.727891,0.871903,0.634718,0.914681,0.741851,0.846775,0.351731,0.794016,0.662888,0.213619,0.328106,0.77626,0.929535,0.0342624,0.279425,0.709316,0.0038645,0.50697,0.716077,0.695549,0.927231,0.958876,0.737224,0.658652,0.905624,0.729402,0.77035,0.287897,0.239923,0.458492,0.573762,0.967814,0.330395,0.20848,0.882495,0.0722464,0.0552551,0.234226,0.866263,0.718143,0.447846,0.194369,0.494403,0.37738,0.228631,0.773828,0.0866961,0.232496,0.280798,0.802773,0.928045,0.208029,0.761649,0.665269,0.866681,0.667273,0.394671,0.637031,0.95517,0.634594,0.0955234,0.528932,0.602408,0.425918,0.737412,0.484903,0.498165,0.792667,0.719129,0.364428,0.51081,0.166974,0.558796,0.00521355,0.544355,0.787427,0.779042,0.631051,0.0199228,0.0598395,0.433824,0.947967,0.267869,0.195473,0.613236,0.13455,0.862746,0.00790702,0.771581,0.817916,0.642501,0.867104,0.346848,0.244909,0.293023,0.0842601,0.729812,0.791188,0.876927,0.448941,0.155616,0.387737,0.615915,0.714412,0.392951,0.16027,0.501839,0.171992,0.791321,0.521762,0.231832,0.225145,0.46973,0.499701,0.420618,0.0829656,0.634251,0.283364,0.0908726,0.405832,0.10128,0.733374,0.272936,0.448129,0.978283,0.565959,0.532389,0.708095,0.357147,0.409316,0.157036,0.512763,0.797053,0.772951,0.227175,0.190004,0.933222,0.729014,0.361997,
0.724542,0.250776,0.593829,0.949687,0.720506,0.0935293,0.370305,0.803471,0.72778,0.653669,0.894344,0.133612,0.754949,0.627718,0.406548,0.203078,0.606001,0.972508,0.735467,0.314096,0.329655,0.144782,0.471132,0.842417,0.941836,0.244083,0.0695921,0.13184,0.177304,0.798606,0.493836,0.901847,0.0493821,0.0876649,0.851534,0.769888,0.181194,0.221839,0.573359,0.908974,0.875508,0.467703,0.0425862,0.630457,0.0954203,0.449134,0.833535,0.701421,0.421642,0.569002,0.0155164,0.751297,0.713784,0.486648,0.593714,0.65562,0.730731,0.663306,0.787459,0.908035,0.461912,0.281296,0.809882,0.511294,0.36896,0.661416,0.281182,0.550155,0.883255,0.854541,0.459129,0.758763,0.322244,0.501715,0.389221,0.417664,0.95085,0.222756,0.119085,0.372492,0.791758,0.134601,0.123788,0.505542,0.621249,0.717502,0.161161,0.35198,0.380809,0.948621,0.260016,0.842721,0.229916,0.0698983,0.354015,0.598877,0.731315,0.635197,0.149031,0.61457,0.489738,0.60816,0.373334,0.811982,0.109875,0.762554,0.229646,0.0607252,0.98531,0.348731,0.433217,0.777068,0.483333,0.557005,0.282609,0.104582,0.274508,0.443771,0.456563,0.655316,0.392391,0.716579,0.498037,0.622307,0.786477,0.852052,0.221184,0.517792,0.48725,0.370215,0.132362,0.976988,0.978376,0.505696,0.78897,0.0882511,0.26825,0.0186163,0.148976,0.25356,0.367348,0.582193,0.0306283,0.85068,0.139198,0.313238,0.955262,0.413706,0.757009,0.411825,0.0690226,0.1494,0.128404,0.56706,0.771707,0.914881,0.419112,0.992891,0.432672,0.906362,0.363107,0.565034,0.88335,0.341482,0.0707296,0.67232,0.429733,0.33898,0.690936,0.57871,0.59254,0.058284,0.160903,0.623168,0.908964,0.300101,0.936406,0.864227,0.713808,0.693415,0.276052,0.78283,0.842815,0.404456,0.34989,0.614522,0.319336,0.769002,0.607413,0.752008,0.675364,0.97052,0.317043,0.558714,0.312002,0.387772,0.231034,0.741735,0.726752,0.921971,0.320445,0.319292,0.980255,0.481348,0.94246,0.889219,0.781449,0.878866,0.753446,0.495257,0.572281,0.0294977,0.278087,0.415096,0.433953,0.627977,0.0296175,0.75329,0.396979,0.637031,0.505298,0.0723434,0.60755,0.822341,0.631058,0.919552,0.210113,0.862092,0.661288,0.936865,0.784062,0.981733,0.256157,0.764317,0.463081,0.198617,0.653536,0.24453,0.0774833,0.406982,0.739787,0.649764,0.436479,0.0178746,0.0648602,0.870433,0.645852,0.0944777,0.623723,0.042831,0.731508,0.129021,0.115174,0.339059,0.951362,0.746232,0.258611,0.161475,0.608324,0.919899,0.0983395,0.392386,0.901632,0.354496,0.156703,0.364713,0.553113,0.810239,0.609243,0.630596,0.21722,0.349031,0.280361,0.6537,0.366905,0.345221,0.524133,0.0127572,0.439698,0.147855,0.0555882,0.171207,0.276876,0.170763,0.510266,0.228238,0.916994,0.768877,0.389713,0.525318,0.688776,0.488052,0.917704,0.590407,0.842549,0.0744064,0.95512,0.395662,0.884645,0.564364,0.0262577,0.101865,0.913395,0.306618,0.755565,0.2803,0.651839,0.279698,0.293057,0.0915376,0.427553,0.348646,0.262744,0.704429,0.519408,0.77301,0.932667,0.436402,0.541887,0.32238,0.96172,0.230662,0.810433,0.879424,0.82107,0.652981,0.95383,0.77619,0.0486429,0.838475,0.340554,0.0749006,0.940341,0.253948,0.381519,0.695906,0.534249,0.033358,0.975603,0.827306,0.124896,0.403156,0.175951,0.38764,0.107585,0.695359,0.16065,0.0402529,0.131762,0.702537,0.362633,0.0934821,0.933199,0.173066,0.972906,0.754268,0.826047,0.926736,0.530458,0.87469,0.765211,0.871012,0.949591,0.705552,0.124961,0.33111,0.401458,0.659209,0.364468,0.377061,0.486515,0.489364,0.780217,0.662467,0.877004,0.887803,0.357826,0.0376536,0.928056,0.489588,0.74019,0.290689,0.58307,0.673389,0.463755,0.555976,0.427657,0.289802,0.482712,0.958116,0.164493,0.247924,0.829128,0.114084,0.953476,0.954089,0.4451
94,0.354934,0.613298,0.809662,0.731995,0.0998133,0.299026,0.512212,0.76228,0.176029,0.400015,0.120106,0.213683,0.328071,0.609694,0.953873,0.61876,0.192765,0.627262,0.0825145,0.748741,0.054919,0.372317,0.231453,0.0130347,0.53681,0.479377,0.842163,0.650893,0.432853,0.796251,0.0960872,0.787787,0.409549,0.905749,0.519782,0.509363,0.204775,0.031994,0.271643,0.380804,0.432009,0.391749,0.594487,0.76008,0.0014435,0.54836,0.378839,0.194208,0.175621,0.461354,0.942949,0.230541,0.833671,0.174402,0.243575,0.37048,0.65378,0.0857377,0.0213738,0.0866326,0.881989,0.117461,0.87442,0.291538,0.02321,0.394201,0.800901,0.227985,0.426195,0.0725433,0.608788,0.858204,0.464292,0.203275,0.618284,0.465736,0.751635,0.997124,0.659944,0.927256,0.458478,0.602893,0.157797,0.292148,0.777295,0.401372,0.662629,0.431075,0.487109,0.684002,0.517708,0.369098,0.801463,0.392127,0.660636,0.824674,0.786328,0.461537,0.0526581,0.212524,0.53408,0.661447,0.070728,0.998373,0.864721,0.689012,0.464108,0.616356,0.686136,0.124052,0.543612,0.144613,0.726946,0.701409,0.436762,0.504241,0.102781,0.0993904,0.935316,0.58989,0.783393,0.453023,0.958988,0.584856,0.84515,0.619625,0.40953,0.631479,0.0811619,0.462188,0.844002,0.615242,0.123634,0.91473,0.613615,0.988356,0.603743,0.0777233,0.604712,0.289878,0.201776,0.148324,0.434492,0.928721,0.849733,0.871254,0.432962,0.952513,0.970644,0.368278,0.542403,0.754037,0.821302,0.501392,0.338893,0.666452,0.121017,0.748423,0.297931,0.202178,0.210611,0.141933,0.817421,0.334245,0.0566637,0.431036,0.322601,0.660406,0.508759,0.927313,0.950285,0.710535,0.0756374,0.384777,0.639256,0.92537,0.25603,0.0722179,0.877884,0.226674,0.440496,0.420287,0.980711,0.261798,0.921679,0.319604,0.92825,0.0426954,0.068027,0.22618,0.244874,0.278638,0.368114,0.0622946,0.612883,0.424777,0.49333,0.935484,0.0851837,0.00208909,0.862798,0.0354686,0.712624,0.938435,0.420245,0.351879,0.863805,0.676276,0.424097,0.741689,0.90295,0.864593,0.161976,0.883661,0.126391,0.0836547,0.203265,0.0546405,0.12635,0.271292,0.280821,0.371224,0.54993,0.648934,0.433518,0.162813,0.0737118,0.926849,0.0982976,0.158896,0.928938,0.961095,0.194364,0.641561,0.89953,0.61461,0.993441,0.763336,0.290885,0.417538,0.505024,0.193835,0.282132,0.667,0.077496,0.408523,0.750655,0.280761,0.463163,0.877005,0.552053,0.743984,0.248229,0.101983,0.392918,0.681747,0.264796,0.46663,0.608596,0.363094,0.625526,0.537534,0.324189,0.81989,0.179095,0.223719,0.434499,0.172536,0.987055,0.725384,0.590074,0.492079,0.919219,0.872206,0.15908,0.996715,0.280729,0.909735,0.277476,0.743892,0.78674,0.829529,0.487875,0.0349687,0.931512,0.880794,0.716716,0.196308,0.347424,0.325312,0.559402,0.972949,0.862846,0.88359,0.792839,0.0419409,0.10731,0.227338,0.214477,0.0943644,0.952722,0.804551,0.586444,0.871942,0.676757,0.745523,0.868657,0.957486,0.655258,0.146134,0.701377,0.441998,0.975663,0.189253,0.476966,0.907175,0.0700462,0.193683,0.103483,0.41747,0.518995,0.662885,0.390419,0.38184,0.546475,0.183258,0.423781,0.653785,0.410596,0.638258,0.748149,0.363319,0.442809,0.334593,0.23526,0.119567,0.0801164,0.103918,0.0770522,0.735375,0.250051,0.778429,0.177372,0.225715,0.967682,0.654339,0.13289,0.0377282,0.848021,0.236373,0.455198,0.367016,0.899258,0.845617,0.748856,0.445733,0.0288754,0.172638,0.0995185,0.439472,0.810896,0.847668,0.80279,0.253705,0.182261,0.0380506,0.373272,0.262378,0.141968,0.450324,0.997752,0.39202,0.228754,0.175124,0.617734,0.196436,0.829463,0.750624,0.234164,0.677484,0.986997,0.689362,0.0445003,0.886255,0.534979,0.793357,0.331988,0.563855,0.965994,0.431507,0.00332631,0.77689,0.279175,0.806116,0.03059
56,0.461436,0.844167,0.403868,0.723813,0.986135,0.854192,0.721565,0.378155,0.0829455,0.89669,0.995889,0.279381,0.726153,0.746513,0.513545,0.403637,0.733511,0.202907,0.448137,0.619766,0.737886,0.241494,0.951754,0.301741,0.207488,0.383261,0.305067,0.984378,0.662436,0.111184,0.0149739,0.123872,0.955351,0.418842,0.847685,0.941486,0.273033,0.569251,0.319641,0.355979,0.465941,0.315531,0.63536,0.192093,0.0620444,0.148905,0.59573,0.795555,0.351812,0.0438679,0.41532,0.0896985,0.285362,0.367074,0.39144,0.49285,0.750336,0.696507,0.477228,0.412771,0.807691,0.492202,0.536643,0.763042,0.911044,0.384329,0.704528,0.184077,0.953579,0.0241691,0.540056,0.41952,0.3397,0.175416,0.611613,0.401744,0.324322,0.207344,0.197299,0.676134,0.251212,0.61262,0.765832,0.536573,0.979694,0.157272,0.0294235,0.73003,0.853779,0.506652,0.142801,0.661469,0.998854,0.679444,0.424511,0.909898,0.0637729,0.129039,0.093975,0.0173523,0.153208,0.634031,0.436872,0.492908,0.809448,0.0484856,0.894652,0.133769,0.255829,0.0919512,0.809903,0.507041,0.704571,0.575735,0.0436146,0.684265,0.733007,0.0730381,0.414295,0.586785,0.57969,0.557096,0.248255,0.578544,0.23654,0.672765,0.488442,0.300313,0.801804,0.582417,0.317665,0.955012,0.216448,0.754538,0.447919,0.0258953,0.803023,0.342571,0.159664,0.0588526,0.434522,0.969567,0.565894,0.139093,0.545302,0.609508,0.823358,0.278309,0.682547,0.237653,0.865094,0.262236,0.794749,0.113349,0.84078,0.0312886,0.786114,0.329222,0.331601,0.587918,0.911638,0.649267,0.54293,0.128086,0.403804,0.99085,0.153981,0.206827,0.333421,0.313646,0.26568,0.767943,0.283213,0.831574,0.907037,0.828515,0.441082,0.730395,0.106824,0.123629,0.968048,0.971919,0.385865,0.762796,0.0852678,0.226645,0.794085,0.871382,0.555867,0.125686,0.459301,0.467505,0.774953,0.00223091,0.595591,0.178757,0.99308,0.749573,0.385585,0.326501,0.0632182,0.651265,0.0944446,0.346431,0.482838,0.00148124,0.174947,0.923921,0.731876,0.281771,0.0475492,0.699924,0.25369,0.433414,0.46272,0.338958,0.66006,0.256806,0.21034,0.215927,0.382492,0.66964,0.683432,0.157445,0.671871,0.279023,0.336203,0.664952,0.0285957,0.721787,0.991453,0.091814,0.373052,0.0858978,0.438245,0.85589,0.087379,0.613192,0.779811,0.819255,0.894963,0.82736,0.519179,0.148653,0.260774,0.9819,0.48761,0.920834,0.238705,0.69795,0.13676,0.621197,0.367591,0.820192,0.778642,0.039462,0.0992153,0.114845,0.704414,0.127811,0.836632,0.695867,0.219625,0.209684,0.781765,0.65787,0.0655739,0.869144,0.271062,0.845385,0.688399,0.166025,0.672744,0.207578,0.314678,0.933518,0.189478,0.802288,0.854352,0.428183,0.500238,0.991112,0.0493803,0.867828,0.811304,0.828023,0.90729,0.91052,0.942868,0.611704,0.0383306,0.7795,0.307571,0.257956,0.989184,0.089336,0.915826,0.0547577,0.95848,0.186888,0.900142,0.646879,0.352913,0.572886,0.854457,0.667591,0.506405,0.0439346,0.469879,0.360757,0.472118,0.970116,0.351869,0.521498,0.837945,0.163173,0.349521,0.745235,0.0736924,0.292388,0.35694,0.112023,0.0718883,0.664511,0.369979,0.0610721,0.753847,0.285805,0.11583,0.712327,0.472693,0.015972,0.359205,0.825606,0.588858,0.213662,0.493196,0.0952633,0.257597,0.963075,0.45602,0.729714,0.933191,0.807889,0.251212,0.771136,0.971062,0.600733,0.516372,0.0447539,0.893121,0.873311,0.156777,0.96501,0.537822,0.526756,0.0260817,0.291669,0.81256,0.141912,0.00399552,0.285253,0.157884,0.363201,0.110858,0.746742,0.576863,0.604054,0.842005,0.834459,0.567129,0.298025,0.564174,0.500321,0.105914,0.815386,0.271457,0.0769753,0.416118,0.787829,0.121729,0.30924,0.66114,0.278506,0.274249,0.198962,0.805262,0.300331,0.490631,0.617822,0.442243,0.494627,0.903074,0.600126,0.857827
,0.0139323,0.346868,0.43469,0.617987,0.188873,0.269149,0.185116,0.486899,0.833323,0.685436,0.592812,0.648708,0.956893,0.669788,0.0648269,0.744722,0.791517,0.374067,0.405862,0.0700229,0.648316,0.604824,0.875285,0.948647,0.095455,0.493106,0.39089,0.590082,0.39618,0.991016,0.447909,0.410113,0.337884,0.882599,0.0280996,0.526757,0.151748,0.213215,0.0136559,0.985071,0.898652,0.606468,0.633779,0.855545,0.276256,0.698606,0.600267,0.0677726,0.072673,0.00612891,0.137796,0.720989,0.610953,0.0130801,0.669636,0.706408,0.506186,0.060526,0.296489,0.902367,0.0515419,0.744398,0.31248,0.389426,0.626997,0.340579,0.916183,0.778745,0.553795,0.929839,0.763816,0.452447,0.536307,0.397596,0.307992,0.812563,0.0962019,0.908259,0.880336,0.168875,0.914388,0.0181313,0.889864,0.525341,0.0312114,0.5595,0.231748,0.537398,0.620026,0.528238,0.439765,0.671568,0.272636,0.752244,0.0609942,0.899633,0.0928235,0.977177,0.678378,0.646618,0.907017,0.442194,0.0990648,0.443324,0.83979,0.407057,0.255887,0.935992,0.315316,0.136223,0.104867,0.229703,0.154354,0.994731,0.755044,0.185566,0.554231,0.986793,0.722963,0.174257,0.51503,0.162728,0.845826,0.787666,0.914972,0.90682,0.687299,0.00779556,0.883997,0.365677,0.654414,0.791014,0.807871,0.753479,0.234338,0.647661,0.160535,0.490225,0.583653,0.475851,0.626448,0.68852,0.705554,0.780802,0.68325,0.460598,0.966368,0.237482,0.447391,0.689331,0.411739,0.962421,0.852059,0.257565,0.750087,0.767031,0.164384,0.437386,0.774827,0.0483815,0.803063,0.429241,0.839395,0.610934,0.182719,0.0737334,0.258595,0.343254,0.563959,0.842248,0.819105,0.190407,0.530768,0.524659,0.971209,0.214019,0.985258,0.937577,0.4515,0.432649,0.626909,0.863239,0.39507,0.478968,0.120804,0.145157,0.245999,0.285188,0.582543,0.0208262,0.333569,0.385606,0.450067,0.172965,0.996541,0.632786,0.246698,0.255136,0.97604,0.810657,0.0973847,0.795146,0.00106348,0.628153,0.319805,0.972273,0.842171,0.305063,0.90985,0.293672,0.737711,0.536759,0.156911,0.132781,0.0157266,0.277714,0.277938,0.261726,0.562902,0.860481,0.282552,0.896472,0.246088,0.732619,0.0694363,0.242629,0.365405,0.316134,0.497765,0.341446,0.126791,0.595149,0.136591,0.127855,0.223302,0.456396,0.100127,0.0654736,0.761459,0.00997738,0.359145,0.49917,0.546736,0.516056,0.631951,0.562463,0.79377,0.909889,0.824189,0.356672,0.770371,0.106741,0.253144,0.0164586,0.83936,0.32258,0.259087,0.204765,0.638715,0.756852,0.54621,0.765506,0.352001,0.682801,0.89336,0.575304,0.139197,0.993488,0.640777,0.900656,0.00346528,0.999922,0.399826,0.550201,0.515978,0.0317771,0.112664,0.309748,0.941666,0.936853,0.666421,0.712037,0.0435934,0.919564,0.728496,0.882953,0.242144,0.987583,0.0877179,0.880859,0.744435,0.633928,0.646365,0.0964362,0.31673,0.539725,0.67174,0.455927,0.533213,0.312517,0.356583,0.536678,0.312439,0.756409,0.0868798,0.828418,0.788186,0.199544,0.138166,0.729853,0.136396,0.804587,0.44189,0.17999,0.724151,0.170386,0.0629429,0.966295,0.157969,0.150661,0.847154,0.902403,0.784589,0.493519,0.99884,0.101319,0.0332444,0.670579,0.557246,0.566458,0.983096,0.913829,0.103136,0.295536,0.670238,0.190016,0.123954,0.458424,0.38956,0.26212,0.188276,0.525956,0.0667061,0.630166,0.705946,0.790857,0.800552,0.768889,0.757152,0.95852,0.91955,0.604306,0.860924,0.704139,0.0978254,0.859763,0.805458,0.13107,0.530343,0.362703,0.697527,0.513439,0.276532,0.800663,0.808975,0.94677,0.990679,0.932929,0.405194,0.380239,0.195048,0.59347,0.906195,0.261754,0.223636,0.612141,0.0526113,0.024188,0.38103,0.809763,0.982708,0.30058,0.41407,0.843632,0.00471855,0.511895,0.703395,0.810176,0.642965,0.233738,0.17288,0.340492,0.747177,0.449412,0.
[raw file data: a long run of comma-separated floating-point values in [0, 1]]
6,0.614578,0.103788,0.781386,0.218382,0.311759,0.314726,0.677403,0.851447,0.600847,0.559181,0.352238,0.968415,0.871989,0.529229,0.371174,0.793721,0.0601289,0.775867,0.913421,0.166872,0.0592614,0.532878,0.131137,0.389157,0.00862907,0.677198,0.978632,0.0656428,0.0495488,0.968181,0.642088,0.664127,0.0719692,0.423475,0.882508,0.383728,0.738201,0.559911,0.235175,0.339048,0.119093,0.587413,0.307463,0.991081,0.116642,0.678637,0.784803,0.176771,0.454504,0.698223,0.343643,0.513765,0.231101,0.47478,0.902922,0.23973,0.151979,0.881554,0.305373,0.201527,0.849735,0.947461,0.865654,0.921704,0.370936,0.748163,0.305433,0.109137,0.308074,0.540608,0.448185,0.427166,0.128021,0.755648,0.418247,0.244664,0.434285,0.20305,0.421435,0.888788,0.901273,0.765078,0.402553,0.132375,0.239858,0.305475,0.372105,0.391837,0.187029,0.677478,0.593364,0.0367644,0.62494,0.459018,0.958469,0.995876,0.207181,0.263902,0.105013,0.515254,0.80451,0.553198,0.94242,0.932531,0.308845,0.360668,0.177195,0.74313,0.563718,0.598629,0.631918,0.464992,0.363707,0.0344716,0.597366,0.603565,0.339947,0.969471,0.995402,0.526976,0.646949,0.588766,0.56374,0.271889,0.0477844,0.522209,0.267764,0.254965,0.786111,0.372777,0.770219,0.59062,0.925974,0.71264,0.523151,0.23482,0.0733078,0.700346,0.97795,0.637026,0.298975,0.609868,0.102017,0.662682,0.64434,0.699383,0.266247,0.984287,0.668855,0.261649,0.511263,0.315804,0.850415,0.0750032,0.587692,0.8982,0.597212,0.855456,0.153165,0.383323,0.228233,0.923384,0.973944,0.154208,0.636024,0.497095,0.389028,0.709332,0.197441,0.366977,0.346358,0.496415,0.976846,0.448375,0.159097,0.621185,0.147759,0.425344,0.605472,0.816613,0.686993,0.116735,0.132417,0.537409,0.191738,0.720109,0.435609,0.78895,0.575565,0.588774,0.172273,0.803799,0.512158,0.146217,0.958007,0.148182,0.643312,0.347034,0.857515,0.840752,0.714011,0.203872,0.337168,0.690857,0.652248,0.496265,0.312042,0.800006,0.921609,0.917514,0.616619,0.608603,0.0342488,0.749036,0.146012,0.225987,0.469145,0.58162,0.0149368,0.0447107,0.170394,0.18721,0.848509,0.682552,0.333427,0.806516,0.830735,0.976739,0.15355,0.688249,0.817492,0.867561,0.892121,0.154659,0.558418,0.544369,0.650924,0.870461,0.344375,0.572534,0.787975,0.960995,0.181136,0.822224,0.710031,0.327148,0.0482101,0.179176,0.908768,0.0631469,0.223887,0.0791621,0.250357,0.0723965,0.761714,0.583785,0.878912,0.592449,0.560524,0.0324623,0.280698,0.378015,0.900024,0.172819,0.532675,0.458442,0.717189,0.183599,0.328903,0.0615638,0.756133,0.116878,0.0225586,0.937269,0.939101,0.73259,0.264417,0.987311,0.911766,0.173185,0.0504583,0.135653,0.252347,0.300816,0.20805,0.0140616,0.8846,0.0869621,0.60651,0.445124,0.119424,0.887209,0.823139,0.0194481,0.060028,0.355814,0.47789,0.777216,0.539413,0.806793,0.83878,0.295546,0.923671,0.861339,0.232815,0.862772,0.593929,0.497232,0.850084,0.505695,0.670417,0.900542,0.641348,0.922765,0.201357,0.849398,0.936826,0.0859575,0.93636,0.543337,0.531081,0.0557841,0.430545,0.35422,0.0752322,0.490573,0.710034,0.553123,0.26779,0.249448,0.359916,0.10657,0.544994,0.283587,0.967909,0.777809,0.146359,0.561838,0.275041,0.996442,0.0675323,0.945459,0.896984,0.70888,0.868224,0.0983417,0.558278,0.80505,0.184299,0.494638,0.348387,0.715381,0.550422,0.778932,0.069601,0.625654,0.269505,0.779635,0.178776,0.537295,0.029083,0.538692,0.643865,0.574077,0.822279,0.611774,0.351886,0.968638,0.173612,0.626927,0.96508,0.241144,0.572386,0.862065,0.950024,0.440609,0.960406,0.508302,0.245659,0.144706,0.00293969,0.594046,0.860086,0.553361,0.372978,0.929687,0.179015,0.642483,0.709323,0.357792,0.179779,0.738406,0.896484,0.823644,0.31248
2,0.718763,0.435418,0.664368,0.687401,0.60903,0.291295,0.652481,0.850174,0.863681,0.514546,0.800198,0.30429,0.474952,0.3085,0.549949,0.619658,0.31144,0.143996,0.479744,0.864801,0.516974,0.409431,0.0438163,0.159457,0.118753,0.401608,0.339236,0.857159,0.298092,0.162879,0.169641,0.0168544,0.598297,0.834009,0.704255,0.207327,0.125304,0.356736,0.0575006,0.988984,0.871282,0.857699,0.293274,0.346234,0.166199,0.843224,0.965891,0.477638,0.987219,0.445635,0.34244,0.504193,0.855065,0.386256,0.66365,0.973819,0.787864,0.00288585,0.830977,0.0859553,0.165765,0.000618079,0.10281,0.764063,0.834627,0.807065,0.971389,0.95993,0.163801,0.02889,0.948915,0.0350825,0.886589,0.242189,0.381316,0.0527872,0.0854129,0.347207,0.530426,0.0726323,0.792842,0.872865,0.576825,0.647908,0.259121,0.240476,0.621726,0.0469849,0.243361,0.452703,0.13294,0.409127,0.453322,0.23575,0.173189,0.287948,0.0428146,0.144579,0.247879,0.206615,0.173469,0.196793,0.241698,0.0600571,0.438982,0.623014,0.112844,0.524395,0.970222,0.64327,0.597028,0.763064,0.516135,0.173853,0.410971,0.775257,0.414329,0.0326977,0.822241,0.65769,0.485401,0.955182,0.0668165,0.938723,0.190931,0.240006,0.226671,0.233746,0.384584,0.47455,0.440362,0.558053,0.671343,0.682059,0.61811,0.110325,0.305074,0.730954,0.634721,0.275295,0.374224,0.231749,0.0383591,0.89036,0.405602,0.449331,0.665616,0.81993,0.482028,0.487858,0.47762,0.967429,0.443039,0.544437,0.906152,0.633971,0.784442,0.132823,0.867717,0.169026,0.607372,0.308079,0.727079,0.278715,0.990138,0.345189,0.389041,0.295212,0.0761431,0.0237619,0.570507,0.450367,0.25551,0.608866,0.340727,0.661112,0.0581967,0.00634357,0.481042,0.540225,0.494201,0.958662,0.507654,0.937241,0.503099,0.413806,0.571212,0.287541,0.546629,0.438929,0.456567,0.154002,0.747007,0.183646,0.432717,0.737146,0.528835,0.821758,0.0323573,0.604978,0.84552,0.602864,0.0553455,0.10103,0.211731,0.396073,0.762142,0.269927,0.402416,0.243185,0.810152,0.896618,0.201847,0.317807,0.833859,0.704945,0.731613,0.40507,0.992486,0.278242,0.843999,0.449053,0.432243,0.591007,0.632699,0.864961,0.328152,0.161534,0.686719,0.36051,0.766512,0.532239,0.963374,0.821858,0.633269,0.175104,0.21793,0.395411,0.445032,0.620347,0.638596,0.255184,0.516964,0.840443,0.572991,0.350823,0.545388,0.304603,0.755893,0.537874,0.582845,0.599893,0.986927,0.0150888,0.190899,0.619627,0.880049,0.519052,0.781161,0.566768,0.879561,0.547673,0.0990066,0.842935,0.369531,0.732276,0.0180398,0.587461,0.127687,0.463072,0.207808,0.766283,0.718256,0.724773,0.606726,0.291246,0.0755955,0.152114,0.595849,0.831489,0.689988,0.178695,0.431382,0.676915,0.193784,0.622281,0.296542,0.073833,0.141333,0.0777023,0.640601,0.0208941,0.625375,0.739608,0.863829,0.994906,0.471883,0.881869,0.582367,0.59957,0.344941,0.790175,0.365853,0.0631961,0.514948,0.972579,0.354442,0.590543,0.124692,0.950292,0.422032,0.81468,0.128986,0.853414,0.491595,0.32277,0.475695,0.788137,0.396603,0.617028,0.865839,0.037204,0.637922,0.491214,0.776812,0.501751,0.48612,0.248695,0.38362,0.0684875,0.848265,0.728561,0.858663,0.214118,0.791757,0.373611,0.186697,0.146199,0.964154,0.31139,0.0964908,0.386186,0.12607,0.225477,0.2396,0.617665,0.548247,0.715295,0.405801,0.94485,0.332323,0.27164,0.982054,0.970245,0.762854,0.758866,0.471996,0.248975,0.00756088,0.855616,0.317462,0.855826,0.584177,0.176125,0.0699445,0.375934,0.549736,0.256642,0.522133,0.513889,0.568031,0.618624,0.900076,0.694101,0.844101,0.139676,0.311765,0.392348,0.854971,0.717567,0.337199,0.187294,0.989207,0.319253,0.157539,0.752061,0.0781189,0.629534,0.00103548,0.0856798,0.48515,0.318498,0.941506,0.0693272,0
.494622,0.0114503,0.445261,0.0443579,0.268092,0.967394,0.558247,0.836123,0.586018,0.458323,0.530224,0.430119,0.597999,0.841989,0.822467,0.45297,0.559556,0.159666,0.640264,0.548763,0.478919,0.797802,0.300824,0.557037,0.427337,0.301859,0.642717,0.912487,0.620357,0.584223,0.981814,0.114979,0.595674,0.427075,0.159337,0.863765,0.394469,0.717584,0.699889,0.980487,0.175908,0.230113,0.410605,0.773907,0.072102,0.233072,0.226877,0.631658,0.392738,0.86714,0.180421,0.871656,0.664943,0.481244,0.428694,0.0922795,0.783103,0.0714111,0.00476658,0.40346,0.655634,0.986581,0.518439,0.251308,0.413656,0.677776,0.115073,0.808125,0.39536,0.814962,0.788612,0.571268,0.0450745,0.199217,0.345175,0.117176,0.432289,0.572051,0.748834,0.825027,0.439192,0.929255,0.696683,0.104134,0.410499,0.125377,0.196414,0.193603,0.196788,0.201181,0.597063,0.852422,0.187761,0.115502,0.10373,0.601417,0.793278,0.218803,0.409542,0.188638,0.0337652,0.198154,0.759906,0.0788397,0.397371,0.105081,0.196016,0.829659,0.677132,0.944851,0.654686,0.116323,0.874106,0.351369,0.220458,0.284605,0.476746,0.416872,0.478207,0.673534,0.618052,0.07527,0.525957,0.805814,0.190772,0.629687,0.407231,0.984049,0.84849,0.816773,0.172687,0.882255,0.0149269,0.932593,0.961095,0.412297,0.0376742,0.157111,0.241957,0.714806,0.101962,0.896643,0.831129,0.976067,0.248012,0.0515874,0.260672,0.724759,0.468459,0.73888,0.398293,0.0865115,0.81415,0.92425,0.892325,0.00492138,0.553937,0.299556,0.988971,0.402427,0.116329,0.161658,0.284682,0.131256,0.0942515,0.245777,0.543553,0.131926,0.402888,0.78551,0.846732,0.50485,0.682154,0.677861,0.480918,0.930166,0.729448,0.74159,0.654925,0.197908,0.480469,0.0532178,0.284419,0.294619,0.977468,0.176744,0.299541,0.531404,0.4763,0.288511,0.933831,0.59263,0.450169,0.218513,0.723886,0.544421,0.46429,0.267439,0.676346,0.867179,0.0529495,0.523078,0.372029,0.735103,0.200939,0.852946,0.665269,0.930388,0.594536,0.320194,0.128295,0.0750056,0.373411,0.412715,0.369625,0.350879,0.589459,0.669165,0.882283,0.0657594,0.957677,0.816114,0.658389,0.407846,0.0346271,0.382275,0.952267,0.498917,0.649714,0.628613,0.366096,0.702663,0.151691,0.738125,0.437766,0.35263,0.591071,0.103035,0.283018,0.185607,0.423229,0.411313,0.260613,0.79664,0.824028,0.630237,0.147519,0.413487,0.299403,0.0298022,0.479246,0.25708,0.845916,0.137635,0.664926,0.880543,0.51991,0.617192,0.379461,0.169623,0.245805,0.745556,0.872287,0.397496,0.483681,0.310053,0.750127,0.0747519,0.413088,0.0331445,0.260359,0.836316,0.444458,0.520971,0.632957,0.268486,0.151209,0.780476,0.681972,0.450612,0.810278,0.161219,0.707691,0.656194,0.298854,0.372617,0.536738,0.818763,0.989809,0.916198,0.988387,0.235614,0.661755,0.860673,0.633111,0.145436,0.170726,0.383237,0.220188,0.583814,0.416382,0.480547,0.42013,0.86084,0.00151786,0.0530869,0.129325,0.152727,0.833563,0.811298,0.603338,0.643841,0.972516,0.311029,0.300035,0.27137,0.683646,0.836773,0.0901335,0.673455,0.752971,0.0785204,0.909069,0.414726,0.939194,0.54218,0.560161,0.10992,0.925417,0.780349,0.693734,0.341799,0.260895,0.113864,0.202639,0.262413,0.166951,0.331964,0.41514,0.000513419,0.143262,0.018478,0.644354,0.115778,0.329507,0.944389,0.387148,0.0131528,0.781162,0.477282,0.686607,0.534132,0.555802,0.595676,0.948858,0.494996,0.137856,0.509019,0.604916,0.0632733,0.289368,0.298649,0.405073,0.550264,0.412513,0.607712,0.812677,0.579464,0.939676,0.227817,0.579977,0.0829379,0.246295,0.224332,0.198716,0.575802,0.168721,0.585864,0.588955,0.949882,0.0631462,0.275562,0.484014,0.618948,0.871238,0.432872,0.113944,0.00909452,0.941892,0.71886,0.0723678,0.23126,0.0175094,0.47744,
0.781524,0.430023,0.0851518,0.594201,0.00948667,0.0248277,0.822018,0.589464,0.107766,0.0683125,0.813796,0.306482,0.644114,0.982516,0.892346,0.233069,0.932398,0.955493,0.508631,0.416413,0.574441,0.37987,0.849285,0.688385,0.388964,0.791177,0.407245,0.461332,0.0224365,0.424755,0.938772,0.80396,0.854777,0.0239243,0.398161,0.864264,0.048752,0.220178,0.453728,0.156518,0.288491,0.267524,0.463,0.932605,0.25004,0.355346,0.165674,0.182438,0.310838,0.674306,0.598851,0.885279,0.0541756,0.448136,0.573665,0.44314,0.239313,0.98091,0.904472,0.261749,0.405664,0.843245,0.0657091,0.260442,0.867169,0.46387,0.124705,0.915921,0.684048,0.578433,0.0724386,0.972539,0.845957,0.535438,0.905144,0.0959969,0.890784,0.0708181,0.278435,0.201622,0.745124,0.877286,0.0869018,0.799299,0.325422,0.660566,0.242439,0.564734,0.641476,0.146911,0.826483,0.0471406,0.990156,0.892192,0.307582,0.857325,0.356062,0.432288,0.773246,0.0401097,0.010721,0.845684,0.0126483,0.856678,0.381123,0.917792,0.952675,0.271907,0.98861,0.23111,0.473529,0.733734,0.108396,0.560431,0.533033,0.433817,0.220997,0.775473,0.998551,0.862473,0.922384,0.825034,0.909614,0.91254,0.717226,0.217196,0.769865,0.0732882,0.649484,0.543111,0.113398,0.660205,0.388795,0.126046,0.516883,0.769918,0.0438381,0.469558,0.0418244,0.0324482,0.700668,0.515353,0.766182,0.809063,0.075784,0.299215,0.24288,0.296781,0.074688,0.241431,0.159254,0.997072,0.0664655,0.0688683,0.909612,0.783692,0.286064,0.679477,0.85698,0.935548,0.222588,0.970378,0.595753,0.611383,0.0964241,0.112635,0.381301,0.140262,0.582193,0.423125,0.17271,0.28286,0.938479,0.938892,0.0919236,0.0142627,0.238108,0.334804,0.311044,0.312796,0.576235,0.470298,0.309868,0.642701,0.539167,0.21948,0.426393,0.825231,0.898957,0.283373,0.760779,0.121545,0.253751,0.356531,0.732928,0.350175,0.469167,0.114229,0.490437,0.0513595,0.537354,0.663147,0.33422,0.475833,0.60204,0.426144,0.490096,0.840148,0.760947,0.80114,0.152944,0.337183,0.271438,0.462811,0.979883,0.810605,0.682291,0.406276,0.635836,0.581248,0.689649,0.396614,0.702793,0.943399,0.753146,0.435721,0.293574,0.222312,0.549951,0.784011,0.273672,0.087305,0.447158,0.607892,0.563138,0.049198,0.0340353,0.0532341,0.889346,0.794983,0.854374,0.0422893,0.132165,0.125812,0.505101,0.112048,0.936417,0.187392,0.518324,0.572252,0.76864,0.207973,0.968867,0.471434,0.151372,0.722012,0.907155,0.444946,0.944325,0.457105,0.228956,0.217997,0.54441,0.676115,0.825888,0.107549,0.725313,0.859924,0.160783,0.614658,0.654906,0.0151565,0.656948,0.787072,0.140968,0.162048,0.89912,0.077385,0.34944,0.417444,0.649637,0.11808,0.625417,0.618504,0.589514,0.776789,0.340516,0.496669,0.221735,0.284841,0.953774,0.450691,0.502837,0.498185,0.126806,0.328726,0.605733,0.852118,0.188649,0.766516,0.466777,0.843556,0.781673,0.123725,0.630627,0.922641,0.285773,0.529747,2.61571e-05,0.635213,0.947192,0.649663,0.753294,0.572609,0.268167,0.342808,0.349398,0.608683,0.839477,0.571132,0.893524,0.793251,0.0218234,0.396361,0.291436,0.148629,0.725087,0.897169,0.00074776,0.913737,0.663685,0.467525,0.757292,0.445358,0.591249,0.387919,0.367999,0.877022,0.917667,0.368025,0.512236,0.864858,0.0176888,0.265529,0.437467,0.285856,0.608337,0.786864,0.894539,0.447813,0.357997,0.788063,0.241064,0.37982,0.184424,0.5325,0.528449,0.909512,0.429669,0.529197,0.823248,0.0933544,0.996722,0.58054,0.538713,0.587971,0.96846,0.906712,0.464994,0.886127,0.274737,0.977229,0.750985,0.292426,0.242758,0.188452,0.578282,0.851095,0.975316,0.472821,0.298908,0.333313,0.260884,0.539972,0.713133,0.445308,0.0724721,0.241582,0.35482,0.502141,0.77078,0.178068,0.595495,0.767502,0.
758609,0.134208,0.355473,0.727068,0.0409198,0.820467,0.613195,0.315657,0.797696,0.36418,0.608083,0.040454,0.552631,0.186365,0.891549,0.527948,0.659186,0.190457,0.86126,0.920069,0.73043,0.574394,0.365378,0.802902,0.815976,0.720198,0.305043,0.586756,0.898266,0.900538,0.354257,0.656875,0.0347462,0.70973,0.383943,0.075666,0.530197,0.997138,0.391323,0.327892,0.361318,0.999406,0.368346,0.913949,0.185771,0.259895,0.441897,0.844956,0.450353,0.303157,0.765026,0.180782,0.877551,0.130404,0.983684,0.693527,0.850601,0.288727,0.280283,0.748867,0.189265,0.63454,0.405742,0.224011,0.34427,0.789685,0.299677,0.874467,0.786823,0.691,0.20236,0.148141,0.690406,0.570706,0.0620903,0.876177,0.830602,0.503987,0.721133,0.280954,0.807145,0.486159,0.461736,0.684696,0.616562,0.44542,0.378223,0.467163,0.734147,0.658506,0.216031,0.923411,0.293046,0.621772,0.147422,0.637316,0.411457,0.447099,0.511783,0.19828,0.138099,0.714143,0.346421,0.828505,0.284849,0.408512,0.704682,0.115451,0.912499,0.425815,0.396405,0.719644,0.911973,0.858141,0.40434,0.528535,0.303561,0.782562,0.995698,0.0377076,0.441068,0.211729,0.961119,0.734114,0.833501,0.108541,0.37143,0.244959,0.555641,0.883213,0.443239,0.69374,0.597356,0.78966,0.522245,0.882205,0.198172,0.226927,0.997656,0.110671,0.652742,0.39406,0.830314,0.564715,0.252201,0.234654,0.0932498,0.555762,0.0172163,0.0889482,0.59347,0.458284,0.300677,0.554589,0.192398,0.134179,0.66313,0.563828,0.379137,0.218771,0.447042,0.822377,0.912511,0.0443981,0.612037,0.434757,0.926603,0.810209,0.661684,0.924259,0.920879,0.314426,0.31832,0.751194,0.87914,0.570521,0.985848,0.97239,0.126283,0.00306398,0.0613383,0.719753,0.461348,0.362016,0.274342,0.653747,0.496194,0.937472,0.217575,0.875332,0.156244,0.664617,0.697708,0.0687552,0.709015,0.309745,0.503512,0.635618,0.119954,0.165196,0.559877,0.0408331,0.479622,0.878197,0.792027,0.358762,0.448718,0.777875,0.331152,0.575001,0.780939,0.392491,0.294755,0.242287,0.754506,0.569097,0.896034,0.2507,0.506569,0.113609,0.126032,0.662813,0.778225,0.82374,0.731568,0.48724,0.133485,0.23508,0.122859,0.253439,0.400276,0.682736,0.294272,0.879898,0.560933,0.0862991,0.23866,0.00965126,0.864174,0.569812,0.584653,0.645112,0.962303,0.879407,0.887399,0.716809,0.448504,0.783432,0.967509,0.955073,0.897041,0.0935414,0.617886,0.675267,0.917282,0.349454,0.162507,0.0507672,0.584534,0.285365,0.304206,0.984811,0.968101,0.598479,0.864709,0.529035,0.684778,0.103369,0.538686,0.548951,0.673181,0.123339,0.194063,0.635484,0.00274584,0.0814623,0.352293,0.45125,0.864895,0.319802,0.406323,0.761936,0.413343,0.0242091,0.437202,0.330625,0.373663,0.599709,0.381392,0.958198,0.885075,0.685599,0.943008,0.853176,0.284077,0.807717,0.382211,0.968855,0.911086,0.920897,0.517806,0.584267,0.0442352,0.71187,0.219751,0.046981,0.793332,0.572043,0.498231,0.658227,0.891845,0.904554,0.420162,0.305189,0.928763,0.857365,0.635814,0.302426,0.457074,0.0172064,0.260624,0.342149,0.702805,0.203632,0.195325,0.986882,0.0113488,0.577536,0.955737,0.922434,0.498433,0.473544,0.506701,0.542668,0.185413,0.726452,0.589649,0.978745,0.298495,0.0878796,0.636971,0.19034,0.992434,0.0571339,0.495529,0.921197,0.914499,0.131343,0.223623,0.371573,0.148549,0.484247,0.713722,0.851354,0.687878,0.909047,0.838237,0.699227,0.486583,0.793974,0.621662,0.985016,0.267518,0.128363,0.527684,0.452931,0.854814,0.117333,0.431676,0.153309,0.205212,0.0686471,0.343649,0.197646,0.125781,0.839178,0.118842,0.0402799,0.970521,0.342465,0.411853,0.119071,0.826712,0.125575,0.970425,0.51459,0.0346225,0.808662,0.213817,0.521206,0.602636,0.835479,0.506222,0.870154,0.963842,0.03
39056,0.323085,0.818656,0.151238,0.75476,0.971965,0.35645,0.823407,0.315614,0.554096,0.949188,0.154792,0.672938,0.989468,0.125314,0.0154034,0.401321,0.244384,0.842115,0.526896,0.21481,0.356705,0.561519,0.0234715,0.570522,0.0827248,0.626108,0.406001,0.588947,0.496261,0.369843,0.622852,0.819346,0.188499,0.77409,0.574106,0.160464,0.130541,0.397513,0.476078,0.684637,0.346702,0.63087,0.357575,0.33617,0.756184,0.372978,0.737491,0.000568365,0.215093,0.264388,0.215378,0.571798,0.825907,0.238849,0.142321,0.908632,0.864957,0.548322,0.497578,0.361218,0.918165,0.12043,0.180564,0.106664,0.894521,0.754671,0.267128,0.0250613,0.152184,0.743206,0.709698,0.498886,0.374076,0.067273,0.835056,0.13026,0.440251,0.572547,0.130828,0.655345,0.836935,0.346206,0.227143,0.662841,0.585056,0.369464,0.571473,0.450013,0.917786,0.0690512,0.811231,0.835951,0.189482,0.991796,0.942615,0.0840022,0.746466,0.209743,0.109063,0.89865,0.952949,0.818761,0.397536,0.327025,0.886034,0.232592,0.457285,0.326286,0.805139,0.588114,0.98163,0.642073,0.93432,0.208773,0.304915,0.519376,0.578237,0.876388,0.969389,0.496023,0.945439,0.78062,0.331974,0.134921,0.772415,0.274589,0.218923,0.518882,0.484333,0.327986,0.417532,0.437281,0.146748,0.815068,0.764306,0.0327822,0.0476591,0.221592,0.359068,0.852798,0.809705,0.340698,0.494871,0.744025,0.549472,0.799786,0.263401,0.127709,0.676174,0.23279,0.623732,0.621613,0.0134095,0.955707,0.756534,0.785825,0.230296,0.975457,0.304707,0.714629,0.303443,0.722238,0.15191,0.450191,0.537306,0.916216,0.482973,0.584965,0.137808,0.842041,0.437763,0.947513,0.18274,0.932634,0.691539,0.732212,0.73242,0.95494,0.859921,0.408594,0.18773,0.483653,0.0302075,0.201139,0.43936,0.786742,0.986964,0.669656,0.762199,0.291671,0.384284,0.0656419,0.0139087,0.536194,0.515833,0.551214,0.452411,0.998806,0.136179,0.590219,0.840848,0.573942,0.537732,0.0235874,0.506576,0.229271,0.755799,0.238996,0.184211,0.61572,0.64759,0.37194,0.0993728,0.677798,0.573079,0.538733,0.464539,0.560043,0.208388,0.226738,0.851714,0.592673,0.29238,0.865623,0.128867,0.808213,0.416837,0.581278,0.807019,0.553016,0.171496,0.647867,0.126958,0.709228,0.671454,0.633534,0.938499,0.427253,0.87253,0.12271,0.0429729,0.52012,0.49465,0.142346,0.197918,0.0677294,0.681078,0.662457,0.627773,0.889467,0.889195,0.479487,0.482139,0.181574,0.345109,0.611006,0.989787,0.761946,0.192283,0.796806,0.314963,0.36378,0.444673,0.441921,0.0730083,0.116127,0.0754556,0.0115076,0.54338,0.947986,0.134217,0.586353,0.468106,0.628868,0.728699,0.666024,0.696597,0.409777,0.328481,0.32437,0.299244,0.217676,0.803856,0.781383,0.39925,0.148965,0.392389,0.389037,0.910912,0.584672,0.185843,0.225874,0.948452,0.630516,0.667795,0.0214605,0.746643,0.743251,0.0329681,0.290024,0.691237,0.167186,0.876377,0.159343,0.796053,0.605076,0.825367,0.49265,0.0148535,0.153848,0.81702,0.314097,0.371524,0.620876,0.0954804,0.770774,0.769841,0.487869,0.159811,0.680753,0.0725418,0.345654,0.906627,0.020994,0.97617,0.574423,0.0424545,0.722814,0.317674,0.0754226,0.0128374,0.00891058,0.242608,0.889214,0.168254,0.0386611,0.49429,0.993621,0.531311,0.509144,0.147469,0.348331,0.823241,0.518993,0.969207,0.918722,0.289767,0.739048,0.406591,0.449578,0.419801,0.479133,0.795233,0.326428,0.500127,0.771403,0.90085,0.542581,0.494217,0.218524,0.618004,0.507054,0.227435,0.860612,0.396268,0.395688,0.899273,0.890559,0.389309,0.430584,0.399703,0.536778,0.778915,0.222944,0.0557717,0.748122,0.141666,0.345539,0.48717,0.548257,0.795117,0.90697,0.0273902,0.59035,0.233398,0.527517,0.361753,0.134249,0.0700988,0.855969,0.352773,0.688103,0.363023,0.580207,0.5487
15,0.759291,0.975896,0.447988,0.64985,0.365205,0.878573,0.0495528,0.901983,0.657488,0.272497,0.957755,0.40561,0.414163,0.303294,0.89278,0.96242,0.098411,0.79975,0.98981,0.688761,0.0331483,0.517328,0.0505132,0.167397,0.587426,0.906482,0.52017,0.275529,0.269505,0.100377,0.824244,0.0287967,0.0762724,0.272233,0.678647,0.441477,0.150805,0.7282,0.343461,0.808293,0.000696586,0.301215,0.213903,0.41486,0.604509,0.106683,0.37728,0.70292,0.906433,0.36709,0.391681,0.939581,0.884418,0.442194,0.106978,0.471844,0.348677,0.627148,0.747374,0.618182,0.727524,0.571618,0.646978,0.803797,0.843851,0.325625,0.245274,0.994656,0.0538249,0.588735,0.802949,0.0545214,0.88995,0.0168527,0.469381,0.494459,0.123536,0.846661,0.19738,0.0299684,0.213751,0.589061,0.96955,0.0981691,0.031255,0.0765277,0.570014,0.379932,0.703675,0.317387,0.998113,0.4312,0.889006,0.645092,0.234997,0.732856,0.970717,0.480271,0.727513,0.024542,0.0690053,0.530462,0.0790634,0.958955,0.547315,0.548444,0.453415,0.67085,0.395105,0.650795,0.700819,0.608856,0.239856,0.670368,0.707025,0.271111,0.746896,0.277039,0.651042,0.450571,0.594426,0.649156,0.881771,0.483432,0.294247,0.116768,0.216288,0.264965,0.597038,0.943801,0.289507,0.666044,0.474263,0.36857,0.624999,0.0215776,0.917014,0.078414,0.692428,0.31212,0.729209,0.393246,0.920976,0.969064,0.0636146,0.628002,0.240175,0.810511,0.90504,0.891217,0.261082,0.499467,0.540373,0.142853,0.982899,0.83462,0.25962,0.199187,0.0995847,0.856659,0.142988,0.389091,0.522702,0.617251,0.757661,0.147701,0.638828,0.674676,0.226115,0.331256,0.986796,0.955324,0.724502,0.907772,0.924388,0.788117,0.535773,0.164563,0.598627,0.440814,0.0557804,0.859709,0.94028,0.596153,0.00256179,0.923179,0.430773,0.262182,0.122366,0.530358,0.118841,0.265354,0.919449,0.641543,0.882604,0.677111,0.789244,0.521432,0.351786,0.0153593,0.852688,0.338582,0.970683,0.577191,0.246354,0.895072,0.365307,0.782127,0.059635,0.963935,0.222941,0.115415,0.823644,0.163221,0.711569,0.826206,0.0864001,0.142342,0.0883878,0.208766,0.6727,0.207228,0.47412,0.592149,0.848771,0.356724,0.269259,0.638015,0.878156,0.621046,0.653374,0.730845,0.959628,0.624058,0.308035,0.205982,0.519129,0.673343,0.988109,0.578764,0.637277,0.211049,0.69418,0.460921,0.37427,0.405748,0.287127,0.46067,0.54809,0.375515,0.669436,0.22079,0.582743,0.143556,0.812939,0.431515,0.50028,0.082198,0.0695299,0.378436,0.703244,0.722904,0.109281,0.662871,0.346962,0.417316,0.868853,0.866092,0.0906586,0.856961,0.444856,0.727936,0.0680106,0.139036,0.188857,0.442281,0.544784,0.475985,0.902951,0.0928744,0.8515,0.572387,0.313664,0.434243,0.715943,0.126603,0.865758,0.216223,0.208801,0.935288,0.59466,0.912044,0.658192,0.703941,0.574916,0.00515409,0.121257,0.443769,0.871246,0.211915,0.30073,0.316102,0.939851,0.368741,0.455138,0.128709,0.811022,0.999922,0.604693,0.713973,0.0927964,0.456193,0.28636,0.406461,0.890436,0.00230364,0.533063,0.756194,0.218527,0.741864,0.691481,0.813187,0.653908,0.349673,0.517127,0.228824,0.354827,0.638384,0.672593,0.226073,0.850299,0.973324,0.542175,0.79015,0.342065,0.997313,0.918858,0.153087,0.997235,0.523552,0.86706,0.0900309,0.979745,0.15342,0.496491,0.870181,0.155724,0.0295547,0.626374,0.374251,0.771419,0.317856,0.187437,0.425327,0.667529,0.704564,0.654151,0.0223564,0.342948,0.326745,0.248429,0.193246,0.300068,0.790604,0.983396,0.642133,0.787917,0.902255,0.79522,0.785151,0.425806,0.66228,0.875182,0.405551,0.8157,0.371674,0.275731,0.971423,0.401228,0.902106,0.345674,0.172647,0.219961,0.533111,0.597974,0.88749,0.237675,0.252126,0.909847,0.580623,0.57887,0.158276,0.773869,0.878939,0.948881,0.757266
,0.521072,0.736798,0.65952,0.316292,0.521949,0.0853262,0.978572,0.397131,0.490877,0.794272,0.768805,0.766608,0.765695,0.170033,0.668714,0.111369,0.34268,0.888675,0.64448,0.940655,0.776166,0.882156,0.19278,0.686013,0.462779,0.771651,0.844289,0.236648,0.65059,0.793169,0.993914,0.171662,0.529967,0.653434,0.487954,0.0519159,0.73876,0.466526,0.449047,0.229637,0.260798,0.217852,0.996245,0.0264936,0.387885,0.664959,0.137863,0.730566,0.553634,0.782343,0.67122,0.3298,0.664499,0.864,0.0158127,0.127278,0.635651,0.860102,0.363926,0.286241,0.653271,0.35784,0.457903,0.183238,0.0112733,0.945857,0.235154,0.750033,0.412383,0.684201,0.97967,0.673181,0.902053,0.975915,0.699675,0.289938,0.640874,0.837538,0.0205037,0.194508,0.619881,0.691724,0.524309,0.28438,0.555724,0.540121,0.411658,0.191375,0.400223,0.775584,0.477616,0.0534937,0.133424,0.935519,0.236732,0.144697,0.881375,0.471886,0.89473,0.293759,0.156087,0.8744,0.96694,0.0581395,0.850315,0.666615,0.348078,0.491189,0.504153,0.368581,0.685698,0.124035,0.0603053,0.210006,0.408415,0.61603,0.750127,0.820073,0.807405,0.15035,0.595658,0.285021,0.203844,0.729081,0.22054,0.440576,0.873779,0.101915,0.912461,0.768509,0.395674,0.0685479,0.642909,0.362614,0.126687,0.493225,0.0292294,0.474765,0.984414,0.533383,0.843346,0.670112,0.657417,0.903652,0.880118,0.0658326,0.519681,0.630246,0.885906,0.327086,0.780596,0.481564,0.612107,0.98444,0.210645,0.832647,0.425015,0.0844236,0.934562,0.337476,0.852933,0.330236,0.406024,0.495842,0.69285,0.532712,0.989066,0.72208,0.00747672,0.973481,0.255463,0.850823,0.643592,0.91288,0.754475,0.523711,0.978713,0.274156,0.153956,0.864619,0.601242,0.934552,0.346182,0.21335,0.918992,0.556827,0.0459969,0.344007,0.641251,0.980559,0.681483,0.494183,0.310796,0.0875075,0.990025,0.00364598,0.620219,0.979092,0.725726,0.627696,0.952572,0.981188,0.478519,0.596165,0.894068,0.232994,0.119875,0.872781,0.50715,0.273831,0.7374,0.108392,0.208383,0.0835817,0.321742,0.127375,0.640409,0.367739,0.471381,0.28166,0.348298,0.152865,0.775843,0.659094,0.240372,0.765869,0.66274,0.860591,0.74496,0.388465,0.488287,0.697533,0.369654,0.966806,0.293697,0.263722,0.1998,0.413573,0.136503,0.70695,0.687404,0.873903,0.815342,0.895787,0.957484,0.137084,0.0231618,0.597893,0.504823,0.494543,0.879553,0.853121,0.647408,0.655396,0.512215,0.88778,0.421265,0.174955,0.748371,0.166225,0.56342,0.236659,0.863758,0.933074,0.203465,0.157455,0.196796,0.403265,0.571028,0.333299,0.110216,0.258432,0.207202,0.925558,0.154219,0.164686,0.0626425,0.17738,0.762579,0.567466,0.671924,0.642132,0.420587,0.319331,0.297529,0.932802,0.207111,0.718794,0.107757,0.955483,0.885019,0.671177,0.192141,0.748777,0.604251,0.395607,0.906232,0.801047,0.798872,0.47726,0.134346,0.909087,0.735691,0.341548,0.834645,0.88991,0.506234,0.897288,0.0672901,0.268813,0.464754,0.739214,0.910946,0.885341,0.0585451,0.208474,0.818143,0.265656,0.927268,0.9259,0.221139,0.812286,0.597077,0.413281,0.561063,0.201328,0.808887,0.467295,0.0023752,0.607759,0.944555,0.136721,0.516846,0.680246,0.47827,0.351492,0.570156,0.984504,0.24878,0.637446,0.253317,0.713533,0.376659,0.164262,0.598874,0.435204,0.372737,0.417017,0.700861,0.300004,0.342917,0.922,0.112291,0.939994,0.335281,0.673354,0.141322,0.144168,0.140649,0.143698,0.751927,0.0852034,0.280419,0.268773,0.765449,0.758689,0.620265,0.335605,0.743192,0.869045,0.97305,0.996509,0.582578,0.34971,0.160772,0.181453,0.784914,0.533508,0.59847,0.485775,0.833512,0.941387,0.407775,0.945803,0.881382,0.743055,0.619157,0.0227042,0.887223,0.759806,0.166402,0.63915,0.845009,0.446821,0.907923,0.610458,0.20551,0.52818
8,0.946063,0.948702,0.397233,0.919113,0.945211,0.979812,0.268823,0.105982,0.161265,0.0537364,0.63949,0.759735,0.539511,0.473003,0.701122,0.947286,0.418806,0.582504,0.690341,0.0379627,0.605208,0.577564,0.797768,0.77161,0.216714,0.642777,0.218431,0.124637,0.253236,0.42394,0.652825,0.199298,0.372642,0.0500586,0.118411,0.317853,0.0298705,0.387234,0.423836,0.191135,0.44097,0.0633261,0.95087,0.980481,0.536329,0.651992,0.927767,0.955135,0.234496,0.618108,0.993097,0.839704,0.195672,0.790866,0.611314,0.412385,0.433643,0.829745,0.537022,0.686879,0.253686,0.189847,0.886177,0.626328,0.239905,0.00458845,0.944181,0.269776,0.391822,0.368017,0.460911,0.832793,0.431343,0.411781,0.813274,0.967672,0.0637736,0.741041,0.922806,0.29827,0.359149,0.915904,0.137974,0.55482,0.70677,0.749288,0.967206,0.140413,0.579034,0.504227,0.827291,0.832719,0.694074,0.713468,0.459047,0.93398,0.718057,0.403228,0.203756,0.109879,0.771245,0.664667,0.942672,0.202587,0.0764481,0.755946,0.170259,0.140222,0.496987,0.0930652,0.438492,0.856136,0.00896894,0.576466,0.410957,0.715738,0.325754,0.378162,0.856151,0.904788,0.88239,0.683443,0.737507,0.576464,0.396911,0.196554,0.510444,0.114968,0.599782,0.714199,0.224847,0.371027,0.378866,0.167519,0.573614,0.455314,0.923465,0.743873,0.595536,0.420453,0.836938,0.0340277,0.276589,0.845907,0.610493,0.687546,0.561646,0.936248,0.0657086,0.417797,0.841035,0.948098,0.101239,0.578543,0.524562,0.49815,0.775097,0.0350063,0.613118,0.374879,0.749206,0.837965,0.745906,0.128072,0.00548471,0.31952,0.583386,0.92895,0.0633933,0.178922,0.349403,0.900331,0.21295,0.625992,0.746239,0.823444,0.313538,0.307884,0.759691,0.379247,0.725681,0.600727,0.327346,0.826921,0.179269,0.851908,0.325071,0.954366,0.886914,0.93819,0.329245,0.63612,0.776155,0.0751508,0.764192,0.78164,0.394671,0.347578,0.71059,0.458064,0.526501,0.059993,0.358396,0.739451,0.685985,0.104635,0.562894,0.999524,0.412519,0.322586,0.378771,0.1382,0.923312,0.706116,0.965121,0.102581,0.558024,0.290192,0.0569474,0.444939,0.228382,0.386192,0.0810587,0.00453663,0.461343,0.845251,0.786176,0.856014,0.192829,0.496766,0.314078,0.71933,0.556759,0.672474,0.458781,0.242744,0.777109,0.021675,0.242268,0.189628,0.344261,0.621039,0.327828,0.267573,0.327156,0.292949,0.370154,0.88518,0.583141,0.427102,0.330119,0.811522,0.813294,0.411177,0.816059,0.274637,0.256428,0.602235,0.130651,0.449257,0.0990017,0.44473,0.168587,0.655761,0.117204,0.627367,0.898505,0.894312,0.649042,0.140774,0.0839401,0.993303,0.761813,0.411768,0.260876,0.0889684,0.704717,0.63103,0.974148,0.287858,0.0581316,0.304267,0.09938,0.871426,0.715445,0.915439,0.146063,0.971873,0.517675,0.276714,0.42113,0.616676,0.721443,0.589716,0.272437,0.838647,0.217083,0.170943,0.732959,0.866126,0.311716,0.816899,0.859428,0.0735291,0.228667,0.120304,0.162497,0.933384,0.751334,0.136646,0.221241,0.809466,0.440913,0.320621,0.680891,0.156357,0.236061,0.826954,0.12823,0.753735,0.103667,0.54936,0.370412,0.825111,0.139076,0.642849,0.663758,0.356159,0.813791,0.396717,0.222285,0.125508,0.213616,0.0817134,0.199037,0.442284,0.202017,0.361534,0.375668,0.953351,0.49818,0.596909,0.762817,0.939093,0.917531,0.443708,0.0954504,0.153591,0.270662,0.22368,0.907326,0.37433,0.77304,0.277738,0.19944,0.912116,0.920587,0.863198,0.268275,0.734378,0.259915,0.49056,0.859886,0.473531,0.572274,0.0589225,0.915815,0.774291,0.420457,0.291483,0.727643,0.918637,0.888392,0.49046,0.85773,0.805923,0.934168,0.95318,0.959514,0.204831,0.176861,0.86684,0.57916,0.949901,0.144578,0.778601,0.862017,0.0651648,0.641799,0.130292,0.799543,0.901714,0.620852,0.659429,0.375245,0.193
126,0.718351,0.29106,0.967417,0.138808,0.582543,0.69506,0.0574445,0.470935,0.18552,0.915174,0.276858,0.119688,0.868354,0.236372,0.324519,0.0452147,0.103212,0.903679,0.995115,0.24779,0.68228,0.857132,0.312955,0.324079,0.987424,0.112498,0.225792,0.608276,0.771926,0.601038,0.801402,0.490277,0.892098,0.76882,0.629085,0.474641,0.46388,0.686529,0.945577,0.649399,0.601704,0.222435,0.769087,0.470058,0.458806,0.0936061,0.515272,0.562018,0.997285,0.510388,0.809808,0.679565,0.36752,0.122763,0.00364345,0.354944,0.23526,0.229436,0.96322,0.0071862,0.830474,0.764623,0.497463,0.722572,0.533442,0.126548,0.197213,0.997322,0.813078,0.14279,0.646721,0.414781,0.365225,0.415809,0.884839,0.824031,0.509415,0.400112,0.386049,0.5067,0.910499,0.195857,0.186265,0.278019,0.318619,0.189908,0.632963,0.553879,0.419344,0.596184,0.561065,0.249818,0.360807,0.0585289,0.972389,0.894249,0.185077,0.169602,0.891571,0.998155,0.312392,0.538292,0.412936,0.677617,0.954101,0.297775,0.501648,0.463516,0.697887,0.887696,0.970215,0.608386,0.0835531,0.15648,0.886406,0.402172,0.346388,0.519369,0.956052,0.765732,0.115553,0.517117,0.0155501,0.476359,0.575646,0.987939,0.370608,0.760723,0.157542,0.262179,0.758878,0.469934,0.800472,0.171814,0.147551,0.754572,0.46959,0.649199,0.218088,0.167477,0.536895,0.188303,0.775863,0.620448,0.344784,0.662269,0.0226208,0.691172,0.181638,0.978673,0.456904,0.297191,0.49579,0.472454,0.77355,0.0714357,0.460394,0.144159,0.832159,0.617936,0.406338,0.591037,0.0878699,0.206809,0.762852,0.235421,0.961382,0.232441,0.88462,0.17947,0.399918,0.421515,0.367773,0.175781,0.0419638,0.712557,0.83805,0.0645846,0.403729,0.019688,0.0432571,0.860633,0.316879,0.539047,0.333088,0.090429,0.610482,0.793481,0.234588,0.442641,0.411417,0.640925,0.0336784,0.499287,0.847735,0.79653,0.734708,0.809117,0.0289713,0.619328,0.988586,0.428889,0.0408434,0.356359,0.60467,0.0828072,0.068916,0.442721,0.147392,0.472645,0.462409,0.190649,0.333278,0.779287,0.729696,0.666365,0.869716,0.340178,0.459847,0.104304,0.782819,0.871264,0.74523,0.816498,0.370551,0.592964,0.613028,0.105259,0.402081,0.641999,0.724587,0.390667,0.0708884,0.76543,0.747027,0.675559,0.848237,0.815943,0.118279,0.995629,0.288587,0.580688,0.186278,0.621865,0.359975,0.915974,0.28823,0.229692,0.256152,0.748077,0.333996,0.0389716,0.619341,0.0792253,0.855469,0.989892,0.67219,0.468497,0.0951504,0.0742707,0.110496,0.819737,0.464938,0.181385,0.585167,0.211965,0.856943,0.433405,0.0279075,0.975223,0.429034,0.316495,0.555911,0.615312,0.93836,0.915886,0.531286,0.226591,0.145578,0.787438,0.974668,0.479574,0.82641,0.594009,0.558799,0.681879,0.5839,0.230989,0.150377,0.679051,0.305259,0.260873,0.498788,0.770197,0.442258,0.0839553,0.982162,0.299201,0.51736,0.0100696,0.274424,0.946394,0.326565,0.830335,0.561707,0.264925,0.746221,0.0929928,0.491515,0.891799,0.880431,0.466183,0.371372,0.706841,0.0601917,0.930171,0.38872,0.644092,0.16116,0.539097,0.323143,0.466419,0.79997,0.82193,0.236616,0.242227,0.905886,0.218778,0.541429,0.423246,0.228848,0.815852,0.36964,0.555413,0.646187,0.931347,0.820337,0.392408,0.0243395,0.311852,0.284207,0.904771,0.778035,0.655579,0.611612,0.838227,0.58575,0.000332197,0.482319,0.74691,0.539429,0.805462,0.213329,0.339399,0.627392,0.449945,0.581627,0.533278,0.668724,0.123055,0.956524,0.897572,0.938908,0.326164,0.452984,0.585095,0.25751,0.273322,0.977503,0.28185,0.585174,0.26171,0.18662,0.363209,0.917288,0.798232,0.201437,0.503039,0.798564,0.683756,0.249948,0.337994,0.489217,0.463277,0.677393,0.116609,0.913222,0.259019,0.649887,0.581946,0.382075,0.60641,0.479518,0.320982,0.932574,0.9325
02,0.906077,0.190084,0.205824,0.88358,0.471934,0.790998,0.14529,0.658554,0.154207,0.0625782,0.456787,0.355644,0.565617,0.255351,0.0393992,0.815565,0.593345,0.528616,0.278842,0.270737,0.645226,0.192065,0.529757,0.295112,0.774011,0.911832,0.901523,0.253529,0.232814,0.834097,0.186031,0.138891,0.0241811,0.391855,0.0224713,0.496115,0.182852,0.167761,0.15467,0.337059,0.230339,0.611456,0.692703,0.795956,0.866807,0.732102,0.611521,0.460152,0.260719,0.890363,0.73089,0.905944,0.0824282,0.260646,0.201057,0.856439,0.172478,0.10258,0.109968,0.405292,0.936676,0.295999,0.544183,0.960857,0.687854,0.566654,0.456973,0.870706,0.734415,0.611642,0.207766,0.964755,0.223098,0.900469,0.760711,0.0899058,0.632571,0.372232,0.550058,0.89329,0.262595,0.280947,0.799234,0.345024,0.541594,0.000290834,0.201463,0.714072,0.10287,0.311431,0.119364,0.0395468,0.60743,0.663547,0.000404303,0.295284,0.230202,0.457377,0.16599,0.964617,0.0690191,0.373755,0.929372,0.292118,0.274224,0.690083,0.382023,0.906795,0.0623146,0.932081,0.800085,0.32491,0.213029,0.599319,0.669934,0.754623,0.59961,0.871396,0.468695,0.70248,0.182827,0.588059,0.742027,0.790257,0.251606,0.742431,0.0855403,0.481807,0.199808,0.25153,0.446425,0.268827,0.625285,0.375796,0.560945,0.899509,0.0658792,0.942968,0.806305,0.128194,0.875049,0.606389,0.453104,0.0880779,0.205708,0.123037,0.8427,0.805318,0.994434,0.311395,0.507798,0.177261,0.899454,0.249825,0.967517,0.151059,0.992256,0.0530578,0.632867,0.192065,0.304588,0.0792914,0.460892,0.929873,0.455088,0.0218366,0.829383,0.520967,0.964805,0.635687,0.649161,0.839854,0.242077,0.102265,0.927932,0.447785,0.225302,0.770632,0.253103,0.219735,0.0820273,0.760901,0.396996,0.981481,0.0107261,0.364513,0.13254,0.00298247,0.417571,0.765407,0.195047,0.722159,0.844699,0.655939,0.652032,0.299786,0.677776,0.481415,0.820754,0.64258,0.117102,0.469914,0.482434,0.359179,0.572179,0.410366,0.806964,0.797481,0.180998,0.0600664,0.0172161,0.263026,0.820967,0.414212,0.244507,0.831693,0.778726,0.377047,0.834676,0.196297,0.142454,0.0297229,0.918456,0.987153,0.685662,0.570488,0.286939,0.363437,0.0519033,0.107693,0.00601756,0.169006,0.577607,0.488452,0.528184,0.149786,0.898818,0.335148,0.947267,0.0798163,0.395214,0.964483,0.342842,0.216182,0.378695,0.587349,0.047875,0.157421,0.964396,0.882551,0.353717,0.10685,0.912274,0.272173,0.0940028,0.597936,0.842662,0.380942,0.961373,0.894565,0.488635,0.967391,0.0635705,0.066242,0.455842,0.591755,0.216028,0.35466,0.926903,0.163295,0.434476,0.322117,0.127778,0.777319,0.538299,0.506473,0.364667,0.586174,0.663894,0.329063,0.468725,0.0176112,0.435913,0.380998,0.289785,0.529916,0.978934,0.132446,0.910858,0.940307,0.0270113,0.399493,0.907698,0.0905818,0.465735,0.36354,0.682337,0.681763,0.7182,0.609239,0.845058,0.152677,0.931356,0.972836,0.929995,0.469655,0.47931,0.294663,0.055829,0.143203,0.623726,0.524554,0.160814,0.0596395,0.905552,0.450599,0.589556,0.884486,0.583045,0.500414,0.824793,0.610057,0.899907,0.732491,0.700638,0.365643,0.096031,0.382975,0.0474059,0.814231,0.992214,0.892464,0.966908,0.923571,0.865301,0.896903,0.393226,0.34461,0.191566,0.449055,0.487814,0.815292,0.973609,0.648628,0.874931,0.879161,0.0992272,0.464487,0.763647,0.682273,0.964901,0.588441,0.292329,0.864808,0.320932,0.992968,0.230451,0.416963,0.375943,0.277857,0.231194,0.368157,0.170321,0.198101,0.291728,0.0356222,0.0950044,0.684954,0.380233,0.28657,0.134009,0.868046,0.101862,0.107618,0.516674,0.976793,0.986778,0.615902,0.44128,0.750425,0.298174,0.406181,0.338866,0.590503,0.27099,0.659798,0.583471,0.501441,0.0767601,0.959414,0.779298,0.307954,0.32757,
0.949619,0.506055,0.619298,0.985241,0.60106,0.304252,0.365474,0.88763,0.438261,0.23352,0.989492,0.545878,0.750195,0.966285,0.532657,0.366096,0.407565,0.283082,0.664271,0.813747,0.621948,0.254774,0.0847364,0.281746,0.838245,0.586177,0.358506,0.797659,0.365475,0.66646,0.125229,0.315094,0.172515,0.744527,0.300336,0.773574,0.0487794,0.66581,0.661204,0.48704,0.89933,0.650696,0.0329187,0.649525,0.61698,0.565575,0.0156208,0.0245456,0.848658,0.679891,0.838292,0.470606,0.934665,0.923029,0.752351,0.77291,0.509206,0.110857,0.570569,0.874681,0.777317,0.695798,0.189775,0.949831,0.440326,0.490111,0.723406,0.489105,0.155921,0.38461,0.976145,0.0552506,0.0353054,0.00906395,0.704775,0.652286,0.574639,0.720396,0.676832,0.423297,0.400287,0.515124,0.893903,0.334953,0.438152,0.646254,0.107863,0.947358,0.757111,0.678432,0.822039,0.534428,0.37423,0.0118147,0.48426,0.814556,0.501926,0.207665,0.303661,0.657847,0.592275,0.279806,0.713097,0.627581,0.28887,0.417872,0.279867,0.863509,0.138268,0.956698,0.286806,0.538555,0.471822,0.180709,0.873508,0.909974,0.826963,0.981371,0.857333,0.584075,0.659803,0.679372,0.118503,0.0340332,0.691187,0.602763,0.848589,0.193112,0.810428,0.15225,0.850959,0.402703,0.432056,0.564056,0.030284,0.720926,0.981928,0.310151,0.584435,0.120196,0.266849,0.871241,0.658752,0.738671,0.05195,0.53226,0.648645,0.878913,0.513631,0.505978,0.462988,0.173434,0.18535,0.581491,0.207467,0.876536,0.184253,0.056056,0.0696487,0.994681,0.208306,0.920608,0.397385,0.640361,0.484664,0.427669,0.361287,0.466592,0.737819,0.945722,0.586789,0.00466828,0.816963,0.245541,0.743339,0.868913,0.7778,0.391984,0.747826,0.291431,0.897962,0.210814,0.464865,0.0833117,0.792304,0.672332,0.959848,0.976558,0.728388,0.0294967,0.971239,0.936693,0.950104,0.368624,0.577055,0.434768,0.796293,0.938341,0.901361,0.534112,0.884063,0.48815,0.53878,0.701026,0.73369,0.282119,0.569939,0.51149,0.674104,0.317765,0.802921,0.572066,0.528579,0.267786,0.655378,0.320883,0.940118,0.615225,0.297441,0.668505,0.644722,0.26868,0.605199,0.594827,0.637304,0.182253,0.0295951,0.433596,0.120595,0.930956,0.967708,0.00465801,0.419105,0.506489,0.705684,0.152795,0.788608,0.275623,0.664286,0.462712,0.593388,0.467207,0.0347774,0.121966,0.734993,0.690155,0.442849,0.675111,0.30538,0.74029,0.343616,0.950103,0.00897009,0.948815,0.544929,0.646274,0.131069,0.574524,0.0798703,0.251663,0.50548,0.0475786,0.256321,0.924586,0.554067,0.962005,0.077381,0.342675,0.237628,0.741667,0.805387,0.831016,0.208874,0.840164,0.952982,0.943867,0.530319,0.395831,0.618978,0.835699,0.136121,0.962594,0.785802,0.145091,0.911409,0.330731,0.791365,0.042478,0.905256,0.871235,0.294141,0.410736,0.918814,0.550463,0.335322,0.472881,0.512468,0.412703,0.815556,0.750096,0.154369,0.620943,0.581112,0.363243,0.461107,0.534094,0.30711,0.991426,0.929925,0.926088,0.827125,0.0660459,0.888682,0.612927,0.211137,0.800092,0.943658,0.00250235,0.84257,0.848914,0.873738,0.136711,0.25965,0.792552,0.687174,0.594972,0.265433,0.199642,0.00767428,0.0809889,0.949738,0.162044,0.701932,0.53085,0.525287,0.163038,0.0649433,0.832397,0.154464,0.994868,0.758485,0.981589,0.0609139,0.647168,0.594515,0.272051,0.44726,0.538174,0.274553,0.28983,0.387088,0.148291,0.426541,0.646738,0.940843,0.113715,0.241709,0.206276,0.313357,0.249384,0.287265,0.263095,0.411427,0.989196,0.793944,0.936714,0.152235,0.858888,0.769111,0.306698,0.853756,0.527597,0.288287,0.91467,0.174765,0.882802,0.186721,0.622024,0.420976,0.461274,0.911854,0.808064,0.609565,0.338395,0.454801,0.550408,0.45211,0.69651,0.756684,0.765466,0.945894,0.0439488,0.0285608,0.357321,0.0331452,0
.822505,0.294036,0.18538,0.681393,0.063147,0.492078,0.535148,0.590744,0.780365,0.449818,0.765508,0.663167,0.636538,0.387533,0.0841427,0.0978122,0.299387,0.892206,0.707377,0.637781,0.347007,0.257785,0.0898911,0.0435179,0.0144695,0.855357,0.989412,0.0584182,0.883918,0.346733,0.0915634,0.706423,0.640769,0.276943,0.387816,0.703916,0.769021,0.922964,0.29466,0.549386,0.372782,0.0601681,0.212553,0.00932015,0.447701,0.296695,0.107132,0.747087,0.188901,0.81451,0.384869,0.535909,0.0722952,0.47476,0.579427,0.0867647,0.330117,0.568839,0.145183,0.214035,0.915572,0.236746,0.920458,0.556341,0.513689,0.308274,0.260257,0.28271,0.231238,0.554917,0.832096,0.60402,0.615085,0.0446485,0.61334,0.0627858,0.341344,0.720472,0.809873,0.530245,0.534982,0.194742,0.066154,0.607277,0.669502,0.645581,0.694042,0.999619,0.214419,0.839225,0.213654,0.129992,0.0759712,0.134112,0.686333,0.589661,0.442386,0.94659,0.872371,0.673624,0.501507,0.704467,0.277644,0.116592,0.749116,0.890984,0.179378,0.0904594,0.611456,0.989251,0.620705,0.146438,0.183993,0.686859,0.753716,0.853495,0.332439,0.447758,0.853113,0.546859,0.286982,0.0667672,0.67685,0.362954,0.200879,0.363183,0.952614,0.643265,0.309774,0.824985,0.316889,0.811281,0.529452,0.594533,0.927873,0.278568,0.485516,0.107252,0.369027,0.0969725,0.0965029,0.989732,0.243411,0.280496,0.676591,0.997126,0.133991,0.0090299,0.444884,0.987104,0.555889,0.731866,0.0538715,0.232739,0.09482,0.254751,0.595923,0.0474343,0.898016,0.905696,0.87242,0.214905,0.716977,0.401872,0.809437,0.64485,0.68044,0.294954,0.752102,0.0494674,0.391926,0.848605,0.0391994,0.635337,0.129101,0.71579,0.632463,0.263092,0.72482,0.0773468,0.250196,0.280709,0.809213,0.304067,0.513448,0.904033,0.558818,0.10937,0.951467,0.456834,0.0150664,0.823887,0.671738,0.732043,0.225759,0.481175,0.376894,0.906199,0.776129,0.128995,0.955667,0.168055,0.9776,0.994866,0.803392,0.106701,0.710656,0.435855,0.369792,0.435476,0.513202,0.619988,0.716184,0.322415,0.924056,0.229632,0.226448,0.482874,0.339002,0.177915,0.939707,0.354069,0.00180197,0.611446,0.0861122,0.227561,0.0926211,0.463006,0.13376,0.86875,0.592001,0.0894266,0.0368052,0.569601,0.0842925,0.840197,0.676302,0.794948,0.276052,0.0460942,0.230424,0.789253,0.666082,0.946608,0.111668,0.590138,0.17624,0.338116,0.0730117,0.515243,0.516031,0.0127191,0.869311,0.517833,0.624165,0.955424,0.745394,0.716786,0.418429,0.879154,0.585536,0.0104306,0.96858,0.622341,0.580032,0.0528728,0.462538,0.256334,0.847821,0.73859,0.302428,0.0782452,0.527843,0.96851,0.0248535,0.639511,0.558648,0.201094,0.977627,0.63166,0.716336,0.493658,0.644379,0.585648,0.0114902,0.268544,0.541071,0.756884,0.98533,0.959501,0.636037,0.570866,0.969931,0.604618,0.193207,0.549963,0.65749,0.655745,0.806297,0.505312,0.394335,0.108725,0.583557,0.922179,0.0772352,0.60841,0.56169,0.635884,0.809504,0.539317,0.267544,0.52584,0.0329744,0.911923,0.111488,0.0444646,0.180467,0.65256,0.801348,0.165796,0.61206,0.437386,0.736662,0.581992,0.0420035,0.929869,0.131955,0.699494,0.585614,0.938251,0.204806,0.979949,0.0469763,0.788362,0.902128,0.124211,0.396773,0.463818,0.760095,0.206277,0.00313472,0.0276387,0.732117,0.0361091,0.939562,0.843605,0.0805737,0.120028,0.496165,0.881922,0.285824,0.108225,0.319308,0.0224865,0.690216,0.361311,0.952356,0.822171,0.0608055,0.53797,0.760423,0.265611,0.517919,0.807399,0.0539735,0.420047,0.93161,0.450746,0.883865,0.691706,0.657023,0.887,0.719344,0.38914,0.923109,0.658906,0.232745,0.00368277,0.778934,0.72891,0.885605,0.0647584,0.837134,0.204913,0.0872449,0.527351,0.566224,0.0396004,0.349522,0.62703,0.57757,0.109945,0.892641,0
.0954895,0.917344,0.946614,0.515537,0.848954,0.39736,0.399402,0.54066,0.0543831,0.286402,0.260004,0.443523,0.209511,0.91891,0.676268,0.213194,0.697844,0.405177,0.0987988,0.762602,0.242312,0.303711,0.849847,0.769663,0.869936,0.889447,0.119185,0.496965,0.467018,0.22913,0.389606,0.562507,0.146474,0.33622,0.0780439,0.995428,0.73358,0.477446,0.536087,0.787964,0.763848,0.796091,0.231486,0.973359,0.715001,0.907754,0.186553,0.412845,0.312932,0.285352,0.175447,0.555244,0.589063,0.0252936,0.324906,0.458999,0.914741,0.444091,0.955964,0.381758,0.673221,0.34557,0.944265,0.819695,0.68179,0.0223093,0.815123,0.41537,0.499755,0.35121,0.203334,0.263603,0.147301,0.43482,0.236963,0.862303,0.342575,0.423516,0.275147,0.655506,0.708868,0.450594,0.21075,0.297931,0.475888,0.535656,0.75693,0.390628,0.979748,0.712894,0.772387,0.652969,0.0584644,0.716652,0.472664,0.740254,0.738961,0.287786,0.155625,0.238717,0.638997,0.358959,0.50232,0.786298,0.793779,0.739283,0.648601,0.136354,0.162799,0.923748,0.79186,0.871666,0.374342,0.00260952,0.169598,0.850229,0.538266,0.926528,0.240858,0.518013,0.639422,0.0132445,0.170982,0.697887,0.729897,0.643646,0.438141,0.468858,0.931432,0.593766,0.707575,0.570429,0.952725,0.209895,0.356727,0.746504,0.949178,0.00532776,0.882857,0.111976,0.929076,0.674717,0.983643,0.303418,0.677327,0.153241,0.153647,0.215592,0.0797686,0.394505,0.733606,0.719191,0.407749,0.904588,0.417078,0.137646,0.548234,0.855219,0.606504,0.479666,0.448985,0.314079,0.0500951,0.40171,0.523974,0.406822,0.148214,0.473151,0.41215,0.0310713,0.585128,0.341226,0.705789,0.568771,0.644643,0.383115,0.722011,0.79829,0.598708,0.80178,0.192795,0.332313,0.520971,0.600545,0.236901,0.938049,0.738191,0.785135,0.793268,0.344695,0.264801,0.242253,0.658773,0.314896,0.643963,0.182747,0.721718,0.792177,0.655899,0.133868,0.823249,0.241026,0.475094,0.529037,0.809797,0.119737,0.912153,0.531808,0.918028,0.51086,0.333588,0.110823,0.843174,0.854559,0.711368,0.0800749,0.792608,0.449558,0.86521,0.585876,0.794253,0.130011,0.828129,0.453026,0.444907,0.472093,0.635774,0.166625,0.26427,0.291672,0.300494,0.0875185,0.532699,0.775588,0.616556,0.342496,0.895325,0.528708,0.874304,0.813353,0.0395687,0.207892,0.924176,0.882742,0.0624508,0.635544,0.962817,0.855058,0.0851025,0.828027,0.440934,0.879355,0.958038,0.269063,0.332382,0.402945,0.741156,0.968156,0.56957,0.00542563,0.259828,0.870064,0.0929442,0.792527,0.645652,0.7095,0.135022,0.540978,0.238208,0.0093264,0.354331,0.277777,0.217218,0.278508,0.16052,0.279669,0.914052,0.123337,0.134728,0.999154,0.951364,0.575662,0.87851,0.909402,0.844725,0.210892,0.312347,0.585881,0.179047,0.881917,0.591306,0.438875,0.751982,0.684251,0.231402,0.397634,0.393751,0.366425,0.938612,0.631959,0.375751,0.292943,0.909736,0.592969,0.57145,0.0702557,0.872638,0.485502,0.193593,0.00736586,0.484656,0.144957,0.583028,0.363166,0.0543585,0.427753,0.574058,0.366705,0.0136334,0.753105,0.248623,0.60494,0.19198,0.000604173,0.289191,0.423382,0.398238,0.682941,0.789807,0.33685,0.3149,0.165558,0.629792,0.224636,0.758527,0.201242,0.294892,0.631165,0.686744,0.488484,0.638531,0.171401,0.633441,0.221559,0.534566,0.687799,0.649311,0.108624,0.0545048,0.662945,0.861729,0.303127,0.267885,0.0537089,0.303732,0.557075,0.477091,0.70197,0.240016,0.266898,0.0388191,0.554916,0.432456,0.668611,0.779553,0.190983,0.869854,0.0744445,0.822148,0.556598,0.562929,0.46068,0.727999,0.19637,0.682239,0.262565,0.884169,0.33155,0.371189,0.938674,0.994495,0.232918,0.241801,0.26238,0.286627,0.545533,0.819455,0.763718,0.247502,0.0594714,0.030616,0.286322,0.614388,0.463072,0.954933,0.
[... large run of comma-separated floating-point values (raw numeric data from an added data file in the diff, wrapped and split mid-number by extraction) omitted ...]
038,0.465078,0.760617,0.450993,0.44051,0.684327,0.395918,0.971368,0.218123,0.814987,0.243272,0.247067,0.519714,0.674363,0.235644,0.781171,0.900534,0.313846,0.213339,0.785642,0.156987,0.517287,0.673248,0.0608715,0.561271,0.280988,0.683117,0.260183,0.679512,0.548128,0.506672,0.983551,0.0132054,0.267289,0.434543,0.453715,0.951617,0.830461,0.425083,0.169739,0.645449,0.668355,0.416807,0.165162,0.342719,0.65245,0.946334,0.243253,0.966296,0.159673,0.0288952,0.123283,0.67696,0.702143,0.184155,0.238231,0.983132,0.867272,0.498414,0.662644,0.4154,0.00508614,0.646195,0.428606,0.272375,0.0807378,0.882321,0.223992,0.911199,0.307404,0.393731,0.556648,0.975759,0.810538,0.72181,0.318478,0.462988,0.668144,0.561731,0.429285,0.827817,0.590626,0.552568,0.504777,0.292769,0.736723,0.743007,0.275901,0.603995,0.241421,0.938545,0.0193954,0.246508,0.58474,0.448001,0.518883,0.665477,0.330322,0.742875,0.576677,0.637726,0.136606,0.133324,0.613485,0.947144,0.855134,0.931963,0.410133,0.523278,0.493693,0.839417,0.351094,0.0843188,0.391985,0.855871,0.377088,0.128708,0.598878,0.652989,0.732703,0.8403,0.591534,0.752099,0.0868075,0.176274,0.2001,0.60569,0.841751,0.530422,0.348566,0.418428,0.168147,0.485172,0.551752,0.781632,0.432316,0.406886,0.713595,0.842449,0.930164,0.207288,0.681866,0.281258,0.291607,0.073851,0.137129,0.668695,0.202559,0.736007,0.321684,0.935262,0.576307,0.913217,0.687361,0.663114,0.089491,0.88746,0.268805,0.931242,0.417882,0.61737,0.349669,0.586029,0.102542,0.901421,0.367662,0.534858,0.308307,0.0812568,0.377307,0.238471,0.288545,0.059173,0.519729,0.580152,0.133024,0.656857,0.248846,0.335583,0.392864,0.57053,0.270845,0.969171,0.483747,0.958206,0.632286,0.573238,0.845666,0.901091,0.50448,0.263548,0.518461,0.85415,0.849578,0.621003,0.755571,0.217239,0.155862,0.0638782,0.298496,0.533169,0.302349,0.587041,0.592342,0.822078,0.167193,0.725366,0.478935,0.416039,0.0609487,0.871799,0.986569,0.331794,0.840971,0.470317,0.29,0.473256,0.0435554,0.135666,0.374347,0.548036,0.399214,0.892808,0.402186,0.248792,0.513811,0.157757,0.466031,0.669673,0.221635,0.764527,0.202842,0.523984,0.351569,0.795184,0.346062,0.518762,0.520549,0.824996,0.934801,0.581498,0.696796,0.921371,0.913292,0.537766,0.391687,0.203291,0.0110226,0.435243,0.338957,0.38537,0.983279,0.738171,0.278178,0.385464,0.986963,0.791989,0.543221,0.452994,0.461662,0.764856,0.217522,0.664504,0.28884,0.56909,0.459687,0.634901,0.0878519,0.980237,0.459898,0.022653,0.561735,0.156693,0.944023,0.475027,0.69446,0.335711,0.678318,0.705482,0.770954,0.0172753,0.090852,0.754232,0.755446,0.36903,0.139696,0.742409,0.161018,0.682917,0.195403,0.62268,0.447773,0.412925,0.287184,0.736613,0.982015,0.746871,0.371515,0.0698671,0.727108,0.831412,0.09252,0.288842,0.988106,0.0365435,0.763869,0.682566,0.372254,0.442187,0.388048,0.143208,0.459462,0.4789,0.89744,0.214909,0.84793,0.0371364,0.957318,0.00894806,0.720054,0.152722,0.631628,0.167827,0.565647,0.918812,0.90444,0.547662,0.665684,0.275955,0.617529,0.392791,0.107367,0.710049,0.681634,0.0954727,0.746592,0.445503,0.778038,0.118847,0.88769,0.166086,0.262054,0.347152,0.644986,0.159495,0.562061,0.492916,0.196631,0.519379,0.501864,0.916685,0.672101,0.133492,0.0845114,0.237747,0.0523047,0.988951,0.785409,0.717988,0.264906,0.402938,0.11078,0.372273,0.112986,0.792413,0.467746,0.859579,0.237916,0.245784,0.978425,0.125606,0.41187,0.24048,0.472758,0.0568564,0.399974,0.0348193,0.549772,0.596605,0.554198,0.0516362,0.51329,0.226299,0.185129,0.597801,0.464046,0.237433,0.586753,0.249455,0.955421,0.851658,0.652393,0.0662011,0.223931,0.765379,0.858615,0.6916
77,0.624958,0.0965308,0.937461,0.603383,0.222137,0.349331,0.843863,0.694895,0.406187,0.243838,0.729715,0.955959,0.840443,0.283913,0.00759571,0.353733,0.510212,0.192724,0.951534,0.974258,0.430157,0.538286,0.223713,0.385579,0.389945,0.876106,0.45178,0.613876,0.641486,0.310394,0.305553,0.266444,0.406925,0.243014,0.869827,0.629062,0.592344,0.71369,0.323957,0.998532,0.957528,0.0536719,0.954491,0.797971,0.337585,0.962087,0.151704,0.847797,0.154811,0.103238,0.822055,0.584969,0.641524,0.0457682,0.970547,0.0314689,0.921874,0.422327,0.645345,0.56336,0.732722,0.950898,0.829804,0.139647,0.193912,0.699631,0.768709,0.786256,0.413321,0.0926668,0.784788,0.370849,0.146339,0.739279,0.16882,0.483923,0.701366,0.320524,0.33172,0.856177,0.423762,0.153775,0.441145,0.0652859,0.199543,0.411693,0.0967548,0.121418,0.83402,0.7421,0.684778,0.566742,0.692998,0.514581,0.706389,0.886909,0.214212,0.475099,0.673165,0.627533,0.567766,0.457953,0.998382,0.714104,0.197232,0.167202,0.198028,0.898598,0.487726,0.529748,0.754775,0.911488,0.683523,0.19592,0.976774,0.883066,0.607613,0.0735288,0.00448395,0.441633,0.815629,0.689261,0.00837563,0.508626,0.203843,0.714765,0.395536,0.418054,0.189864,0.0687013,0.045587,0.75763,0.526654,0.0439689,0.471734,0.723886,0.211171,0.669762,0.622484,0.698898,0.19951,0.377259,0.610386,0.883033,0.573179,0.58716,0.766099,0.180792,0.660689,0.770583,0.622426,0.476317,0.459845,0.630802,0.984944,0.663687,0.345567,0.38048,0.0817414,0.535431,0.449181,0.127328,0.29306,0.975835,0.171297,0.764794,0.699722,0.382469,0.434556,0.322206,0.0813662,0.634066,0.699465,0.691752,0.517098,0.272645,0.278912,0.283198,0.453437,0.939601,0.0537806,0.0758632,0.415918,0.513625,0.706665,0.400862,0.177312,0.0522313,0.781341,0.259054,0.587662,0.230522,0.386382,0.880722,0.206357,0.557679,0.645516,0.906079,0.940148,0.080072,0.228285,0.021514,0.714138,0.927751,0.713266,0.231236,0.200396,0.992178,0.514434,0.653833,0.931779,0.568214,0.729696,0.347697,0.0818395,0.436361,0.748558,0.259152,0.488592,0.5299,0.518206,0.076254,0.760422,0.904588,0.956976,0.96678,0.462267,0.602492,0.872859,0.402415,0.682564,0.101144,0.423929,0.396702,0.0288952,0.137195,0.627938,0.229291,0.129373,0.142371,0.883124,0.0611515,0.710586,0.61282,0.408848,0.792425,0.0491806,0.157407,0.0515771,0.537773,0.687306,0.569783,0.614027,0.447728,0.47437,0.571003,0.414508,0.936637,0.173495,0.287367,0.339052,0.856059,0.388511,0.762981,0.25276,0.417406,0.900176,0.880698,0.646697,0.0295484,0.0230694,0.529821,0.0906999,0.733655,0.142641,0.499548,0.52608,0.191821,0.656955,0.577657,0.729594,0.344261,0.14744,0.343621,0.791989,0.62181,0.914623,0.206497,0.558447,0.0881181,0.493864,0.897499,0.944177,0.882375,0.66048,0.196937,0.299781,0.560656,0.0776351,0.946478,0.590204,0.100705,0.476299,0.680904,0.83436,0.61894,0.180452,0.36044,0.810761,0.837407,0.938097,0.540355,0.181668,0.0855375,0.883976,0.973657,0.707348,0.798599,0.180154,0.265795,0.886717,0.674018,0.163295,0.830894,0.556394,0.823775,0.0278314,0.856175,0.384431,0.105467,0.802653,0.974635,0.206171,0.278953,0.655539,0.0405306,0.897893,0.835991,0.400971,0.708654,0.673398,0.339068,0.249009,0.855065,0.424605,0.132985,0.828722,0.131953,0.931585,0.00887685,0.397749,0.818302,0.682895,0.561043,0.649196,0.239289,0.384818,0.677028,0.0954639,0.769249,0.782494,0.898117,0.743884,0.988665,0.17707,0.399423,0.0291961,0.0749629,0.235414,0.430167,0.783617,0.908812,0.769235,0.0326261,0.763877,0.19384,0.165611,0.5926,0.325793,0.097196,0.601477,0.723542,0.915498,0.284372,0.284585,0.564695,0.523661,0.669403,0.241722,0.619125,0.438652,0.0242168,0.517242,0.182
536,0.0128822,0.694312,0.581959,0.0420783,0.769275,0.817373,0.472245,0.552892,0.726185,0.24148,0.585518,0.490062,0.43532,0.751129,0.0826619,0.761113,0.848325,0.684138,0.484655,0.763824,0.96851,0.76924,0.328518,0.492171,0.438643,0.570241,0.111296,0.877295,0.594457,0.628538,0.0598314,0.60734,0.32285,0.641791,0.649418,0.092125,0.459164,0.121663,0.645017,0.185348,0.363142,0.230535,0.675411,0.798462,0.981664,0.758072,0.559575,0.82999,0.442211,0.0442297,0.593813,0.410721,0.813469,0.922332,0.902892,0.252113,0.492572,0.0141883,0.129408,0.0870295,0.642726,0.189239,0.694369,0.965576,0.83103,0.343787,0.0577012,0.290193,0.46545,0.702718,0.475542,0.828592,0.933253,0.150952,0.627055,0.914918,0.909025,0.18663,0.744907,0.351236,0.230859,0.338721,0.761957,0.0443287,0.261052,0.66485,0.296441,0.753625,0.679038,0.425849,0.840654,0.321764,0.615088,0.535023,0.28734,0.446118,0.87881,0.345041,0.736311,0.34426,0.0477595,0.211853,0.172853,0.981013,0.362806,0.799907,0.89593,0.27183,0.986537,0.640838,0.623066,0.217396,0.979559,0.385024,0.261725,0.240611,0.0498732,0.558166,0.994236,0.728911,0.984015,0.83489,0.0506753,0.599103,0.369913,0.338016,0.045221,0.248723,0.683057,0.781532,0.592984,0.730816,0.993385,0.765836,0.711829,0.356191,0.565743,0.60776,0.628021,0.55228,0.248597,0.251088,0.769676,0.228156,0.636111,0.0314009,0.468767,0.685985,0.589567,0.463003,0.414896,0.573582,0.297893,0.465571,0.172685,0.667806,0.803587,0.217906,0.91653,0.486644,0.999438,0.509513,0.21746,0.992824,0.27535,0.929289,0.349015,0.841093,0.537049,0.977036,0.393373,0.785646,0.228124,0.163049,0.0138023,0.864235,0.19445,0.48257,0.55022,0.784017,0.945573,0.965115,0.357599,0.243466,0.430686,0.530284,0.911272,0.234273,0.748189,0.827802,0.720917,0.747627,0.337315,0.938377,0.740451,0.612665,0.867666,0.0894655,0.453758,0.404715,0.0665015,0.847131,0.190361,0.294625,0.0101804,0.204164,0.15886,0.204631,0.686733,0.70908,0.988648,0.632306,0.674195,0.346247,0.875772,0.104882,0.87653,0.787044,0.339155,0.624719,0.614846,0.0600716,0.372347,0.952161,0.998448,0.112798,0.564826,0.866115,0.202263,0.0185833,0.27083,0.268765,0.865714,0.461191,0.56339,0.875895,0.665355,0.72225,0.0805253,0.352088,0.43133,0.0691732,0.984394,0.105525,0.41542,0.860165,0.210407,0.29195,0.647209,0.549562,0.916669,0.262055,0.609634,0.289016,0.214216,0.608082,0.401814,0.779041,0.474197,0.604077,0.797624,0.745027,0.872841,0.663339,0.206218,0.436231,0.539233,0.871572,0.158482,0.619758,0.22366,0.589812,0.688932,0.208053,0.695337,0.104351,0.0682187,0.905744,0.396301,0.715428,0.455307,0.312971,0.977482,0.0649402,0.601987,0.191698,0.673022,0.00380057,0.970739,0.147219,0.607877,0.768364,0.892245,0.480719,0.431702,0.0984631,0.91695,0.970936,0.970035,0.0754316,0.590694,0.193695,0.665243,0.279626,0.401749,0.36058,0.383977,0.469967,0.266325,0.780279,0.185395,0.721631,0.0932493,0.162877,0.786572,0.695236,0.354576,0.459594,0.699037,0.325315,0.606813,0.306914,0.0936786,0.499058,0.787633,0.525381,0.597521,0.704583,0.496316,0.567557,0.780015,0.0870104,0.761252,0.445258,0.366636,0.163,0.805838,0.750613,0.632967,0.0721632,0.530892,0.818362,0.793795,0.624141,0.98124,0.580366,0.319377,0.335815,0.03996,0.0184139,0.66113,0.646773,0.325328,0.754809,0.145831,0.112961,0.28019,0.743352,0.817544,0.776506,0.310909,0.597559,0.863517,0.0721607,0.0428165,0.230153,0.235161,0.848655,0.980766,0.868128,0.920818,0.511658,0.686491,0.714613,0.135798,0.66773,0.294979,0.455176,0.00354571,0.334939,0.47359,0.664676,0.981712,0.798918,0.419485,0.127543,0.911879,0.699675,0.870895,0.729423,0.476181,0.181804,0.326981,0.339697,0.253965,0.3697
98,0.56985,0.489126,0.218453,0.550616,0.357254,0.139271,0.0622733,0.0437446,0.853883,0.198072,0.711475,0.148862,0.653247,0.715021,0.483801,0.126837,0.379697,0.465513,0.925755,0.799181,0.593056,0.837633,0.498856,0.463951,0.567056,0.975037,0.645755,0.894037,0.314734,0.89972,0.263835,0.884584,0.388846,0.482287,0.4352,0.7461,0.621558,0.497473,0.789845,0.475441,0.695545,0.50132,0.624303,0.348792,0.21634,0.108104,0.475629,0.596037,0.573617,0.401384,0.395218,0.166673,0.239017,0.894074,0.630624,0.806073,0.869111,0.276379,0.70011,0.183845,0.176099,0.963944,0.0684285,0.564945,0.446231,0.503628,0.311045,0.0677889,0.00110148,0.10089,0.54323,0.696646,0.602209,0.167533,0.0454389,0.818549,0.275637,0.521068,0.414586,0.849254,0.922452,0.809804,0.0159266,0.16147,0.703878,0.64655,0.967543,0.572989,0.922929,0.667652,0.756833,0.0990278,0.631597,0.825262,0.663973,0.077828,0.32889,0.975018,0.145617,0.329992,0.0759073,0.688847,0.026638,0.678116,0.85638,0.0720768,0.496666,0.132017,0.593145,0.911252,0.981271,0.515598,0.721056,0.997197,0.677067,0.424934,0.643748,0.64461,0.997922,0.566676,0.312262,0.754755,0.665704,0.943859,0.580017,0.329677,0.0216871,0.908907,0.304695,0.167304,0.238899,0.380602,0.856151,0.265537,0.0587183,0.71253,0.337613,0.555384,0.844547,0.930759,0.466636,0.825818,0.446356,0.187691,0.823016,0.123424,0.612625,0.466763,0.768033,0.610547,0.0334395,0.0802958,0.365302,0.699144,0.0241549,0.945319,0.0288206,0.045842,0.854226,0.333515,0.213146,0.0931246,0.714117,0.0692968,0.358661,0.772835,0.781827,0.696274,0.328219,0.626375,0.627033,0.794855,0.452193,0.0733893,0.982546,0.275208,0.196813,0.595171,0.741971,0.964846,0.205717,0.775411,0.0451421,0.571019,0.474554,0.0692969,0.516338,0.503375,0.115139,0.370564,0.83689,0.328285,0.463689,0.551007,0.397582,0.82235,0.323842,0.179409,0.518624,0.652062,0.805784,0.145657,0.446917,0.257976,0.219047,0.429462,0.533184,0.41586,0.024633,0.275155,0.380706,0.23035,0.0505659,0.425848,0.80137,0.52512,0.495145,0.317708,0.0284952,0.610284,0.688272,0.865385,0.938569,0.151961,0.416392,0.33615,0.974311,0.740235,0.515559,0.492935,0.392297,0.321343,0.638593,0.839213,0.579319,0.85764,0.268676,0.112503,0.273499,0.293309,0.387658,0.654205,0.523659,0.438224,0.0800528,0.325029,0.963345,0.575198,0.642737,0.99184,0.185481,0.331009,0.857225,0.12405,0.48297,0.273617,0.4602,0.457281,0.0138523,0.975759,0.950216,0.406149,0.297102,0.588809,0.245362,0.876421,0.446449,0.514038,0.988924,0.719948,0.807347,0.376583,0.374153,0.331006,0.814807,0.454205,0.656035,0.778152,0.029403,0.298771,0.769991,0.214884,0.62978,0.627216,0.338934,0.11275,0.900834,0.799134,0.57003,0.914686,0.774894,0.520247,0.320835,0.0719961,0.109056,0.566197,0.948417,0.555504,0.0802354,0.937341,0.275452,0.887582,0.313924,0.649605,0.218588,0.128731,0.10381,0.874623,0.906882,0.133213,0.173394,0.676874,0.348098,0.803174,0.30409,0.687032,0.915923,0.204924,0.486166,0.485954,0.11961,0.26106,0.00620027,0.440445,0.333056,0.115256,0.00664238,0.281473,0.67076,0.0868778,0.218815,0.946213,0.97446,0.532739,0.595818,0.193048,0.66147,0.699628,0.0676706,0.568352,0.832841,0.241064,0.245226,0.180939,0.0442379,0.549316,0.867971,0.960161,0.75424,0.354137,0.446115,0.87385,0.615197,0.452315,0.314295,0.948253,0.567571,0.320937,0.229727,0.238331,0.407815,0.448542,0.184544,0.382275,0.98128,0.780362,0.575323,0.64275,0.47999,0.642994,0.211103,0.312831,0.884058,0.456329,0.49377,0.928296,0.00564476,0.361741,0.888457,0.759885,0.715878,0.334572,0.633735,0.331075,0.786887,0.948029,0.279329,0.354458,0.268967,0.509056,0.592789,0.676782,0.957597,0.777333,0.0590564,0.938
878,0.557695,0.634379,0.581628,0.0376849,0.277373,0.792731,0.350516,0.161431,0.249059,0.844286,0.0897267,0.254704,0.206027,0.978184,0.0145887,0.921906,0.312755,0.648323,0.252981,0.0996421,0.596353,0.53231,0.4541,0.865319,0.0413655,0.0468891,0.542101,0.998963,0.824222,0.601157,0.937841,0.381918,0.235536,0.519469,0.419602,0.512909,0.312199,0.770118,0.67434,0.561258,0.614405,0.764067,0.815962,0.820432,0.74225,0.830551,0.742337,0.0550058,0.478874,0.995318,0.154648,0.075227,0.527628,0.608748,0.940546,0.568994,0.655637,0.482647,0.567957,0.479859,0.0838037,0.505797,0.861777,0.31934,0.0252656,0.281379,0.832249,0.337465,0.0514976,0.50659,0.898723,0.665902,0.270656,0.714686,0.486334,0.0129068,0.545237,0.228672,0.0679126,0.0241115,0.22399,0.222561,0.0993385,0.751618,0.831308,0.0398846,0.320612,0.486945,0.522531,0.888569,0.966804,0.606335,0.394366,0.828581,0.925675,0.419631,0.10996,0.757924,0.757096,0.161458,0.264514,0.65582,0.82736,0.53517,0.370505,0.313694,0.548077,0.915742,0.542366,0.61599,0.939854,0.766356,0.83855,0.0391924,0.517974,0.669859,0.079077,0.838586,0.156804,0.601608,0.727155,0.123608,0.207943,0.12152,0.952189,0.133618,0.541152,0.0621487,0.891543,0.298248,0.223606,0.156057,0.954068,0.0509664,0.691227,0.324573,0.364661,0.239304,0.240315,0.907026,0.855294,0.180169,0.673382,0.693844,0.219362,0.191356,0.363703,0.298439,0.0299415,0.520506,0.900047,0.757096,0.644114,0.10799,0.878616,0.596303,0.241608,0.419768,0.658451,0.133151,0.718017,0.882058,0.289207,0.672084,0.933024,0.980434,0.996657,0.297685,0.219738,0.236973,0.204711,0.075032,0.417142,0.878093,0.768876,0.636504,0.0694482,0.132579,0.934943,0.0993897,0.653085,0.834989,0.856486,0.297199,0.94298,0.735102,0.893502,0.184588,0.154871,0.551953,0.317738,0.872887,0.434011,0.606946,0.544971,0.367035,0.58738,0.541629,0.66472,0.807118,0.778601,0.869431,0.88215,0.195744,0.747523,0.651026,0.832247,0.816972,0.783605,0.76719,0.916361,0.43669,0.602179,0.772847,0.733889,0.545159,0.507949,0.627391,0.729747,0.66282,0.179344,0.047485,0.535707,0.613355,0.654431,0.0806783,0.980391,0.24181,0.622307,0.64511,0.0489284,0.400908,0.514541,0.931078,0.596652,0.262065,0.582104,0.428899,0.0790366,0.365709,0.196089,0.995398,0.8024,0.798269,0.768245,0.536289,0.343428,0.276194,0.16368,0.0731742,0.939014,0.343025,0.120659,0.474721,0.95638,0.77509,0.555399,0.936771,0.0169003,0.177706,0.581881,0.0658287,0.578615,0.0964226,0.996907,0.175267,0.358487,0.579011,0.604166,0.437524,0.944721,0.800255,0.432922,0.74712,0.598524,0.201167,0.283409,0.941951,0.477361,0.44709,0.0151254,0.416376,0.790115,0.135785,0.891097,0.746495,0.910874,0.446496,0.683265,0.927775,0.624203,0.265147,0.993604,0.202817,0.361569,0.990511,0.378084,0.720057,0.569522,0.98225,0.157581,0.514243,0.782505,0.590503,0.261363,0.381028,0.79167,0.544773,0.32298,0.269031,0.991862,0.338105,0.685407,0.781977,0.47389,0.576503,0.528472,0.384764,0.0229996,0.211737,0.312539,0.647202,0.476884,0.306142,0.85002,0.838453,0.296653,0.228103,0.558509,0.866175,0.210353,0.71609,0.380418,0.992857,0.306593,0.641781,0.373886,0.0982623,0.186553,0.696865,0.367293,0.178416,0.0349701,0.0526998,0.960393,0.50886,0.629203,0.488864,0.893624,0.652203,0.700602,0.206163,0.299405,0.177485,0.512305,0.149424,0.0159381,0.808958,0.377528,0.574448,0.675133,0.587881,0.290538,0.0555507,0.580738,0.59713,0.697332,0.954624,0.695393,0.883885,0.651489,0.0626859,0.0623009,0.686459,0.115386,0.0226937,0.195319,0.744589,0.511558,0.0889426,0.396792,0.21216,0.295105,0.696197,0.389645,0.80741,0.845621,0.405583,0.616368,0.223149,0.980031,0.291501,0.811029,0.270568,0.347052
,0.391768,0.867699,0.0443836,0.346391,0.563091,0.928269,0.99788,0.625777,0.990569,0.684339,0.741163,0.0132632,0.879658,0.485752,0.524821,0.968601,0.882543,0.736981,0.263706,0.57874,0.126626,0.0711161,0.424361,0.532209,0.687484,0.64751,0.51224,0.978986,0.458539,0.782808,0.326038,0.850307,0.650507,0.370421,0.196698,0.213598,0.29869,0.194578,0.839375,0.289259,0.878918,0.580538,0.302523,0.758576,0.0662901,0.827344,0.727176,0.948834,0.564325,0.990882,0.527574,0.690951,0.0619982,0.951935,0.22316,0.749483,0.599445,0.7354,0.728468,0.0579841,0.518208,0.054506,0.908291,0.168715,0.424927,0.104989,0.382313,0.723617,0.299568,0.221688,0.0128767,0.178486,0.802226,0.315399,0.937061,0.868516,0.142743,0.664238,0.81735,0.707069,0.65512,0.344923,0.39802,0.717118,0.296858,0.62118,0.466601,0.896303,0.35658,0.195069,0.954287,0.874788,0.249575,0.862578,0.043503,0.674502,0.967567,0.425816,0.398119,0.267135,0.647504,0.410996,0.445621,0.44973,0.726395,0.382682,0.318246,0.869139,0.0469199,0.135596,0.576207,0.70204,0.480519,0.974227,0.419158,0.777377,0.595408,0.885758,0.67368,0.951988,0.0808269,0.627967,0.826776,0.330402,0.490545,0.870279,0.00490388,0.458112,0.296095,0.403023,0.725248,0.943598,0.814019,0.170868,0.393328,0.540415,0.553551,0.711574,0.409554,0.600471,0.84717,0.985761,0.30251,0.32769,0.959988,0.721668,0.105067,0.555396,0.607426,0.778747,0.507384,0.688253,0.406714,0.33416,0.0186545,0.897259,0.204439,0.0235584,0.355372,0.500534,0.426582,0.0806191,0.444132,0.240601,0.251488,0.83746,0.781016,0.805038,0.549034,0.19057,0.405509,0.396205,0.176331,0.708019,0.723894,0.136319,0.429687,0.828961,0.691715,0.0371126,0.607708,0.199099,0.725365,0.0144225,0.533259,0.74402,0.911682,0.737698,0.767578,0.267053,0.238232,0.19416,0.347672,0.682364,0.434761,0.59916,0.519824,0.215777,0.404198,0.068858,0.406347,0.809707,0.465063,0.582678,0.517726,0.188957,0.718997,0.947412,0.0179178,0.410712,0.984525,0.625626,0.609811,0.70989,0.640049,0.14307,0.45391,0.55173,0.880768,0.221489,0.818783,0.119,0.415649,0.166456,0.801363,0.85041,0.765616,0.321187,0.0661874,0.169814,0.390045,0.472534,0.979521,0.855107,0.0552124,0.497246,0.044064,0.77421,0.444659,0.0619817,0.184922,0.429184,0.687608,0.794733,0.139074,0.327656,0.937804,0.592984,0.879387,0.818572,0.814473,0.69817,0.937572,0.230122,0.864626,0.738935,0.080532,0.630241,0.0601217,0.146719,0.800055,0.450166,0.619254,0.779576,0.305274,0.674466,0.276822,0.349338,0.448676,0.721481,0.41132,0.633598,0.150664,0.0989273,0.428331,0.289738,0.426584,0.366135,0.882723,0.30597,0.184707,0.697196,0.00414004,0.122278,0.927318,0.868766,0.861213,0.00784996,0.499007,0.921335,0.154569,0.299062,0.371502,0.773823,0.078638,0.676775,0.448289,0.35546,0.0261132,0.896965,0.0769406,0.437433,0.530563,0.227605,0.53636,0.958894,0.517343,0.962943,0.325029,0.400066,0.268914,0.509736,0.0972621,0.273054,0.632014,0.02458,0.141819,0.493227,0.03243,0.640827,0.414562,0.186999,0.939889,0.786064,0.960822,0.0185267,0.462839,0.409112,0.373987,0.488953,0.306076,0.450927,0.926385,0.836639,0.678532,0.462745,0.795533,0.195875,0.425689,0.120562,0.595941,0.694602,0.630297,0.693203,0.967656,0.262311,0.717783,0.109475,0.755539,0.750213,0.750302,0.170101,0.937213,0.690191,0.956165,0.898035,0.708717,0.419005,0.307147,0.0827041,0.907957,0.613223,0.533631,0.834343,0.449862,0.212163,0.297088,0.245395,0.408039,0.722777,0.365957,0.00398006,0.417379,0.996254,0.697183,0.385035,0.258566,0.414967,0.49451,0.0141045,0.16518,0.244812,0.184206,0.102393,0.935003,0.140371,0.000428173,0.64372,0.559376,0.307575,0.726424,0.467333,0.920798,0.260055,0.301676,0.3706
6,0.472219,0.598764,0.616056,0.880257,0.32154,0.982013,0.884238,0.738919,0.978267,0.581421,0.123954,0.236832,0.996388,0.618464,0.250937,0.161568,0.863276,0.435143,0.263961,0.798279,0.575514,0.26439,0.441999,0.13489,0.571965,0.168424,0.602224,0.492763,0.428479,0.9039,0.863423,0.900698,0.502664,0.479478,0.780955,0.824204,0.461491,0.665193,0.563123,0.439758,0.246614,0.687077,0.67659,0.243002,0.305542,0.927527,0.40457,0.168818,0.36267,0.668532,0.967097,0.938184,0.932922,0.409097,0.0730748,0.504886,0.57752,0.675299,0.997649,0.00599928,0.579198,0.861072,0.906697,0.081862,0.34055,0.687653,0.906066,0.802041,0.352845,0.469189,0.241799,0.599459,0.156267,0.918389,0.842461,0.461809,0.845917,0.247032,0.630627,0.208587,0.915564,0.597724,0.146771,0.848485,0.00682097,0.219846,0.353372,0.584341,0.895144,0.35102,0.59034,0.474343,0.212092,0.497038,0.556205,0.552642,0.18469,0.462271,0.354683,0.537536,0.93146,0.596482,0.136995,0.0877272,0.514871,0.979457,0.549536,0.360787,0.226488,0.180163,0.569374,0.142052,0.777887,0.716145,0.990538,0.784708,0.935991,0.343909,0.369049,0.831136,0.69493,0.95939,0.305479,0.907022,0.456427,0.861683,0.459663,0.641117,0.323954,0.814346,0.178653,0.255415,0.410828,0.315648,0.343142,0.925699,0.295105,0.892678,0.286486,0.521593,0.0728402,0.855861,0.663645,0.850727,0.572006,0.654183,0.635435,0.507997,0.998093,0.00448416,0.339133,0.693022,0.963874,0.644612,0.600044,0.420301,0.506295,0.0597071,0.0614185,0.830249,0.874053,0.240071,0.0856638,0.284881,0.555719,0.428806,0.21058,0.850824,0.321483,0.497067,0.372417,0.394323,0.352927,0.0360624,0.24505,0.924933,0.690246,0.880485,0.432931,0.688338,0.88497,0.772064,0.38136,0.848843,0.416675,0.981404,0.269144,0.92297,0.0411112,0.330563,0.75322,0.915165,0.570634,0.838884,0.200046,0.126354,0.267689,0.410626,0.977178,0.589172,0.907693,0.349595,0.983495,0.26062,0.385657,0.228546,0.185554,0.0759029,0.109031,0.618484,0.764241,0.994001,0.390548,0.145601,0.842844,0.807224,0.127005,0.111988,0.730194,0.168117,0.442551,0.483414,0.0832811,0.0131856,0.322297,0.283327,0.139539,0.589986,0.693953,0.116717,0.179158,0.601646,0.466312,0.162653,0.862267,0.85197,0.391199,0.0478205,0.927873,0.50023,0.666305,0.692113,0.494231,0.0568531,0.837715,0.337075,0.864077,0.96472,0.449063,0.594271,0.132837,0.891614,0.0776843,0.216118,0.9048,0.399981,0.499445,0.0443392,0.989968,0.193398,0.161057,0.169126,0.795044,0.627369,0.331779,0.657311,0.479339,0.722978,0.705131,0.407211,0.223209,0.371436,0.0993246,0.717439,0.428289,0.937039,0.0545139,0.292366,0.90176,0.503577,0.886637,0.0345962,0.395191,0.964321,0.250714,0.299991,0.364302,0.750159,0.34433,0.35427,0.943557,0.505387,0.523396,0.738601,0.132756,0.855175,0.395912,0.612094,0.578153,0.101044,0.0193052,0.801362,0.47248,0.11863,0.518801,0.900769,0.0556692,0.573315,0.193135,0.957429,0.0768919,0.0797719,0.992025,0.472083,0.0440927,0.242739,0.772074,0.408395,0.992898,0.116404,0.762665,0.936455,0.62179,0.286061,0.675056,0.754546,0.141236,0.0709682,0.36664,0.719389,0.172012,0.385945,0.520751,0.644492,0.504575,0.0395521,0.545261,0.560244,0.612867,0.738396,0.517673,0.689759,0.818168,0.509698,0.161842,0.862261,0.752437,0.933916,0.270656,0.745335,0.0503196,0.033321,0.68179,0.67211,0.319382,0.356846,0.426656,0.460617,0.427814,0.793296,0.180006,0.599826,0.179242,0.700757,0.244317,0.683817,0.740309,0.789578,0.244061,0.353176,0.527975,0.761734,0.0429353,0.346143,0.271433,0.204777,0.208404,0.0238696,0.138693,0.47906,0.769204,0.189013,0.512381,0.450994,0.861123,0.831763,0.80784,0.287779,0.29238,0.235653,0.081075,0.472386,0.835479,0.260317,0.173143,0.07
97963,0.944133,0.913452,0.869375,0.188195,0.266628,0.397349,0.949929,0.309564,0.743493,0.221362,0.514341,0.951897,0.245231,0.653034,0.430957,0.0144357,0.842047,0.943338,0.46543,0.703169,0.775101,0.27327,0.990948,0.0674811,0.508923,0.0720232,0.539867,0.344402,0.33234,0.71301,0.424198,0.276473,0.626462,0.293573,0.464668,0.893091,0.690922,0.414597,0.202654,0.434415,0.635959,0.716995,0.386312,0.88119,0.370029,0.817269,0.895626,0.212076,0.760607,0.361056,0.915246,0.535708,0.634325,0.906194,0.60319,0.143248,0.978217,0.143057,0.48765,0.310557,0.856067,0.911848,0.58703,0.482529,0.205421,0.0516979,0.37562,0.896344,0.466295,0.578274,0.330759,0.102254,0.29527,0.717071,0.983444,0.665299,0.53434,0.87907,0.877375,0.294947,0.240125,0.792621,0.830656,0.87445,0.698815,0.433845,0.0176985,0.677032,0.576902,0.505349,0.987589,0.432969,0.417197,0.574619,0.915498,0.622619,0.626317,0.291118,0.518962,0.0926115,0.869392,0.849721,0.194865,0.164662,0.566792,0.178309,0.829961,0.101132,0.0573785,0.707336,0.396079,0.297504,0.499957,0.226735,0.171954,0.198772,0.66058,0.189652,0.875804,0.237482,0.695001,0.863392,0.670451,0.112198,0.438011,0.585949,0.734817,0.0643275,0.877066,0.253779,0.156939,0.746458,0.1035,0.351804,0.91112,0.670293,0.530113,0.741081,0.771424,0.587492,0.448417,0.167503,0.884995,0.948373,0.394238,0.0569493,0.147145,0.0548179,0.246602,0.0229486,0.2923,0.941603,0.886341,0.96275,0.0538009,0.324352,0.548699,0.788617,0.388679,0.425765,0.0423966,0.545618,0.172223,0.145897,0.897423,0.0833434,0.81619,0.427536,0.824424,0.587614,0.0150275,0.272841,0.755117,0.900023,0.221214,0.149356,0.956972,0.368359,0.204173,0.203574,0.391308,0.496473,0.145177,0.277649,0.459223,0.198978,0.602,0.00792192,0.987595,0.99068,0.433687,0.0299916,0.536298,0.60591,0.175889,0.43372,0.689254,0.992078,0.861256,0.513678,0.579692,0.876284,0.786519,0.33481,0.776307,0.00773251,0.484165,0.733279,0.376092,0.688339,0.936853,0.767399,0.184812,0.0820294,0.0450478,0.644035,0.281007,0.647048,0.651957,0.268602,0.637728,0.0856437,0.298594,0.174026,0.691554,0.474482,0.607746,0.380808,0.46656,0.469002,0.894486,0.0462525,0.345286,0.681004,0.381062,0.121593,0.688737,0.865227,0.854871,0.0648284,0.553566,0.791724,0.832228,0.738377,0.873753,0.877275,0.382412,0.15476,0.524323,0.0343687,0.423362,0.162051,0.120012,0.721956,0.336077,0.811566,0.196438,0.943823,0.192374,0.662999,0.412825,0.0868603,0.709251,0.758111,0.767865,0.0903134,0.879704,0.456601,0.955541,0.734576,0.52143,0.509106,0.5263,0.353658,0.247484,0.400053,0.230933,0.629896,0.554814,0.755256,0.664264,0.978176,0.917308,0.784277,0.700132,0.253385,0.595843,0.896571,0.197208,0.788218,0.559569,0.610033,0.875078,0.26882,0.368144,0.642943,0.359134,0.247849,0.099544,0.314674,0.982424,0.620974,0.823781,0.508724,0.974631,0.0712643,0.908777,0.205564,0.70116,0.463591,0.960821,0.365424,0.441767,0.878128,0.149701,0.141899,0.131513,0.745544,0.0384693,0.32872,0.533762,0.598038,0.938753,0.40884,0.866859,0.306898,0.0517824,0.225993,0.554746,0.151326,0.540667,0.537171,0.7723,0.364448,0.0458944,0.746932,0.435712,0.954671,0.952496,0.136872,0.418262,0.913317,0.502296,0.860029,0.791445,0.651997,0.00192743,0.922957,0.397542,0.0403967,0.251678,0.931304,0.638435,0.190431,0.340143,0.505294,0.497329,0.391926,0.731287,0.052075,0.543252,0.271954,0.589246,0.315553,0.636402,0.63514,0.0624843,0.0721146,0.589811,0.0149802,0.208987,0.00807326,0.928297,0.711283,0.868102,0.719742,0.363281,0.870029,0.642699,0.760822,0.910426,0.894377,0.692126,0.548861,0.0848078,0.0322695,0.0541554,0.582137,0.424195,0.785442,0.634212,0.967448,0.0573967,0.223457
,0.283,0.693799,0.858597,0.345485,0.765914,0.448408,0.360465,0.974901,0.456482,0.288762,0.686184,0.324583,0.0085031,0.0494646,0.194613,0.651202,0.810287,0.105039,0.545579,0.502413,0.6539,0.630387,0.534682,0.708055,0.212523,0.958878,0.493498,0.846735,0.926326,0.550894,0.0701919,0.209326,0.244694,0.928789,0.55481,0.0106073,0.377197,0.915275,0.985508,0.833679,0.204037,0.671692,0.158262,0.21254,0.721156,0.352875,0.863742,0.531443,0.457914,0.409321,0.0338562,0.111814,0.0397075,0.568539,0.819869,0.252231,0.527417,0.313367,0.0989655,0.453742,0.864261,0.169157,0.663068,0.108955,0.0979462,0.217878,0.119562,0.475143,0.133153,0.10507,0.308822,0.33719,0.776762,0.467084,0.549729,0.497918,0.819959,0.413471,0.0293615,0.277873,0.822792,0.0632177,0.389687,0.862499,0.631756,0.209556,0.11473,0.159173,0.522922,0.213696,0.612915,0.387184,0.382853,0.275983,0.496139,0.480799,0.493861,0.615701,0.955943,0.627014,0.720771,0.264765,0.964204,0.497533,0.731849,0.513933,0.995451,0.551808,0.927404,0.0248125,0.829681,0.750196,0.0880302,0.219368,0.612695,0.719787,0.428923,0.727425,0.878959,0.951846,0.941121,0.491874,0.339029,0.323974,0.767857,0.835168,0.804773,0.261718,0.450869,0.760716,0.888732,0.17164,0.0254807,0.852936,0.669173,0.75733,0.366868,0.664624,0.309138,0.294272,0.689436,0.138819,0.0444683,0.777466,0.358187,0.657164,0.497253,0.78711,0.384589,0.376212,0.738955,0.32571,0.868087,0.0779848,0.649684,0.635944,0.913153,0.454457,0.897662,0.364021,0.215173,0.786394,0.535661,0.240654,0.63933,0.204834,0.997984,0.00619804,0.869457,0.307122,0.30047,0.558893,0.445941,0.344939,0.33636,0.804127,0.00210238,0.833613,0.591237,0.386691,0.209825,0.330193,0.712401,0.077912,0.408178,0.362085,0.713856,0.32133,0.816543,0.611518,0.685352,0.0317161,0.397912,0.221013,0.27237,0.0372418,0.425846,0.270354,0.0434398,0.295304,0.577476,0.34391,0.854197,0.0234163,0.688849,0.190557,0.827544,0.690951,0.0241693,0.418781,0.0776428,0.233994,0.748974,0.790044,0.311906,0.157152,0.15213,0.0257624,0.478482,0.968673,0.637281,0.163834,0.000388656,0.0351928,0.384847,0.272759,0.0724345,0.810693,0.543113,0.115874,0.105997,0.120588,0.459785,0.960194,0.144005,0.148634,0.15075,0.971548,0.839585,0.174919,0.39033,0.917228,0.408914,0.139304,0.707272,0.72082,0.296455,0.859402,0.746583,0.774938,0.828074,0.383863,0.938772,0.828463,0.419056,0.323618,0.101222,0.491491,0.134312,0.644334,0.607365,0.240308,0.764923,0.0671494,0.200502,0.908927,0.215783,0.351252,0.880476,0.0553683,0.526172,0.270805,0.972596,0.935085,0.410109,0.679868,0.655906,0.706564,0.53927,0.402488,0.481502,0.367345,0.786352,0.420274,0.195808,0.205408,0.743892,0.297029,0.696898,0.878204,0.941364,0.304263,0.118512,0.706287,0.371412,0.319014,0.615214,0.587196,0.670266,0.49569,0.642564,0.196438,0.766495,0.61516,0.131523,0.176604,0.295028,0.787429,0.883169,0.834299,0.189917,0.364671,0.201643,0.976269,0.784944,0.397451,0.181677,0.528837,0.69448,0.878575,0.40704,0.635844,0.182838,0.525553,0.342131,0.55425,0.844567,0.957345,0.141446,0.514833,0.453035,0.784009,0.711271,0.21953,0.399169,0.842794,0.396134,0.694198,0.630223,0.279302,0.528496,0.82014,0.643973,0.73014,0.796409,0.428917,0.12759,0.978086,0.957754,0.822071,0.85666,0.364794,0.457915,0.0394979,0.890347,0.800046,0.593748,0.734914,0.75739,0.735193,0.249746,0.210425,0.519203,0.961017,0.429955,0.918372,0.803811,0.826088,0.61257,0.434034,0.105391,0.141066,0.254174,0.749364,0.871206,0.0505827,0.178281,0.998796,0.0286683,0.136035,0.820867,0.885329,0.50083,0.278781,0.924827,0.391177,0.0788269,0.518574,0.12609,0.836217,0.253768,0.375837,0.0466424,0.772971,0.336853,
0.476597,0.691343,0.140664,0.302686,0.303913,0.574698,0.408076,0.444979,0.828871,0.15744,0.316184,0.879454,0.335721,0.31498,0.908122,0.471756,0.135847,0.793451,0.972586,0.414628,0.718277,0.363763,0.493455,0.236852,0.489853,0.329673,0.49062,0.865689,0.376315,0.26359,0.202543,0.852912,0.954933,0.343207,0.155598,0.258845,0.917905,0.563674,0.703824,0.746776,0.721114,0.0200086,0.62623,0.0568356,0.334989,0.534352,0.528592,0.470836,0.327803,0.501178,0.885465,0.0460806,0.86494,0.37892,0.282932,0.354793,0.708593,0.773552,0.220482,0.0849076,0.0371423,0.423025,0.93782,0.992075,0.766232,0.0934175,0.250921,0.684137,0.657092,0.954745,0.430912,0.378206,0.974753,0.0571424,0.435042,0.309743,0.591495,0.963634,0.780579,0.919298,0.464811,0.666043,0.965378,0.329752,0.044963,0.248311,0.684545,0.753556,0.021863,0.905027,0.838463,0.0590053,0.328053,0.776283,0.0510806,0.0942845,0.8697,0.302001,0.778421,0.526792,0.256746,0.209333,0.904998,0.2315,0.266476,0.34004,0.541242,0.85797,0.303673,0.321821,0.777268,0.768485,0.987864,0.742647,0.0982365,0.0328272,0.990958,0.782782,0.786383,0.0128206,0.687809,0.624846,0.0718259,0.0158615,0.401129,0.122907,0.110146,0.270829,0.424908,0.888567,0.797622,0.681654,0.0979004,0.70262,0.913154,0.364376,0.0426592,0.454397,0.222347,0.346332,0.776218,0.999615,0.114817,0.764082,0.742261,0.213053,0.796909,0.733219,0.995835,0.583292,0.74604,0.683644,0.208138,0.817865,0.699506,0.609267,0.940772,0.809652,0.880096,0.36568,0.698219,0.677718,0.0473344,0.796119,0.380337,0.960489,0.160495,0.422997,0.414885,0.382841,0.769329,0.191103,0.382456,0.884146,0.955185,0.124718,0.0971994,0.752094,0.857937,0.0930344,0.335386,0.603976,0.776678,0.543524,0.421842,0.476184,0.152791,0.362614,0.285835,0.0328873,0.728294,0.984054,0.710605,0.775628,0.780173,0.0909427,0.736117,0.940668,0.513939,0.151002,0.323509,0.283268,0.342105,0.705966,0.167414,0.29729,0.830683,0.264614,0.0493843,0.68862,0.357648,0.38477,0.292596,0.134326,0.928294,0.714438,0.61051,0.0810851,0.0770516,0.896346,0.113972,0.805345,0.8804,0.824578,0.580974,0.660572,0.91552,0.317091,0.60124,0.42946,0.468093,0.92475,0.712728,0.810199,0.630715,0.880142,0.107489,0.461398,0.144756,0.156873,0.150018,0.502404,0.541643,0.442614,0.636731,0.469938,0.157052,0.247241,0.551023,0.234104,0.143587,0.664995,0.0394491,0.0239862,0.489573,0.620423,0.684559,0.405093,0.937514,0.285799,0.834552,0.405607,0.210549,0.54728,0.215806,0.841264,0.427423,0.323294,0.302662,0.572179,0.480168,0.45268,0.0745829,0.021811,0.895294,0.711313,0.491749,0.0523465,0.958554,0.0427713,0.28645,0.102141,0.707766,0.325899,0.126127,0.197339,0.946322,0.810686,0.602432,0.883836,0.0964848,0.436984,0.289443,0.307033,0.984265,0.505249,0.148297,0.411687,0.828543,0.450959,0.983866,0.308711,0.903639,0.0584491,0.330522,0.798934,0.769763,0.82227,0.85128,0.728317,0.865042,0.13773,0.830458,0.572808,0.46363,0.956585,0.770147,0.409952,0.767271,0.372579,0.293788,0.863755,0.809563,0.583231,0.170789,0.793827,0.0884791,0.319086,0.205515,0.917022,0.770045,0.189381,0.225733,0.673684,0.24783,0.556255,0.472618,0.0175928,0.378525,0.323898,0.74591,0.243567,0.461629,0.576367,0.816375,0.925259,0.532952,0.586521,0.33521,0.300223,0.9591,0.628998,0.163978,0.768663,0.212228,0.334767,0.56249,0.300707,0.653852,0.768005,0.21773,0.423897,0.957386,0.443463,0.0975812,0.205216,0.999717,0.570199,0.222809,0.378242,0.894098,0.968719,0.621809,0.355727,0.545086,0.438183,0.280985,0.0780382,0.0247047,0.616195,0.378261,0.983805,0.245193,0.542239,0.752467,0.457421,0.877005,0.314958,0.758129,0.530858,0.0829629,0.975859,0.954754,0.0403492,0.419321,0.
0523357,0.245566,0.419038,0.622535,0.468375,0.79728,0.516633,0.437094,0.419089,0.872359,0.98218,0.857273,0.153344,0.0602182,0.881977,0.76954,0.438479,0.865782,0.0147328,0.980718,0.618249,0.472154,0.857723,0.933207,0.230283,0.388581,0.0161699,0.206142,0.343335,0.056519,0.625463,0.395671,0.302085,0.0445011,0.0182057,0.77046,0.841781,0.534838,0.207553,0.260871,0.407198,0.189733,0.118143,0.560542,0.249951,0.000120335,0.330082,0.68843,0.865902,0.344815,0.669148,0.484151,0.816969,0.526871,0.417358,0.047252,0.915452,0.433528,0.253394,0.258787,0.490047,0.878856,0.654458,0.792132,0.923357,0.672663,0.562591,0.765139,0.207502,0.770145,0.0260097,0.614699,0.959878,0.144153,0.175241,0.209829,0.144273,0.505323,0.898259,0.0101754,0.850138,0.567408,0.494327,0.667106,0.0942787,0.911685,0.714358,0.00973052,0.345213,0.967752,0.268517,0.83526,0.846608,0.922975,0.627392,0.769966,0.595638,0.189984,0.535105,0.80314,0.960128,0.561115,0.417839,0.920006,0.705268,0.593081,0.129835,0.849541,0.0984039,0.0280946,0.859716,0.948542,0.595502,0.354043,0.615648,0.689781,0.265728,0.330006,0.699511,0.610941,0.297758,0.968029,0.446202,0.144367,0.891004,0.073594,0.914333,0.486642,0.263578,0.449438,0.289782,0.223706,0.0105524,0.707622,0.143712,0.71582,0.300703,0.273547,0.565361,0.399107,0.301642,0.425077,0.347648,0.897144,0.77912,0.963296,0.586925,0.044848,0.293302,0.286436,0.655789,0.591061,0.254465,0.101991,0.735427,0.145469,0.175585,0.64976,0.632111,0.439163,0.0991978,0.921894,0.662869,0.10975,0.629516,0.806581,0.82557,0.930218,0.0801285,0.390931,0.329325,0.381771,0.816008,0.676973,0.278915,0.595128,0.640269,0.86584,0.639976,0.933572,0.152276,0.295765,0.524632,0.406741,0.397756,0.260059,0.55221,0.573341,0.90982,0.184322,0.0125042,0.00901735,0.106216,0.675373,0.118768,0.735731,0.481954,0.944338,0.66595,0.562083,0.335269,0.995275,0.943853,0.151277,0.672248,0.222768,0.746405,0.312517,0.0886075,0.386381,0.246089,0.240884,0.682147,0.770721,0.647625,0.0799033,0.0307805,0.199835,0.653245,0.9406,0.384157,0.665749,0.949617,0.490373,0.341122,0.0683849,0.226104,0.823076,0.0127227,0.892054,0.385159,0.347992,0.887329,0.329012,0.499269,0.559577,0.551779,0.245674,0.872094,0.640387,0.632055,0.118183,0.88127,0.314202,0.888904,0.528895,0.394106,0.919684,0.728731,0.0473504,0.860284,0.112888,0.713099,0.809902,0.603261,0.0542212,0.878287,0.829365,0.877297,0.891009,0.721419,0.262456,0.239001,0.608748,0.591467,0.73827,0.168325,0.143247,0.983944,0.0404186,0.783633,0.615999,0.158602,0.664904,0.930201,0.0475054,0.193799,0.324307,0.96719,0.92253,0.371657,0.827474,0.0354184,0.0847568,0.637376,0.63868,0.138978,0.515663,0.468045,0.0162752,0.406672,0.189464,0.278731,0.645673,0.798212,0.870198,0.383943,0.966536,0.0134447,0.367887,0.0069549,0.797078,0.983886,0.165556,0.461982,0.914087,0.213062,0.655781,0.238394,0.180251,0.578311,0.610052,0.00772563,0.61373,0.694808,0.645102,0.252409,0.833786,0.160764,0.720454,0.850062,0.567436,0.909918,0.128793,0.21311,0.70813,0.998991,0.597053,0.674666,0.0124353,0.964939,0.681621,0.809513,0.948825,0.847177,0.271495,0.862912,0.0602389,0.927276,0.101306,0.24049,0.505588,0.711358,0.248216,0.119317,0.406166,0.893318,0.371727,0.239953,0.0540818,0.0921808,0.0900146,0.621518,0.00209862,0.218807,0.834628,0.710228,0.217798,0.43168,0.384894,0.230233,0.396619,0.0665147,0.0397464,0.345444,0.913692,0.311242,0.208356,0.973931,0.238518,0.309663,0.214421,0.744106,0.0210204,0.462637,0.863423,0.427187,0.355955,0.23515,0.66714,0.410036,0.327331,0.757154,0.0315546,0.329429,0.975961,0.866182,0.0396574,0.193759,0.297863,0.424551,0.423992,0.694482,
0.491066,0.463739,0.0399264,0.404758,0.77498,0.248283,0.378688,0.0134981,0.557945,0.59311,0.757604,0.578966,0.0557466,0.621027,0.00615242,0.411701,0.856177,0.673292,0.821738,0.183507,0.430446,0.853292,0.512936,0.406408,0.719475,0.552594,0.600167,0.0173372,0.977145,0.0241591,0.711819,0.468211,0.487898,0.751746,0.872969,0.262878,2.84468e-05,0.251657,0.276376,0.557974,0.844767,0.0339799,0.136939,0.900514,0.655007,0.143092,0.312215,0.511183,0.816384,0.133953,0.69469,0.24683,0.987245,0.207627,0.653238,0.70672,0.760221,0.253405,0.724057,0.737366,0.277564,0.435876,0.205577,0.765462,0.187622,0.0785459,0.0283398,0.18765,0.330203,0.304716,0.745624,0.17497,0.338696,0.882563,0.0754839,0.993703,0.025655,0.387699,0.504886,0.842039,0.521651,0.199576,0.088869,0.508896,0.407203,0.742107,0.215616,0.167424,0.995512,0.939673,0.90479,0.273076,0.375549,0.110367,0.0385374,0.56317,0.188912,0.0668772,0.750821,0.519116,0.371593,0.496445,0.694086,0.710289,0.379008,0.76957,0.703992,0.404663,0.157269,0.208878,0.246702,0.67892,0.408454,0.335571,0.187816,0.815657,0.0776775,0.403432,0.983081,0.0731893,0.343105,0.887871,0.346265,0.718653,0.998237,0.384802,0.281823,0.18715,0.45168,0.0326441,0.706266,0.823273,0.529089,0.400352,0.533562,0.908096,0.169922,0.237554,0.312759,0.327191,0.446431,0.559461,0.00611063,0.854885,0.895031,0.193927,0.670542,0.972709,0.597359,0.653624,0.0458981,0.940464,0.541494,0.392163,0.659117,0.539732,0.776965,0.94094,0.726882,0.228645,0.973584,0.433148,0.0519179,0.502673,0.8335,0.58548,0.410769,0.00342144,0.823033,0.723528,0.330612,0.269465,0.282989,0.336723,0.12435,0.17802,0.53065,0.794892,0.150729,0.128009,0.448516,0.196627,0.0684721,0.99001,0.58879,0.727589,0.529742,0.365756,0.668529,0.256624,0.594401,0.642113,0.689772,0.646319,0.144786,0.523271,0.231799,0.555555,0.526693,0.0548322,0.279084,0.857305,0.324297,0.562073,0.194027,0.448647,0.740093,0.724677,0.243539,0.890822,0.852685,0.692054,0.0874497,0.921157,0.682064,0.67624,0.648746,0.211806,0.0419961,0.317275,0.46843,0.636397,0.959389,0.158202,0.282716,0.104175,0.681473,0.514515,0.65973,0.208165,0.569347,0.938814,0.0654699,0.893644,0.500887,0.259497,0.34229,0.24098,0.984174,0.585829,0.131802,0.836859,0.277884,0.219252,0.758017,0.959948,0.895492,0.406763,0.171754,0.937488,0.724038,0.640184,0.573885,0.683427,0.798386,0.856601,0.787602,0.479859,0.371116,0.447332,0.688024,0.940463,0.386146,0.753494,0.834107,0.887033,0.0129909,0.176397,0.128013,0.997165,0.762227,0.259815,0.834024,0.0401102,0.479067,0.592041,5.80335e-05,0.374559,0.998804,0.171812,0.312047,0.722842,0.811996,0.885932,0.406269,0.610382,0.742534,0.193871,0.0902406,0.11365,0.641203,0.778264,0.0541129,0.0273493,0.531758,0.88822,0.914382,0.544749,0.0646173,0.0423949,0.541914,0.826844,0.30221,0.375938,0.866954,0.781277,0.967979,0.867012,0.155836,0.966782,0.0388242,0.467883,0.689625,0.850821,0.353816,0.0958936,0.461203,0.0963493,0.289765,0.551443,0.209999,0.930968,0.329708,0.264112,0.958317,0.861466,0.152332,0.872699,0.406215,0.216949,0.915094,0.948129,0.043793,0.217304,0.324067,0.910747,0.998581,0.292046,0.777759,0.154417,0.258828,0.816583,0.6223,0.948453,0.667404,0.976116,0.0443463,0.128607,0.0724651,0.334111,0.68005,0.282464,0.265079,0.00975742,0.546576,0.223396,0.871223,0.698908,0.096095,0.277438,0.915857,0.0111891,0.225567,0.95965,0.228493,0.549634,0.870397,0.227074,0.84168,0.648156,0.381491,0.100508,0.46474,0.00379143,0.0489608,0.132144,0.979907,0.0933072,0.26075,0.0523723,0.427418,0.9408,0.334837,0.692497,0.950558,0.881413,0.915893,0.821781,0.580321,0.0119876,0.0992192,0.496178,0.0231767,0
.324787,0.455828,0.25167,0.874421,0.326226,0.478744,0.716101,0.974382,0.860235,0.816609,0.439122,0.864027,0.86557,0.571266,0.843934,0.958877,0.832016,0.896306,0.386295,0.772816,0.231143,0.0787921,0.723374,0.112556,0.994685,0.545155,0.692876,0.00667234,0.644374,0.189054,0.0298491,0.969161,0.644883,0.281519,0.843582,0.971108,0.760263,0.559683,0.94549,0.620498,0.376292,0.384612,0.484525,0.241862,0.955878,0.328459,0.200739,0.787894,0.224766,0.587034,0.56071,0.455909,0.665826,0.284084,0.568464,0.660511,0.829239,0.26134,0.667183,0.473613,0.450395,0.697032,0.442774,0.0952772,0.978551,0.286356,0.0663852,0.738814,0.846038,0.0118752,0.359313,0.22233,0.396487,0.843838,0.464191,0.352365,0.172297,0.66493,0.140259,0.397063,0.251964,0.700969,0.852972,0.91779,0.985053,0.421436,0.578301,0.814292,0.682776,0.245484,0.287906,0.133171,0.942516,0.73068,0.228448,0.921068,0.0170355,0.294833,0.659882,0.863074,0.306708,0.0191946,0.0854036,0.703195,0.863033,0.549595,0.0555603,0.0353301,0.214525,0.195819,0.432393,0.466489,0.896788,0.285365,0.38428,0.881842,0.706801,0.962581,0.696134,0.389577,0.208065,0.98404,0.522747,0.150581,0.71472,0.751195,0.071649,0.731755,0.046028,0.731531,0.594829,0.352736,0.750725,0.680233,0.0559317,0.613758,0.229828,0.111492,0.649088,0.444353,0.307311,0.0814814,0.910842,0.204099,0.366846,0.295122,0.085941,0.0736468,0.257703,0.782075,0.463223,0.465768,0.766115,0.985971,0.616349,0.480835,0.737166,0.687998,0.21259,0.783194,0.419529,0.807419,0.13593,0.170255,0.487652,0.191862,0.784013,0.717479,0.303354,0.433101,0.161832,0.610665,0.514582,0.0726745,0.814764,0.881429,0.367797,0.900705,0.955075,0.625499,0.682781,0.418299,0.0912674,0.448896,0.404269,0.707617,0.92973,0.141435,0.395615,0.142321,0.924628,0.815144,0.94974,0.0605584,0.985399,0.437391,0.25242,0.769412,0.15487,0.555774,0.202512,0.316702,0.166439,0.717095,0.389377,0.981203,0.598523,0.757173,0.881908,0.553599,0.382673,0.564689,0.971898,0.47394,0.0135842,0.376167,0.181557,0.943315,0.517602,0.577172,0.0856351,0.44223,0.392316,0.0353746,0.502789,0.377715,0.472766,0.755209,0.147127,0.627636,0.310982,0.349639,0.944339,0.477421,0.0667338,0.333716,0.458624,0.665257,0.090889,0.340532,0.218856,0.473562,0.90522,0.190753,0.947502,0.918805,0.56692,0.129059,0.862119,0.084522,0.706232,0.947754,0.526752,0.098548,0.983129,0.029541,0.476263,0.455895,0.78475,0.62339,0.0835309,0.095732,0.973029,0.0278695,0.573153,0.0397626,0.361585,0.0317766,0.70502,0.452474,0.372308,0.923876,0.926036,0.277529,0.114629,0.873538,0.196333,0.681549,0.00259793,0.0584526,0.766071,0.70883,0.00620684,0.292824,0.807377,0.989336,0.322365,0.283641,0.44523,0.107114,0.90703,0.528761,0.202846,0.880059,0.556631,0.775999,0.919822,0.918216,0.807776,0.624842,0.37069,0.180084,0.548717,0.296726,0.457613,0.663346,0.170265,0.653946,0.344896,0.172863,0.712399,0.110967,0.881692,0.718606,0.403791,0.68907,0.707942,0.726155,0.97271,0.153172,0.833269,0.879741,0.681933,0.0361157,0.7598,0.238564,0.812115,0.679622,0.15678,0.619891,0.304464,0.52747,0.799975,0.853181,0.824197,0.257588,0.516528,0.994461,0.911534,0.861423,0.167324,0.623933,0.97239,0.0490162,0.342539,0.376181,0.738086,0.0504804,0.102336,0.710796,0.203652,0.935605,0.590537,0.885586,0.971721,0.350337,0.12415,0.783836,0.0299594,0.28093,0.403726,0.334423,0.808401,0.203701,0.187604,0.632597,0.461289,0.704132,0.627058,0.372823,0.565555,0.794383,0.996756,0.537945,0.843399,0.339294,0.914126,0.581485,0.389775,0.0164621,0.292281,0.593427,0.952067,0.882818,0.479013,0.923789,0.233155,0.603163,0.707624,0.263114,0.884093,0.111351,0.597538,0.692494,0.315052,0.7
85142,0.325091,0.77634,0.489274,0.95215,0.149163,0.0548294,0.746532,0.145919,0.592775,0.589931,0.485213,0.506901,0.171415,0.874988,0.523363,0.463696,0.468415,0.475431,0.346514,0.947428,0.399219,0.579669,0.550591,0.106844,0.842784,0.434685,0.218194,0.440321,0.127179,0.533246,0.225464,0.45227,0.309586,0.714738,0.404419,0.458749,0.769567,0.150951,0.604668,0.362342,0.740882,0.0898806,0.869243,0.912297,0.964868,0.392606,0.375993,0.433284,0.868037,0.722507,0.380712,0.267256,0.302177,0.931303,0.374099,0.14496,0.365988,0.592294,0.585282,0.493167,0.125539,0.810745,0.945436,0.435126,0.525483,0.349856,0.893875,0.29505,0.500807,0.498542,0.657392,0.241689,0.588423,0.526635,0.153986,0.553292,0.919241,0.529979,0.986575,0.787277,0.252487,0.367287,0.0545333,0.554663,0.298591,0.428633,0.699623,0.664579,0.0209261,0.284905,0.157746,0.146466,0.09565,0.103182,0.581591,0.621133,0.453038,0.475466,0.916183,0.953845,0.974009,0.573574,0.195533,0.562432,0.100209,0.349519,0.115723,0.01945,0.879499,0.102298,0.806727,0.131985,0.469586,0.861261,0.686648,0.768176,0.289893,0.386272,0.432755,0.310819,0.671177,0.590501,0.457285,0.766827,0.693683,0.0388764,0.38796,0.14672,0.514342,0.304142,0.100565,0.488351,0.877717,0.296098,0.0507825,0.977926,0.645618,0.166506,0.997376,0.525116,0.268804,0.804103,0.657101,0.738389,0.665364,0.34375,0.506566,0.955257,0.730022,0.939321,0.266077,0.401199,0.529821,0.723362,0.168026,0.223504,0.762238,0.555985,0.370224,0.276581,0.860127,0.470789,0.764932,0.737844,0.766888,0.815714,0.71577,0.412506,0.98222,0.713146,0.937622,0.251024,0.517249,0.594723,0.989413,0.182613,0.938473,0.495979,0.137871,0.668495,0.435299,0.403948,0.0696938,0.965121,0.12731,0.237719,0.188624,0.889548,0.793704,0.558849,0.166129,0.653832,0.0296382,0.93106,0.391676,0.796526,0.746775,0.107446,0.209032,0.728995,0.820592,0.146654,0.980019,0.337841,0.741377,0.969432,0.520454,0.67985,0.465411,0.658325,0.348345,0.90071,0.0622724,0.418039,0.865831,0.189582,0.655759,0.0544553,0.0791299,0.449463,0.613304,0.245259,0.103295,0.642942,0.176319,0.494971,0.439468,0.923094,0.602417,0.6485,0.652089,0.423008,0.795154,0.632107,0.760849,0.536531,0.601539,0.281303,0.216381,0.06695,0.939628,0.564727,0.96766,0.00190041,0.982766,0.833491,0.191482,0.638525,0.887947,0.270612,0.0879877,0.501251,0.515871,0.191283,0.144193,0.69219,0.686253,0.583661,0.615284,0.28867,0.232161,0.267373,0.711678,0.0273152,0.89948,0.472527,0.563846,0.501019,0.75383,0.780227,0.567969,0.693458,0.344954,0.53563,0.695358,0.32772,0.369121,0.886841,0.966245,0.257067,0.157453,0.0542323,0.758318,0.673324,0.245515,0.902511,0.365514,0.931768,0.486172,0.980798,0.220438,0.718334,0.248171,0.932116,0.745649,0.147651,0.404643,0.309495,0.64867,0.158473,0.0897221,0.21664,0.851931,0.434676,0.752269,0.547289,0.762396,0.12139,0.43413,0.728641,0.378457,0.591583,0.782873,0.136776,0.264908,0.0283881,0.0392866,0.630422,0.960156,0.525459,0.61122,0.180594,0.243792,0.859391,0.11271,0.989441,0.00704207,0.517353,0.298936,0.655712,0.675826,0.388658,0.872352,0.527757,0.823334,0.624621,0.075046,0.58573,0.746011,0.509176,0.314371,0.124469,0.10076,0.0972444,0.261244,0.365667,0.125633,0.300531,0.996089,0.0857887,0.82599,0.607309,0.266383,0.069782,0.4667,0.379093,0.0592232,0.473742,0.896446,0.358159,0.129455,0.572271,0.746817,0.0018065,0.100028,0.570152,0.626428,0.175074,0.155882,0.372439,0.68425,0.470253,0.496908,0.78501,0.567498,0.758152,0.150677,0.69313,0.0586827,0.146766,0.778919,0.884672,0.754075,0.0453018,0.954454,0.220775,0.424395,0.0136776,0.694518,0.32084,0.371837,0.823972,0.893111,0.118654,0.825779,0.99313
9,0.688806,0.452206,0.168213,0.844688,0.824645,0.852463,0.314941,0.321553,0.637473,0.882439,0.0797046,0.78815,0.575569,0.138387,0.934916,0.354488,0.0230596,0.688991,0.399789,0.977514,0.909766,0.824184,0.991192,0.604284,0.145024,0.363028,0.428256,0.0381354,0.481682,0.254035,0.0312745,0.170488,0.706241,0.199487,0.0151756,0.530886,0.0519502,0.330116,0.852439,0.689423,0.212555,0.932143,0.477573,0.788124,0.0705306,0.412488,0.142611,0.0935903,0.10148,0.542401,0.0711043,0.0112461,0.366585,0.0622959,0.61553,0.511609,0.425324,0.0437862,0.549744,0.907007,0.297821,0.581019,0.0774946,0.00406192,0.780506,0.0926701,0.534948,0.832456,0.422787,0.387387,0.521879,0.635342,0.31953,0.999452,0.423465,0.390061,0.41194,0.566077,0.483651,0.51342,0.108478,0.554755,0.524666,0.475063,0.617051,0.140196,0.986672,0.0423754,0.183982,0.536416,0.949382,0.481803,0.117435,0.0268766,0.485865,0.897941,0.119547,0.0208132,0.730398,0.542333,0.4082,0.252277,0.177675,0.72773,0.251729,0.60114,0.117791,0.66367,0.167217,0.601442,0.17709,0.275694,0.156197,0.701756,0.750757,0.773249,0.841952,0.737428,0.815624,0.0259341,0.273845,0.765006,0.507737,0.39128,0.791883,0.993603,0.289221,0.911429,0.0144159,0.0196188,0.453763,0.422616,0.271896,0.631437,0.150346,0.523625,0.232577,0.268137,0.187295,0.399794,0.869579,0.364384,0.675488,0.0257763,0.0661398,0.426245,0.799025,0.908092,0.163674,0.614649,0.934026,0.437518,0.379655,0.441763,0.828798,0.171537,0.435366,0.118019,0.0829666,0.449782,0.137638,0.536729,0.872397,0.409534,0.168166,0.0227435,0.933159,0.400744,0.29088,0.120453,0.800538,0.160459,0.484838,0.476026,0.186236,0.550978,0.902271,0.98526,0.459069,0.0659448,0.599909,0.393095,0.503463,0.979564,0.834858,0.332261,0.151101,0.270223,0.45028,0.234068,0.720005,0.587917,0.770797,0.592402,0.997451,0.938964,0.615146,0.93061,0.339707,0.906026,0.0510632,0.140245,0.0664857,0.535901,0.616271,0.252721,0.0868784,0.518542,0.237982,0.545947,0.584487,0.837891,0.939042,0.08795,0.817455,0.7739,0.420211,0.968557,0.0441232,0.87049,0.202625,0.764128,0.458408,0.973422,0.35653,0.455859,0.912386,0.971676,0.386468,0.252093,0.877703,0.437532,0.392338,0.944188,0.973432,0.00860929,0.19691,0.0603108,0.527151,0.434892,0.606258,0.111638,0.272783,0.5453,0.199588,0.0902382,0.3192,0.619799,0.0587949,0.363323,0.49029,0.26142,0.127452,0.948698,0.234842,0.483982,0.404556,0.147228,0.455658,0.791025,0.399321,0.333361,0.228556,0.791659,0.277549,0.201989,0.800268,0.474459,0.262299,0.32742,0.90935,0.868558,0.439058,0.182133,0.413858,0.638647,0.272371,0.733058,0.258446,0.331166,0.0963814,0.748736,0.592586,0.223833,0.697434,0.827428,0.707815,0.10199,0.974656,0.163473,0.893015,0.373977,0.496834,0.121571,0.165636,0.774383,0.32356,0.965904,0.248842,0.585859,0.293324,0.158193,0.454416,0.732383,0.340326,0.868274,0.37103,0.612697,0.601332,0.629476,0.943864,0.697714,0.378212,0.53645,0.921547,0.0756458,0.363878,0.629362,0.177636,0.338534,0.792835,0.0706506,0.712511,0.289669,0.192222,0.878147,0.0640524,0.515781,0.844051,0.312895,0.10164,0.137375,0.471087,0.556056,0.869758,0.811413,0.424331,0.240788,0.42411,0.025663,0.870264,0.367974,0.723377,0.248476,0.904424,0.644924,0.324122,0.268302,0.274285,0.501757,0.606836,0.0671201,0.572408,0.319346,0.356789,0.76463,0.197493,0.420842,0.280411,0.0415441,0.733736,0.382051,0.178919,0.204823,0.938107,0.0486776,0.0162364,0.362438,0.289465,0.440347,0.388101,0.159729,0.808321,0.111478,0.408205,0.712745,0.756401,0.732327,0.981046,0.0306865,0.234084,0.587882,0.0978067,0.806492,0.907229,0.454596,0.571122,0.104722,0.875437,0.851532,0.146266,0.609174,0.233583,0.325185
,0.813997,0.17169,0.373863,0.830233,0.534128,0.663328,0.27058,0.922229,0.823057,0.0789009,0.0337072,0.231262,0.791646,0.790109,0.963589,0.772692,0.820795,0.197673,0.360574,0.918602,0.00416468,0.267803,0.373198,0.575286,0.372524,0.248635,0.426819,0.51879,0.857809,0.660402,0.843975,0.671806,0.832092,0.217838,0.502039,0.366221,0.881166,0.772619,0.28845,0.704224,0.85152,0.322157,0.935486,0.643166,0.112266,0.899075,0.415858,0.933061,0.0967472,0.776432,0.851662,0.100912,0.0442348,0.22486,0.676198,0.416759,0.473495,0.103017,0.935549,0.331304,0.763419,0.779525,0.00310928,0.595511,0.997363,0.505148,0.961732,0.878529,0.277768,0.250182,0.582753,0.129288,0.572339,0.518238,0.772453,0.684605,0.417313,0.188311,0.617666,0.51406,0.964743,0.469328,0.614972,0.00897761,0.694188,0.29117,0.425737,0.167683,0.394187,0.361286,0.498987,0.157606,0.140811,0.502096,0.753117,0.138174,0.00724426,0.714849,0.0167032,0.285012,0.965031,0.599456,0.414299,0.537371,0.117694,0.186753,0.221976,0.535007,0.375064,0.839641,0.0490676,0.339806,0.30897,0.66404,0.348784,0.00315763,0.95521,0.774521,0.170841,0.349398,0.135807,0.669827,0.507004,0.276619,0.171923,0.260121,0.414793,0.179167,0.974971,0.431496,0.464179,0.940002,0.0309518,0.878479,0.477373,0.148646,0.0652314,0.699348,0.683653,0.440295,0.53899,0.732721,0.780102,0.847959,0.396761,0.128886,0.851117,0.351971,0.903407,0.0219575,0.701369,0.039214,0.691785,0.208372,0.315833,0.863708,0.468494,0.730625,0.0428755,0.443464,0.162121,0.507055,0.383466,0.193073,0.385534,0.860839,0.341719,0.450765,0.560187,0.0253725,0.89106,0.0991766,0.758094,0.671162,0.947136,0.154854,0.800047,0.798253,0.506825,0.703454,0.82021,0.208194,0.742668,0.511995,0.416566,0.0585007,0.375703,0.88506,0.789126,0.418579,0.328524,0.951247,0.925633,0.71199,0.14432,0.311167,0.572829,0.486039,0.761932,0.133016,0.511411,0.652992,0.232192,0.269505,0.324154,0.179328,0.424359,0.124201,0.977581,0.931185,0.827655,0.797791,0.139379,0.570323,0.309786,0.555945,0.628824,0.685489,0.441005,0.41795,0.104067,0.769529,0.369197,0.0297006,0.481518,0.513517,0.340868,0.0543471,0.999556,0.1028,0.187363,0.510967,0.755792,0.419555,0.780472,0.0799454,0.598883,0.204831,0.204147,0.576464,0.136016,0.0318019,0.374255,0.275395,0.602125,0.684041,0.831339,0.23095,0.369529,0.272344,0.6489,0.473597,0.0418726,0.0180969,0.503297,0.523391,0.531614,0.844165,0.577738,0.53117,0.946964,0.765101,0.0421371,0.702756,0.184656,0.822609,0.782701,0.783539,0.0274405,0.986848,0.360003,0.163457,0.01865,0.734258,0.438851,0.620775,0.418298,0.270191,0.851725,0.787828,0.542535,0.500625,0.261424,0.584407,0.518722,0.764722,0.107799,0.0503355,0.608887,0.685537,0.581505,0.555851,0.450638,0.623642,0.258607,0.635294,0.446252,0.0413086,0.418833,0.473692,0.0281566,0.778836,0.637149,0.0468066,0.513094,0.0759999,0.667582,0.931392,0.346191,0.519307,0.71922,0.888725,0.0199316,0.980644,0.473133,0.538653,0.745366,0.580931,0.588989,0.354253,0.266468,0.170494,0.910104,0.717106,0.794136,0.168711,0.3524,0.240388,0.210019,0.771233,0.71408,0.238176,0.550068,0.351229,0.284983,0.0631621,0.427229,0.952565,0.994554,0.773419,0.471872,0.713774,0.662145,0.491803,0.694418,0.135278,0.0304565,0.439784,0.716209,0.619445,0.794037,0.982677,0.789939,0.70414,0.699783,0.584076,0.872851,0.0521829,0.824464,0.0828707,0.823415,0.538544,0.321047,0.373484,0.889772,0.606029,0.436646,0.317001,0.558594,0.4312,0.0904203,0.0304657,0.144974,0.752565,0.522269,0.839392,0.887842,0.552725,0.279176,0.604051,0.172171,0.0732125,0.586729,0.96211,0.777353,0.286512,0.546185,0.650204,0.338695,0.370649,0.733075,0.16211,0.909192,0.05412
17,0.535594,0.798965,0.660151,0.97224,0.115966,0.218745,0.40344,0.206386,0.249211,0.548414,0.958951,0.77148,0.387806,0.846793,0.324205,0.666982,0.450845,0.496376,0.740194,0.0375734,0.458485,0.517547,0.324085,0.00467053,0.167752,0.66278,0.375319,0.900826,0.82489,0.284512,0.954948,0.360484,0.0834763,0.615099,0.332724,0.199442,0.833845,0.736165,0.405828,0.0830554,0.284579,0.364779,0.854535,0.672385,0.211572,0.17874,0.339367,0.662417,0.675116,0.0795609,0.699991,0.133601,0.597108,0.0240758,0.138272,0.76486,0.686856,0.513591,0.665686,0.511746,0.798103,0.620634,0.87223,0.881579,0.235734,0.204955,0.0810211,0.0695781,0.941119,0.486849,0.152634,0.225698,0.851628,0.0071688,0.898082,0.0632006,0.185909,0.237449,0.725618,0.861025,0.31701,0.425608,0.994627,0.914118,0.449684,0.132899,0.678978,0.13654,0.64649,0.344664,0.648286,0.444593,0.965298,0.520516,0.326172,0.201032,0.72547,0.407193,0.27061,0.666589,0.894042,0.423244,0.892287,0.74567,0.430412,0.79037,0.808871,0.616322,0.0278185,0.534489,0.477347,0.344828,0.960097,0.471974,0.258946,0.409781,0.604873,0.937924,0.546321,0.251363,0.282588,0.194607,0.695955,0.247887,0.715123,0.0221272,0.448919,0.440593,0.42932,0.719529,0.107183,0.323362,0.142773,0.99947,0.0690328,0.573185,0.789839,0.877904,0.189507,0.817658,0.412393,0.666854,0.162486,0.37249,0.138828,0.421433,0.782271,0.7437,0.359357,0.328592,0.995063,0.641946,0.523199,0.691018,0.889832,0.238322,0.713145,0.338751,0.678915,0.142465,0.0582801,0.786098,0.465828,0.201053,0.785567,0.534861,0.774238,0.575407,0.412764,0.963745,0.393065,0.825157,0.630598,0.555551,0.197647,0.769426,0.976984,0.979917,0.513126,0.336341,0.30851,0.508189,0.978286,0.831709,0.199207,0.868119,0.0700303,0.912352,0.20687,0.748945,0.0548173,0.26515,0.535043,0.520645,0.466203,0.32061,0.0555057,0.240441,0.896017,0.46827,0.204185,0.289082,0.293427,0.834783,0.844633,0.491074,0.604209,0.821616,0.470991,0.117335,0.157957,0.779501,0.625524,0.136243,0.611209,0.824731,0.00436224,0.681239,0.737083,0.211232,0.430185,0.7919,0.476382,0.965227,0.312545,0.942585,0.285837,0.368051,0.183026,0.181854,0.836321,0.387211,0.470936,0.129748,0.221994,0.315568,0.620821,0.826203,0.137185,0.0918123,0.943539,0.295142,0.871313,0.569063,0.431385,0.482522,0.393793,0.435747,0.163761,0.130876,0.64698,0.593946,0.922776,0.123362,0.559173,0.235321,0.0659471,0.845011,0.603372,0.248973,0.026865,0.439692,0.636184,0.497801,0.56944,0.858178,0.813369,0.190261,0.684381,0.950554,0.282074,0.62792,0.245696,0.153386,0.196983,0.677081,0.635908,0.590776,0.112828,0.79967,0.721652,0.759807,0.393616,0.644428,0.883169,0.952789,0.879749,0.949116,0.7978,0.48312,0.198089,0.824665,0.922813,0.834273,0.322466,0.492253,0.692451,0.135835,0.682514,0.376832,0.0863887,0.964588,0.00475231,0.332084,0.117974,0.201735,0.00916483,0.753882,0.792511,0.121993,0.553552,0.514163,0.8818,0.947168,0.158591,0.76497,0.899957,0.0383393,0.714086,0.697756,0.521459,0.912175,0.522421,0.444272,0.746448,0.844886,0.936525,0.438899,0.980721,0.619039,0.815731,0.0671098,0.583626,0.820484,0.399194,0.7016,0.0222187,0.408359,0.455482,0.81473,0.530352,0.00903426,0.328893,0.412152,0.956202,0.487483,0.177121,0.856158,0.525823,0.891207,0.553914,0.0472822,0.803383,0.0763352,0.491554,0.549831,0.921221,0.428079,0.98873,0.901943,0.0471173,0.804461,0.969052,0.630743,0.624945,0.368246,0.332343,0.647164,0.776605,0.787826,0.461894,0.306957,0.79686,0.790786,0.719108,0.753062,0.27827,0.89623,0.60922,0.804093,0.787437,0.163135,0.851375,0.59082,0.23947,0.342929,0.140651,0.160691,0.771008,0.129381,0.0626339,0.818125,0.933842,0.0316862,0.448868,0.55878
7,0.399932,0.781212,0.205951,0.176538,0.569037,0.667845,0.483494,0.365897,0.458631,0.202603,0.118959,0.736901,0.0988324,0.728179,0.540994,0.88627,0.891314,0.392368,0.477089,0.130784,0.735297,0.61774,0.291475,0.506305,0.747121,0.354109,0.32443,0.680963,0.385795,0.773299,0.239751,0.785728,0.554511,0.445702,0.962265,0.123548,0.113547,0.445759,0.489445,0.572178,0.648362,0.608405,0.309079,0.747194,0.336584,0.850073,0.633464,0.227898,0.242441,0.110553,0.358682,0.977739,0.728293,0.650157,0.484044,0.475415,0.00426598,0.808474,0.156378,0.390061,0.581773,0.396129,0.175789,0.136283,0.841831,0.138054,0.259831,0.955378,0.583813,0.749277,0.527556,0.232176,0.357682,0.836635,0.97937,0.694265,0.686708,0.612834,0.922163,0.929149,0.723387,0.280845,0.906888,0.451681,0.931002,0.390932,0.927095,0.935268,0.199406,0.0834732,0.325329,0.781179,0.479602,0.501118,0.917462,0.321432,0.639172,0.177293,0.27681,0.222986,0.92657,0.804366,0.455161,0.284252,0.641001,0.434531,0.978517,0.327709,0.0473652,0.90068,0.256858,0.770752,0.181525,0.163746,0.222433,0.112527,0.554677,0.149529,0.0477952,0.754083,0.233002,0.373124,0.535261,0.712604,0.874242,0.452723,0.034036,0.513414,0.630017,0.310846,0.7364,0.556587,0.115212,0.191561,0.840839,0.756212,0.626092,0.819356,0.0839206,0.673457,0.720036,0.340778,0.44421,0.901562,0.504524,0.666643,0.0140889,0.059201,0.816172,0.0618841,0.813284,0.0491739,0.435008,0.348545,0.761778,0.309251,0.801269,0.795814,0.822665,0.431285,0.106659,0.559065,0.987872,0.221871,0.750626,0.828711,0.978083,0.376718,0.648067,0.0620037,0.0501755,0.368103,0.402782,0.494385,0.269664,0.907306,0.161029,0.283753,0.966507,0.977201,0.345637,0.779791,0.0263746,0.780646,0.128336,0.788152,0.0898962,0.929605,0.583966,0.912561,0.36089,0.690625,0.471626,0.348762,0.912496,0.222252,0.177473,0.890579,0.59897,0.825539,0.952583,0.649145,0.193642,0.355365,0.143531,0.463306,0.262671,0.304559,0.74706,0.229178,0.28176,0.092697,0.00896837,0.308135,0.873343,0.137304,0.0962868,0.963239,0.0669091,0.680252,0.8758,0.427799,0.370878,0.347426,0.776561,0.283374,0.569677,0.954034,0.173953,0.168647,0.779573,0.126535,0.817792,0.973215,0.4819,0.961323,0.436521,0.744571,0.265882,0.183581,0.973748,0.547642,0.276278,0.982717,0.855777,0.149621,0.120021,0.952064,0.11286,0.18693,0.632316,0.988659,0.614729,0.00319357,0.336085,0.39129,0.286567,0.905762,0.345324,0.46052,0.0744093,0.124897,0.587055,0.892201,0.098112,0.068955,0.853524,0.534633,0.813526,0.119407,0.718215,0.787274,0.667049,0.994493,0.769991,0.522826,0.144114,0.890012,0.474889,0.256973,0.0769422,0.107205,0.245633,0.691671,0.110399,0.581718,0.0829618,0.396966,0.48748,0.428286,0.857486,0.561889,0.553183,0.444541,0.454091,0.651295,0.513496,0.307615,0.185928,0.327021,0.427022,0.904143,0.114296,0.0940706,0.898636,0.884286,0.616896,0.0427493,0.774298,0.0917857,0.299723,0.85124,0.198991,0.545355,0.542912,0.30939,0.127073,0.625874,0.706356,0.614553,0.0541594,0.563842,0.176442,0.607342,0.00838271,0.630533,0.258637,0.521878,0.938148,0.444565,0.8489,0.36517,0.348708,0.963195,0.45924,0.247344,0.847482,0.0761367,0.290093,0.62178,0.167922,0.589815,0.473021,0.366914,0.135171,0.0159325,0.676304,0.262244,0.641806,0.38266,0.876796,0.695966,0.946502,0.0532386,0.303308,0.954885,0.683772,0.561945,0.476763,0.62192,0.00650982,0.325663,0.987089,0.355218,0.288859,0.44633,0.602561,0.13634,0.522466,0.892654,0.758121,0.690389,0.48247,0.231141,0.0573026,0.61764,0.247074,0.733606,0.879884,0.88888,0.116267,0.75668,0.584845,0.0627687,0.809919,0.888153,0.0176536,0.493691,0.450098,0.494417,0.11561,0.456608,0.82008,0.1027,0.811825,0.1
08939,0.549029,0.414387,0.245279,0.0714959,0.307041,0.00339983,0.761885,0.789511,0.234541,0.819187,0.407151,0.481614,0.552794,0.287035,0.370494,0.66906,0.0437159,0.95534,0.731829,0.853635,0.843493,0.749483,0.347326,0.29359,0.2439,0.462936,0.750198,0.0639799,0.565636,0.562023,0.172919,0.114665,0.97641,0.418198,0.186161,0.283452,0.421598,0.948046,0.0729626,0.656139,0.767233,0.480114,0.137753,0.320027,0.767149,0.508247,0.989088,0.810865,0.463587,0.720917,0.6645,0.30708,0.4704,0.011826,0.60067,0.7143,0.474762,0.350868,0.77828,0.0403976,0.912892,0.951198,0.155063,0.889302,0.369396,0.341224,0.172753,0.790994,0.28927,0.245716,0.447133,0.0565031,0.72583,0.584886,0.37653,0.492979,0.0931336,0.365618,0.303845,0.556721,0.0865351,0.968345,0.8638,0.556935,0.980171,0.46447,0.271235,0.454933,0.815338,0.0495141,0.495331,0.72823,0.000712385,0.650393,0.617532,0.370109,0.991617,0.790285,0.161103,0.280887,0.0360014,0.608236,0.33739,0.761831,0.193122,0.71392,0.254811,0.286256,0.0795381,0.558656,0.842976,0.166073,0.527,0.706776,0.723008,0.507171,0.171247,0.994243,0.962104,0.986585,0.0437567,0.457435,0.714815,0.0444691,0.107828,0.332347,0.414578,0.0994451,0.122632,0.575681,0.380332,0.158634,0.183916,0.717721,0.920465,0.377038,0.431642,0.175276,0.663294,0.51118,0.733931,0.50627,0.677253,0.260932,0.213046,0.400261,0.768103,0.384293,0.394504,0.730208,0.370878,0.43826,0.187643,0.0856929,0.48273,0.295471,0.41804,0.897307,0.394916,0.540672,0.472988,0.775248,0.699306,0.656904,0.492969,0.619771,0.0339427,0.924611,0.795047,0.697237,0.43579,0.528978,0.203506,0.113043,0.78991,0.416553,0.513304,0.558013,0.800846,0.907808,0.288221,0.171723,0.346068,0.475863,0.257416,0.828798,0.771334,0.675456,0.726105,0.16625,0.216128,0.199093,0.941497,0.915434,0.855998,0.434466,0.535205,0.88994,0.359077,0.330252,0.587177,0.794867,0.85923,0.790683,0.90791,0.649139,0.207236,0.421215,0.207153,0.00808138,0.329022,0.495373,0.179805,0.67509,0.971236,0.437221,0.503888,0.74257,0.112677,0.229993,0.90882,0.328805,0.429086,0.850317,0.244239,0.285084,0.284784,0.779444,0.175024,0.643861,0.109696,0.762201,0.438728,0.968926,0.552884,0.346639,0.618065,0.76012,0.767853,0.825218,0.768201,0.0968756,0.320591,0.948006,0.771966,0.291827,0.385227,0.275854,0.0343977,0.497904,0.505848,0.943218,0.826709,0.934934,0.793535,0.0709488,0.220018,0.0783191,0.850393,0.395042,0.72218,0.96009,0.157242,0.160908,0.929016,0.710126,0.507547,0.547081,0.470246,0.2754,0.372299,0.238448,0.372276,0.69289,0.186454,0.144242,0.984717,0.571681,0.420096,0.019115,0.0695846,0.925944,0.962333,0.896294,0.860878,0.755868,0.967243,0.0808956,0.834187,0.817636,0.475937,0.556367,0.777726,0.63318,0.717275,0.706741,0.343306,0.224822,0.253822,0.813553,0.500223,0.626121,0.0520002,0.872499,0.319011,0.238454,0.0167408,0.303728,0.810134,0.436837,0.322843,0.879719,0.362781,0.285176,0.776013,0.223659,0.0410439,0.743256,0.304555,0.875231,0.560892,0.780492,0.431598,0.338618,0.413672,0.148873,0.0453589,0.756978,0.373696,0.299181,0.570531,0.873918,0.925302,0.622531,0.746417,0.244313,0.860985,0.763158,0.548041,0.671119,0.199995,0.870884,0.550838,0.562776,0.15606,0.326851,0.786435,0.197104,0.0701072,0.0909903,0.0723354,0.630999,0.871483,0.503933,0.969617,0.285155,0.652807,0.0149757,0.0421329,0.0265024,0.314157,0.612664,0.900421,0.239459,0.235195,0.646838,0.483772,0.0961802,0.409996,0.0318129,0.7673,0.609991,0.902697,0.318138,0.172767,0.0587578,0.644989,0.959203,0.255862,0.715097,0.050193,0.328198,0.346096,0.921676,0.832131,0.315713,0.20683,0.484938,0.330688,0.248963,0.51144,0.644845,0.861627,0.411861,0.884304,0.096
822,0.0586986,0.368075,0.193002,0.468694,0.399888,0.960302,0.0786852,0.302586,0.27844,0.251452,0.361343,0.923429,0.210655,0.617206,0.638526,0.260848,0.945403,0.984621,0.182523,0.777534,0.300334,0.389353,0.262472,0.631022,0.638316,0.773912,0.275867,0.499943,0.185772,0.160171,0.596765,0.244471,0.528246,0.789767,0.713165,0.928134,0.750069,0.791851,0.23072,0.0285089,0.043303,0.592063,0.951938,0.253958,0.209268,0.590464,0.514806,0.154672,0.575085,0.697329,0.932206,0.875419,0.0866827,0.194678,0.506441,0.724999,0.968589,0.782308,0.224942,0.154362,0.942479,0.821708,0.398833,0.470724,0.611475,0.111998,0.398859,0.361544,0.903849,0.629578,0.390053,0.947152,0.221641,0.341991,0.20111,0.43091,0.932455,0.715916,0.585581,0.50754,0.413245,0.517787,0.382959,0.499927,0.712465,0.8894,0.224927,0.681054,0.671708,0.449869,0.835416,0.614186,0.271577,0.234248,0.0849106,0.883052,0.346247,0.483769,0.244596,0.250095,0.113348,0.634649,0.197247,0.334989,0.976641,0.398357,0.765899,0.909096,0.114273,0.35148,0.416636,0.527517,0.869267,0.799594,0.0274448,0.581732,0.688994,0.252371,0.262786,0.360702,0.70224,0.0982015,0.974888,0.973817,0.33245,0.0597985,0.856869,0.678697,0.543568,0.101465,0.928792,0.656915,0.736114,0.126039,0.991904,0.712755,0.524396,0.757803,0.62185,0.638669,0.109283,0.038486,0.166186,0.97855,0.83808,0.193631,0.560282,0.527074,0.446002,0.823068,0.887776,0.148243,0.92127,0.862664,0.12206,0.25372,0.922463,0.978929,0.932416,0.46603,0.0803936,0.861208,0.122946,0.816508,0.987248,0.11485,0.529263,0.511644,0.872653,0.151113,0.150313,0.981936,0.189599,0.316499,0.960486,0.0276796,0.51013,0.520769,0.554754,0.956132,0.343837,0.44253,0.104375,0.265107,0.305194,0.226435,0.518826,0.227657,0.205363,0.451243,0.693687,0.285757,0.312451,0.816633,0.102265,0.299698,0.931483,0.631528,0.811342,0.804136,0.782641,0.961655,0.786072,0.97224,0.278154,0.746558,0.99992,0.788284,0.267327,0.554674,0.744416,0.611164,0.997204,0.848791,0.876271,0.302398,0.0752259,0.395097,0.530054,0.280589,0.846339,0.223741,0.566346,0.15879,0.040374,0.668611,0.458489,0.971857,0.300138,0.269831,0.775993,0.0827794,0.231486,0.562065,0.0550196,0.50964,0.308623,0.0549395,0.297925,0.57595,0.609613,0.042341,0.187114,0.606817,0.891132,0.0633846,0.909215,0.966358,0.458482,0.439269,0.246947,0.304821,0.663011,0.813293,0.463611,0.703385,0.481904,0.9221,0.675242,0.782042,0.191931,0.451234,0.864822,0.423417,0.0132988,0.919841,0.933058,0.321922,0.974781,0.230982,0.897872,0.584394,0.273323,0.0849861,0.191211,0.164456,0.148371,0.100426,0.130814,0.606852,0.539696,0.377761,0.911673,0.202706,0.191054,0.375284,0.906091,0.672958,0.297384,0.581333,0.455001,0.489315,0.0325667,0.319822,0.912732,0.0458655,0.239664,0.84579,0.367787,0.214445,0.0767721,0.265659,0.798839,0.350095,0.350646,0.99005,0.514551,0.499016,0.0904761,0.645365,0.105868,0.630172,0.0231255,0.0175417,0.832878,0.21418,0.392826,0.738969,0.887138,0.69021,0.320301,0.342139,0.179525,0.352868,0.661961,0.0922573,0.398733,0.901625,0.938047,0.766521,0.116069,0.0148192,0.0321803,0.914908,0.364915,0.382826,0.904958,0.879465,0.881842,0.995434,0.52483,0.987711,0.625606,0.547955,0.00525221,0.458483,0.762135,0.398078,0.197452,0.649273,0.0882883,0.517753,0.991412,0.267813,0.870621,0.653373,0.360071,0.269355,0.554997,0.298118,0.0358756,0.671067,0.312937,0.0680559,0.585975,0.677852,0.450882,0.490933,0.557317,0.332724,0.486367,0.0821468,0.320434,0.111972,0.630102,0.325687,0.570455,0.392238,0.723765,0.767908,0.0415107,0.812053,0.285661,0.0329225,0.0798664,0.156282,0.686295,0.439937,0.425637,0.241293,0.738055,0.461513,0.912359,0.050992,0.
529569,0.498334,0.728844,0.98045,0.989267,0.28616,0.313174,0.475633,0.368307,0.633609,0.587605,0.99841,0.959295,0.158061,0.390647,0.68306,0.925968,0.432158,0.495113,0.211629,0.46508,0.574979,0.367911,0.151376,0.0149165,0.793548,0.392668,0.752971,0.255061,0.305028,0.803963,0.78463,0.803362,0.532807,0.76508,0.792628,0.818968,0.0782543,0.268261,0.187275,0.711863,0.855867,0.185684,0.671158,0.0139272,0.576332,0.354218,0.939895,0.00848951,0.849331,0.151525,0.47357,0.42431,0.519436,0.624946,0.439227,0.312985,0.0176139,0.192198,0.568046,0.322641,0.996162,0.352675,0.126003,0.528969,0.117755,0.918631,0.347936,0.19601,0.186893,0.535211,0.907873,0.0427592,0.720895,0.579031,0.0566864,0.297227,0.933248,0.996582,0.305717,0.782579,0.148107,0.779287,0.206889,0.667543,0.404232,0.646116,0.980527,0.421846,0.838314,0.548573,0.744487,0.834476,0.901248,0.870491,0.363444,0.0190036,0.789122,0.71138,0.215013,0.976014,0.246591,0.122886,0.0187736,0.967487,0.701917,0.07546,0.264714,0.635165,0.0720419,0.57043,0.417744,0.220148,0.349717,0.624633,0.887691,0.753949,0.270749,0.868218,0.175795,0.109063,0.416791,0.920282,0.943538,0.318039,0.790773,0.306983,0.337043,0.579895,0.0183628,0.552056,0.555909,0.264954,0.674942,0.574683,0.232441,0.376858,0.650143,0.497154,0.0120232,0.722185,0.0675845,0.429767,0.942333,0.417301,0.0544004,0.830024,0.17125,0.325149,0.698242,0.347045,0.434212,0.115033,0.267327,0.377751,0.433072,0.0580998,0.684733,0.770114,0.637994,0.703096,0.32217,0.193903,0.96805,0.997112,0.768586,0.200491,0.37397,0.418729,0.697645,0.385994,0.140913,0.765229,0.815761,0.0832461,0.182531,0.870161,0.91327,0.353781,0.195311,0.611512,0.700826,0.629523,0.726545,0.968153,0.00727355,0.159617,0.0262523,0.692007,0.929731,0.664247,0.395103,0.251901,0.85815,0.363153,0.249013,0.626736,0.563643,0.622984,0.0454643,0.261288,0.00897748,0.186377,0.0265177,0.824738,0.269624,0.209048,0.694899,0.182894,0.562829,0.89021,0.794406,0.263655,0.519733,0.520951,0.231807,0.527006,0.680568,0.25806,0.219013,0.610299,0.922306,0.614116,0.8622,0.780456,0.977269,0.111214,0.407192,0.540912,0.734197,0.452656,0.8022,0.743175,0.639034,0.828718,0.567913,0.908657,0.0377663,0.262813,0.0915511,0.600595,0.153023,0.885957,0.86425,0.672756,0.406908,0.0960574,0.199762,0.0874756,0.354117,0.418775,0.697774,0.276423,0.0328912,0.559975,0.0568792,0.0101599,0.671188,0.464071,0.551072,0.405386,0.916727,0.353272,0.14856,0.555761,0.18199,0.716474,0.464418,0.219756,0.979286,0.555969,0.820352,0.132309,0.441926,0.684602,0.805065,0.848834,0.78066,0.00482659,0.93631,0.134777,0.423602,0.634084,0.4112,0.456493,0.194059,0.468079,0.466653,0.865247,0.93215,0.0177249,0.270633,0.848877,0.370997,0.419193,0.404638,0.552987,0.135667,0.869057,0.772744,0.114953,0.425026,0.593096,0.247262,0.866953,0.277698,0.0523265,0.715787,0.0583573,0.0571531,0.652097,0.193134,0.480755,0.286181,0.604333,0.937248,0.48024,0.0724124,0.403901,0.345487,0.0045624,0.421626,0.61612,0.85344,0.792623,0.0353134,0.258078,0.34561,0.17098,0.127135,0.118354,0.285933,0.552161,0.711449,0.533195,0.419114,0.989147,0.585521,0.1349,0.0475044,0.642675,0.786997,0.240638,0.123429,0.0731786,0.844972,0.0606773,0.553419,0.917384,0.464578,0.898906,0.921947,0.886204,0.515027,0.775386,0.678827,0.55034,0.0334644,0.0244367,0.72132,0.160599,0.14279,0.00725312,0.71276,0.85424,0.540448,0.131874,0.843387,0.12597,0.266774,0.890891,0.768644,0.0537717,0.131529,0.892074,0.12695,0.976501,0.952751,0.680369,0.893885,0.417329,0.579276,0.815832,0.303533,0.0943021,0.591218,0.98236,0.644642,0.624682,0.00679646,0.365962,0.785282,0.149587,0.373215,0.498042
,0.00382643,0.913663,0.629916,0.847213,0.0396329,0.89669,0.738104,0.808277,0.950462,0.869634,0.700351,0.0774119,0.846135,0.653101,0.757781,0.74002,0.0704305,0.337057,0.555852,0.373964,0.431359,0.14707,0.356323,0.0760007,0.771752,0.36312,0.441963,0.557033,0.512707,0.815178,0.0550751,0.516533,0.728841,0.684991,0.363746,0.768474,0.581681,0.10185,0.576751,0.532142,0.971484,0.277102,0.609554,0.817619,0.930203,0.367335,0.557639,0.000633662,0.704392,0.11349,0.374597,0.13575,0.26056,0.730921,0.211751,0.0323117,0.0940403,0.653714,0.589345,0.606747,0.468892,0.64442,0.12328,0.197733,0.329411,0.487026,0.966207,0.911091,0.588877,0.542958,0.443233,0.560361,0.82006,0.0527875,0.37798,0.750263,0.420123,0.935618,0.750897,0.124514,0.0491086,0.125494,0.260265,0.309668,0.856415,0.472016,0.34198,0.950455,0.12573,0.931325,0.557202,0.594621,0.575745,0.680482,0.792354,0.905156,0.167508,0.758562,0.816247,0.756385,0.30152,0.259481,0.316745,0.121581,0.312268,0.694725,0.871844,0.732391,0.630343,0.622741,0.856905,0.679452,0.748235,0.11717,0.98912,0.60465,0.589185,0.3311,0.555105,0.714915,0.262425,0.112307,0.309536,0.838171,0.792789,0.10189,0.743326,0.960297,0.860452,0.559574,0.716682,0.161973,0.819054,0.0334272,0.283553,0.131322,0.728152,0.155397,0.863713,0.358495,0.778138,0.720618,0.0379467,0.526373,0.837787,0.0270669,0.131023,0.426973,0.358167,0.686128,0.141887,0.620593,0.798436,0.451423,0.458763,0.591225,0.553314,0.20209,0.551522,0.413766,0.761664,0.268204,0.575739,0.580718,0.301631,0.859292,0.71204,0.0297833,0.0146885,0.575753,0.388278,0.792826,0.296371,0.426225,0.3192,0.134158,0.453292,0.450223,0.561131,0.811459,0.136351,0.703018,0.432052,0.934787,0.154441,0.890816,0.526012,0.707755,0.0929055,0.0775342,0.121521,0.854569,0.345738,0.69726,0.435287,0.64737,0.556552,0.147327,0.677153,0.57124,0.72308,0.0654315,0.364067,0.0194505,0.491657,0.683266,0.153609,0.944949,0.133489,0.714739,0.756408,0.26984,0.417758,0.18846,0.204627,0.572199,0.0792759,0.730639,0.279954,0.172181,0.808173,0.401476,0.0267505,0.153912,0.098736,0.462037,0.801282,0.655288,0.609364,0.478435,0.226528,0.332444,0.543866,0.590595,0.351894,0.0355229,0.273861,0.505503,0.980472,0.40735,0.220242,0.73688,0.67719,0.638,0.92534,0.881818,0.210199,0.00461587,0.612457,0.490153,0.176797,0.42063,0.891629,0.203548,0.574542,0.990365,0.665585,0.375823,0.645653,0.274949,0.854258,0.872181,0.607393,0.398124,0.462776,0.959287,0.433647,0.736637,0.46479,0.414119,0.143987,0.685033,0.150999,0.821177,0.323033,0.0763386,0.702995,0.533232,0.0809545,0.315451,0.0233849,0.257752,0.736081,0.915014,0.4613,0.310623,0.905379,0.126885,0.686446,0.551032,0.401834,0.540705,0.423213,0.00922715,0.938829,0.885989,0.968515,0.372476,0.622625,0.433305,0.786595,0.766612,0.118338,0.937594,0.587789,0.44137,0.0139323,0.290784,0.974602,0.0948867,0.606235,0.997987,0.352639,0.342317,0.913001,0.813938,0.65294,0.81838,0.940823,0.339386,0.369412,0.342657,0.880091,0.792625,0.351884,0.81892,0.678614,0.320399,0.191396,0.301239,0.753704,0.977991,0.0678517,0.872041,0.915585,0.655641,0.313412,0.929517,0.946425,0.288013,0.0244035,0.552661,0.286,0.377042,0.894977,0.199001,0.19098,0.547917,0.017381,0.131803,0.887303,0.386793,0.47446,0.767394,0.179418,0.826344,0.586314,0.858032,0.146743,0.77771,0.159272,0.900446,0.7557,0.227123,0.772487,0.671285,0.882764,0.0858989,0.600802,0.82919,0.373912,0.625205,0.38185,0.659913,0.00224743,0.276828,0.858914,0.193228,0.824745,0.876295,0.325031,0.712049,0.263088,0.799491,0.479443,0.442506,0.625834,0.0657567,0.300538,0.772577,0.843466,0.459809,0.673023,0.599167,0.686933,0.44551,0.2704
52,0.569697,0.531409,0.871254,0.398887,0.905322,0.496459,0.780737,0.565234,0.498706,0.0575653,0.424148,0.691934,0.88231,0.300443,0.0169647,0.594359,0.56353,0.816455,0.0738019,0.00603597,0.44229,0.139559,0.306574,0.214867,0.983025,0.766383,0.88789,0.582192,0.453316,0.3334,0.852644,0.0230133,0.864809,0.723897,0.4219,0.770131,0.220356,0.202638,0.335366,0.719063,0.260203,0.759514,0.410997,0.142513,0.0599562,0.427961,0.736872,0.623486,0.244417,0.810674,0.629522,0.686706,0.950233,0.936096,0.901573,0.933258,0.70248,0.789462,0.51545,0.155796,0.122863,0.368093,0.178809,0.987672,0.0919906,0.600709,0.757803,0.312347,0.803347,0.0931688,0.0314096,0.0635499,0.852682,0.442406,0.206063,0.912639,0.870368,0.942936,0.536125,0.114784,0.75361,0.165647,0.801491,0.703843,0.101743,0.703063,0.637101,0.804223,0.492526,0.152551,0.960019,0.615388,0.520644,0.138828,0.60306,0.612635,0.739537,0.360864,0.924982,0.542884,0.454032,0.956391,0.606434,0.306715,0.398798,0.812498,0.219353,0.269166,0.755433,0.755478,0.38395,0.509044,0.921125,0.18544,0.212887,0.0228688,0.888504,0.849988,0.827092,0.38103,0.00253914,0.787111,0.996418,0.523184,0.925938,0.599478,0.135819,0.665476,0.960342,0.0608006,0.20836,0.414374,0.0171921,0.814794,0.721089,0.41599,0.627292,0.940442,0.685155,0.382725,0.695921,0.0691052,0.891769,0.617046,0.254546,0.104656,0.639915,0.143049,0.954644,0.467007,0.524079,0.957183,0.254117,0.520497,0.480366,0.180056,0.119975,0.616185,0.845532,0.080317,0.676986,0.0538917,0.494691,0.694178,0.868686,0.21578,0.110167,0.495978,0.156222,0.795323,0.878703,0.852143,0.864428,0.770472,0.469189,0.118974,0.875127,0.109104,0.262023,0.829771,0.57611,0.786102,0.786954,0.830227,0.306599,0.26732,0.0102832,0.426574,0.883505,0.855815,0.506891,0.56049,0.909706,0.00158217,0.254668,0.778392,0.217362,0.364835,0.27437,0.373584,0.160158,0.153074,0.225727,0.0245863,0.923545,0.694916,0.14356,0.798673,0.80402,0.405583,0.628444,0.38013,0.191685,0.415397,0.210357,0.498284,0.682717,0.22064,0.924858,0.566222,0.0764552,0.431749,0.126713,0.986162,0.433331,0.381381,0.764554,0.650693,0.746216,0.0389246,0.0242778,0.906375,0.191998,0.250005,0.930961,0.115544,0.944921,0.0745208,0.914217,0.74894,0.480104,0.54266,0.12907,0.671789,0.958058,0.339427,0.170073,0.640775,0.560068,0.0949308,0.206997,0.636523,0.52668,0.33371,0.622685,0.960011,0.715091,0.387239,0.610705,0.461307,0.426163,0.634983,0.367682,0.618162,0.884987,0.298643,0.733705,0.829908,0.373163,0.647922,0.578849,0.853267,0.190582,0.707919,0.525056,0.14864,0.0473462,0.695129,0.789415,0.607414,0.790059,0.996413,0.243937,0.316739,0.330123,0.866622,0.276751,0.0452139,0.253861,0.887455,0.506521,0.680024,0.522438,0.874203,0.298186,0.407425,0.172846,0.0318912,0.237334,0.546009,0.679813,0.816182,0.399276,0.870395,0.524101,0.924332,0.0190353,0.571447,0.619461,0.808451,0.178862,0.40952,0.804863,0.422799,0.72626,0.134986,0.28942,0.00301006,0.1802,0.543281,0.890465,0.686721,0.223305,0.412903,0.560924,0.521491,0.820328,0.73377,0.553382,0.0576617,0.279779,0.233195,0.873844,0.679055,0.103591,0.397945,0.603387,0.122626,0.969393,0.222848,0.931076,0.148254,0.632368,0.735939,0.571053,0.358628,0.870925,0.860473,0.361638,0.0511254,0.403754,0.252103,0.737846,0.627059,0.665006,0.298771,0.14855,0.485334,0.0325403,0.701932,0.542996,0.312319,0.935127,0.41684,0.991374,0.0387175,0.814785,0.594761,0.161343,0.784178,0.817609,0.0924197,0.932432,0.449978,0.828359,0.503485,0.808605,0.699285,0.363958,0.170243,0.75041,0.767712,0.422346,0.488256,0.394771,0.0873524,0.787027,0.543321,0.572687,0.819567,0.245253,0.115682,0.131886,0.18038,0.532522,
0.123261,0.219097,0.347307,0.718022,0.38044,0.131485,0.535631,0.47286,0.0639175,0.985609,0.301219,0.567402,0.794214,0.000503816,0.93136,0.964457,0.750914,0.699073,0.386804,0.23917,0.0938436,0.474156,0.0261973,0.637164,0.0468428,0.845765,0.882417,0.162525,0.977651,0.0627967,0.695048,0.100912,0.281894,0.042355,0.818933,0.662334,0.17384,0.354565,0.135194,0.237758,0.340173,0.436413,0.80516,0.134388,0.436917,0.736521,0.098845,0.187831,0.435593,0.485649,0.427001,0.529437,0.959805,0.453199,0.166601,0.0066478,0.298963,0.0490185,0.169173,0.276614,0.111815,0.864221,0.377526,0.393709,0.906576,0.196459,0.0560431,0.0804161,0.551024,0.191237,0.318174,0.891197,0.627651,0.123334,0.0255847,0.0645681,0.859855,0.12443,0.252399,0.295448,0.610079,0.679401,0.824885,0.569884,0.132599,0.991487,0.576531,0.431562,0.0405052,0.745704,0.708177,0.15232,0.609925,0.0857023,0.546029,0.516501,0.282161,0.602072,0.596917,0.833185,0.79331,0.915091,0.724382,0.420961,0.0384249,0.749967,0.485529,0.89828,0.874397,0.737928,0.193728,0.484475,0.417328,0.0186136,0.054359,0.549928,0.0101003,0.63089,0.98149,0.0506055,0.376595,0.689666,0.202926,0.98652,0.775369,0.748955,0.503021,0.0575302,0.351027,0.0999375,0.890715,0.144337,0.0150282,0.615098,0.565298,0.0534531,0.365065,0.0508265,0.951733,0.239462,0.788754,0.145461,0.723937,0.206083,0.164075,0.778296,0.75601,0.174175,0.409187,0.7375,0.224781,0.785781,0.427166,0.427706,0.772301,0.202535,0.176661,0.275322,0.260065,0.527689,0.375259,0.150781,0.672026,0.390287,0.765879,0.237324,0.443741,0.130943,0.28815,0.395474,0.370405,0.0769048,0.540935,0.0943423,0.282988,0.70501,0.872638,0.0389977,0.879185,0.281825,0.776498,0.103966,0.0676062,0.203664,0.531672,0.839907,0.406199,0.708333,0.115229,0.666265,0.236022,0.490489,0.817046,0.908049,0.880776,0.582924,0.145373,0.324517,0.713868,0.433523,0.71999,0.0842728,0.510428,0.260925,0.178615,0.793415,0.965935,0.0512535,0.832413,0.84512,0.333078,0.608911,0.949086,0.400685,0.812575,0.480758,0.240592,0.218775,0.189091,0.355821,0.885039,0.425114,0.84631,0.702085,0.333162,0.727086,0.285009,0.478535,0.0516027,0.998877,0.912058,0.771593,0.0831499,0.422486,0.0325182,0.261765,0.215901,0.998453,0.313019,0.0483138,0.843573,0.646097,0.657225,0.792659,0.0467818,0.4698,0.273417,0.287374,0.688574,0.462508,0.643195,0.573614,0.887622,0.489505,0.275699,0.220784,0.216591,0.560708,0.699319,0.268194,0.559585,0.611377,0.0397868,0.642735,0.0338625,0.072305,0.9045,0.249763,0.0707583,0.217519,0.298077,0.914332,0.863616,0.955302,0.706991,0.910398,0.425101,0.980408,0.197771,0.113676,0.442916,0.840967,0.687289,0.330538,0.330472,0.962988,0.551322,0.547063,0.523696,0.250641,0.815257,0.0832814,0.862018,0.855044,0.726017,0.895881,0.927349,0.630517,0.145644,0.998107,0.848036,0.443721,0.912439,0.711651,0.399023,0.61943,0.622049,0.824124,0.599837,0.81982,0.9378,0.0427533,0.660787,0.625089,0.373291,0.991259,0.588077,0.924613,0.538322,0.111773,0.175254,0.353579,0.195055,0.0372723,0.208623,0.921071,0.933153,0.135972,0.551588,0.0787968,0.134079,0.399624,0.522518,0.0465177,0.111275,0.921541,0.665947,0.733324,0.745665,0.265785,0.553144,0.683465,0.308538,0.213932,0.308554,0.681829,0.205191,0.896631,0.606442,0.743513,0.00840454,0.781697,0.0970923,0.203459,0.818969,0.305715,0.124531,0.752122,0.441687,0.676119,0.830919,0.575766,0.0757422,0.353437,0.622284,0.187017,0.274978,0.288231,0.920341,0.0206426,0.554016,0.473486,0.704108,0.862554,0.687417,0.0126617,0.544383,0.892608,0.909293,0.150826,0.636121,0.917697,0.932523,0.733213,0.121157,0.751492,0.0389286,0.245687,0.503614,0.480616,0.921806,0.334532,0.056
3815,0.997548,0.687969,0.678665,0.184565,0.962947,0.966896,0.104906,0.983589,0.520912,0.578392,0.687697,0.383466,0.265809,0.700359,0.92785,0.158417,0.609652,0.0786758,0.794538,0.527349,0.0111984,0.527752,0.648506,0.76269,0.56668,0.894193,0.266304,0.0472961,0.815999,0.600836,0.103678,0.813547,0.288805,0.782343,0.998112,0.251752,0.749239,0.103019,0.235341,0.270151,0.68141,0.923038,0.653618,0.94722,0.623397,0.581467,0.105637,0.233048,0.660143,0.900175,0.760397,0.671341,0.427927,0.408903,0.434032,0.994608,0.303096,0.700335,0.0419037,0.119095,0.301171,0.145581,0.932642,0.589977,0.927924,0.930754,0.841728,0.677163,0.0337729,0.0770693,0.947314,0.715183,0.000107228,0.600932,0.662403,0.623504,0.182399,0.76804,0.856552,0.842542,0.668215,0.616949,0.513883,0.0961427,0.0258521,0.947915,0.0907503,0.328948,0.64825,0.132654,0.448043,0.949422,0.278235,0.380686,0.539398,0.206159,0.31144,0.381127,0.883322,0.345213,0.458196,0.830637,0.0603961,0.458303,0.431568,0.722799,0.081807,0.613967,0.490839,0.938359,0.456509,0.159055,0.555308,0.970392,0.255197,0.58116,0.918307,0.345948,0.910108,0.566558,0.478602,0.358152,0.51598,0.756837,0.738837,0.0553779,0.962996,0.050277,0.436505,0.846319,0.39549,0.894701,0.676955,0.455886,0.353004,0.108524,0.178685,0.434811,0.722491,0.669524,0.373169,0.179,0.828579,0.928477,0.149392,0.0837763,0.509637,0.0676998,0.429724,0.419745,0.634258,0.908326,0.777897,0.150237,0.665163,0.516734,0.205615,0.628159,0.567011,0.64212,0.474478,0.962501,0.53682,0.151433,0.418387,0.889824,0.259957,0.597072,0.324634,0.982448,0.266596,0.697804,0.161448,0.0951751,0.626281,0.31084,0.178951,0.135918,0.37854,0.608675,0.555664,0.0127973,0.517001,0.333561,0.163034,0.182164,0.850295,0.36865,0.810323,0.417306,0.0107691,0.284801,0.379807,0.547589,0.436235,0.798194,0.437413,0.696192,0.395265,0.762047,0.678639,0.661862,0.459851,0.840087,0.757037,0.0861324,0.150927,0.935988,0.222051,0.529467,0.544663,0.777715,0.542264,0.0616646,0.111275,0.705298,0.243828,0.96157,0.0739479,0.0541516,0.378876,0.084717,0.338953,0.758683,0.632306,0.775187,0.556876,0.0697191,0.471379,0.952142,0.831766,0.150018,0.614003,0.291618,0.990105,0.37104,0.37775,0.141032,0.307028,0.599801,0.670499,0.851692,0.377516,0.212762,0.913356,0.488791,0.918061,0.157185,0.450361,0.992009,0.211336,0.829237,0.0767257,0.550289,0.587919,0.709032,0.325476,0.144795,0.778751,0.796855,0.096937,0.610518,0.946874,0.71094,0.902135,0.936979,0.0819804,0.279885,0.0780109,0.389009,0.879686,0.748509,0.2407,0.257202,0.961272,0.154057,0.745993,0.879333,0.311241,0.196354,0.871341,0.522578,0.0255901,0.948067,0.0728667,0.613509,0.657099,0.398343,0.758305,0.43585,0.195198,0.855242,0.0463675,0.142072,0.566182,0.948503,0.0790507,0.648162,0.228388,0.157062,0.037171,0.108075,0.905571,0.277871,0.365277,0.866843,0.431928,0.11127,0.746176,0.743169,0.307623,0.617517,0.265747,0.333213,0.565584,0.338613,0.946723,0.222683,0.736957,0.705027,0.658533,0.932155,0.560269,0.704901,0.0742268,0.126451,0.653404,0.153278,0.774613,0.881792,0.310339,0.811784,0.989866,0.21591,0.0896555,0.355143,0.082753,0.521583,0.466413,0.828929,0.264752,0.774036,0.446446,0.530499,0.107249,0.0120304,0.869112,0.0539718,0.234714,0.606069,0.758999,0.893247,0.538224,0.319268,0.598148,0.612451,0.445719,0.251551,0.765728,0.220332,0.133343,0.0760671,0.0321166,0.12321,0.291977,0.121772,0.478353,0.37473,0.643355,0.944766,0.203659,0.908108,0.718801,0.650105,0.438607,0.826051,0.662135,0.307719,0.880022,0.896849,0.913788,0.639021,0.790096,0.452011,0.958289,0.388244,0.0644621,0.404008,0.639796,0.83019,0.624341,0.773139,0.906257,0.6564
57,0.896348,0.198234,0.778229,0.374701,0.572965,0.421585,0.319467,0.776624,0.329692,0.0382681,0.426729,0.768299,0.864319,0.088864,0.0760176,0.744341,0.985713,0.989805,0.383363,0.775809,0.441817,0.341652,0.164053,0.506279,0.74566,0.803849,0.336469,0.370001,0.576988,0.242726,0.0264582,0.473336,0.440961,0.804688,0.848037,0.0139255,0.226272,0.167504,0.790549,0.555965,0.205772,0.217278,0.324263,0.0700906,0.306142,0.400281,0.814432,0.291855,0.390086,0.197794,0.0676643,0.831903,0.539446,0.231718,0.338182,0.285107,0.0355667,0.674651,0.655108,0.612554,0.917378,0.681566,0.0858904,0.358338,0.486253,0.933928,0.372264,0.712526,0.101431,0.162813,0.26849,0.307203,0.380091,0.592753,0.377294,0.686233,0.993034,0.191725,0.978087,0.383121,0.38952,0.0457517,0.215024,0.928966,0.277469,0.553206,0.214073,0.313036,0.227857,0.86918,0.925591,0.145235,0.550746,0.0114811,0.503573,0.0369993,0.945409,0.875837,0.749525,0.0468399,0.0386504,0.018015,0.354043,0.418741,0.610768,0.731337,0.104974,0.603803,0.923062,0.0830612,0.986923,0.312582,0.128813,0.201947,0.241548,0.406282,0.755153,0.45562,0.719319,0.98301,0.3248,0.644909,0.128245,0.875546,0.65639,0.631819,0.912546,0.601799,0.507656,0.66207,0.648639,0.546307,0.680085,0.00268168,0.965048,0.290854,0.734018,0.0700215,0.894656,0.65708,0.153083,0.88158,0.969662,0.281896,0.083527,0.21121,0.688178,0.83868,0.66683,0.407496,0.821691,0.99163,0.0524056,0.949936,0.867176,0.708796,0.581755,0.779722,0.310595,0.0894111,0.441792,0.959233,0.635718,0.121877,0.961915,0.600765,0.412731,0.695933,0.670787,0.307388,0.353014,0.823869,0.188967,0.322676,0.105765,0.272494,0.533885,0.793943,0.111175,0.200715,0.201439,0.932865,0.192345,0.253845,0.882802,0.0595211,0.962641,0.464557,0.839243,0.273236,0.553968,0.281035,0.232469,0.189685,0.402912,0.194384,0.790451,0.815643,0.890318,0.461237,0.123031,0.243331,0.285107,0.311999,0.566007,0.390872,0.584493,0.0998922,0.184815,0.695668,0.300607,0.386254,0.628533,0.492952,0.640099,0.511335,0.552473,0.60274,0.975891,0.391716,0.875975,0.529859,0.672751,0.108444,0.719544,0.075663,0.302829,0.509995,0.891307,0.193146,0.971232,0.0143376,0.436478,0.256339,0.326336,0.00248469,0.647211,0.910829,0.102377,0.832026,0.606497,0.402984,0.21828,0.23503,0.895936,0.858379,0.746365,0.448409,0.461119,0.722256,0.840125,0.337094,0.252115,0.512875,0.445539,0.971659,0.588539,0.748367,0.481654,0.479845,0.941513,0.452887,0.494183,0.377991,0.709226,0.820519,0.380476,0.356437,0.731348,0.482853,0.188463,0.337845,0.885837,0.406743,0.572875,0.781773,0.265122,0.31924,0.230182,0.726241,0.0414967,0.0703064,0.0633351,0.293612,0.583182,0.508874,0.265271,0.17172,0.257241,0.746925,0.651565,0.198754,0.199812,0.145748,0.576746,0.909038,0.966267,0.957221,0.265475,0.697615,0.440074,0.453938,0.0354602,0.325911,0.860681,0.608336,0.107684,0.125803,0.927576,0.337865,0.852044,0.969073,0.408172,0.915379,0.262685,0.991354,0.424253,0.527956,0.163074,0.681494,0.274881,0.81464,0.880248,0.474693,0.960388,0.456994,0.383731,0.926655,0.414215,0.649206,0.62427,0.854289,0.103144,0.65973,0.1802,0.963825,0.268066,0.287884,0.0896277,0.195642,0.625749,0.941672,0.164714,0.0339206,0.857051,0.427399,0.0252742,0.281303,0.955354,0.188348,0.962797,0.230236,0.00298769,0.843045,0.704929,0.963375,0.300039,0.0886603,0.89003,0.714254,0.737866,0.5143,0.568543,0.84101,0.17403,0.748743,0.804835,0.442095,0.0366273,0.894463,0.637737,0.662376,0.836134,0.802451,0.696297,0.693185,0.22985,0.721571,0.974488,0.185204,0.909919,0.937285,0.41544,0.912907,0.78033,0.120369,0.876282,0.0803688,0.20903,0.766312,0.794623,0.946896,0.280612,0.363166,0.787
906,0.454642,0.111909,0.592741,0.896738,0.148537,0.487204,0.534475,0.810913,0.323338,0.336926,0.50721,0.0165226,0.566776,0.228781,0.991011,0.75198,0.1387,0.928296,0.16742,0.0516071,0.708626,0.28779,0.927889,0.788994,0.496819,0.694202,0.583617,0.443715,0.974814,0.946783,0.231621,0.429456,0.0586922,0.824362,0.326194,0.207229,0.311566,0.860668,0.0181416,0.634904,0.197594,0.525351,0.651426,0.76437,0.754132,0.642437,0.51635,0.892832,0.570732,0.683771,0.944439,0.279358,0.971561,0.872329,0.0683526,0.46838,0.566531,0.65197,0.912095,0.541345,0.598753,0.143716,0.970801,0.657445,0.968078,0.296995,0.864674,0.279644,0.157663,0.882815,0.914548,0.355258,0.408167,0.565974,0.119628,0.162299,0.208411,0.635978,0.0551309,0.779143,0.319749,0.99957,0.0585016,0.29131,0.871899,0.126854,0.75969,0.43843,0.778824,0.671785,0.979774,0.377577,0.815501,0.950575,0.0350216,0.783579,0.24757,0.899695,0.0632235,0.405233,0.782511,0.977771,0.760491,0.190677,0.543746,0.880119,0.352976,0.752156,0.516097,0.408107,0.5313,0.835846,0.407677,0.589801,0.127156,0.279576,0.716656,0.886846,0.718006,0.49548,0.55863,0.69778,0.873056,0.374131,0.648355,0.908078,0.157711,0.895925,0.807773,0.220934,0.301159,0.590284,0.198705,0.0616496,0.780961,0.742451,0.941768,0.133937,0.494608,0.457865,0.542044,0.0259074,0.293712,0.949721,0.615709,0.420868,0.229297,0.332365,0.307714,0.947302,0.827844,0.866344,0.645082,0.7009,0.240475,0.293438,0.608978,0.398186,0.189363,0.416751,0.61912,0.490522,0.00703524,0.817825,0.552171,0.787996,0.560277,0.49394,0.921933,0.054884,0.951805,0.463977,0.0807914,0.245517,0.413697,0.6965,0.666385,0.642994,0.0288647,0.974099,0.590296,0.856709,0.840443,0.235378,0.557609,0.0809181,0.528816,0.166588,0.479104,0.718179,0.583339,0.098224,0.2087,0.590374,0.916049,0.760872,0.37837,0.476326,0.254811,0.300303,0.53121,0.206616,0.76428,0.612001,0.452133,0.177977,0.308502,0.118518,0.820971,0.337366,0.0926166,0.411266,0.194075,0.933059,0.646645,0.751684,0.0139775,0.17546,0.918272,0.493082,0.893639,0.501611,0.591306,0.102339,0.0919853,0.507355,0.863211,0.470356,0.983681,0.118022,0.770659,0.514891,0.324638,0.534939,0.126892,0.776772,0.712916,0.435394,0.89529,0.533887,0.77276,0.987906,0.945153,0.966835,0.920966,0.591798,0.718519,0.934943,0.767258,0.636791,0.428025,0.660897,0.138402,0.0193303,0.763236,0.230388,0.526685,0.626447,0.700743,0.510366,0.744469,0.471403,0.0252571,0.0691076,0.00634169,0.152149,0.845879,0.719258,0.587543,0.741169,0.253144,0.360303,0.729075,0.198297,0.327138,0.650041,0.790095,0.0456571,0.584984,0.557353,0.682449,0.0130085,0.21825,0.820851,0.0323388,0.981486,0.0512387,0.559024,0.607933,0.751982,0.0693903,0.352402,0.223385,0.0946474,0.421509,0.229726,0.246797,0.267389,0.948984,0.834339,0.00855737,0.202129,0.194642,0.737632,0.400426,0.52178,0.387673,0.190521,0.567437,0.972657,0.747874,0.249885,0.985665,0.966123,0.0707363,0.0180041,0.947609,0.121975,0.577028,0.555542,0.873957,0.646419,0.907943,0.097342,0.741066,0.329453,0.327068,0.987862,0.596841,0.276053,0.822202,0.605399,0.478181,0.0168437,0.343031,0.878607,0.538623,0.730704,0.0691285,0.10606,0.703361,0.817002,0.355946,0.689026,0.783126,0.426682,0.70703,0.730735,0.548657,0.284059,0.286276,0.422614,0.930477,0.19422,0.519956,0.671543,0.523673,0.847025,0.659405,0.120514,0.123077,0.481607,0.725913,0.601259,0.498451,0.0689439,0.479866,0.0370744,0.799648,0.548994,0.143135,0.503009,0.365997,0.49908,0.192035,0.149123,0.925762,0.899066,0.879857,0.474419,0.183125,0.166134,0.897033,0.113602,0.360353,0.416989,0.785145,0.884026,0.264013,0.44455,0.00453983,0.387091,0.926157,0.730453,0.9883
49,0.424608,0.799397,0.468215,0.461683,0.599045,0.0172096,0.604817,0.102054,0.383206,0.103897,0.294089,0.532329,0.0296592,0.193155,0.412186,0.504078,0.37628,0.57832,0.401111,0.489881,0.938673,0.818099,0.275026,0.822699,0.0821128,0.719576,0.827239,0.469203,0.645734,0.557691,0.457552,0.0703418,0.357088,0.925767,0.532025,0.956133,0.942977,0.136842,0.0581863,0.326183,0.240739,0.352276,0.858512,0.270398,0.545431,0.270699,0.774476,0.92171,0.849018,0.175587,0.411591,0.787692,0.993686,0.686617,0.610391,0.0757991,0.406194,0.437629,0.545002,0.051927,0.995321,0.00255482,0.122269,0.352409,0.928322,0.654293,0.308541,0.871299,0.791135,0.366728,0.197483,0.0318741,0.719003,0.0559953,0.302272,0.264434,0.326694,0.0767485,0.186144,0.175712,0.252335,0.597735,0.963404,0.246022,0.284353,0.573795,0.321821,0.690546,0.0114241,0.866823,0.742473,0.00674499,0.869378,0.864742,0.359154,0.7977,0.519036,0.667695,0.669,0.310171,0.0344229,0.866482,0.342045,0.753426,0.922478,0.644317,0.0178599,0.249172,0.721066,0.204004,0.424884,0.973401,0.801739,0.388288,0.219422,0.086092,0.962083,0.541243,0.776638,0.973507,0.408066,0.519112,0.980252,0.277444,0.383854,0.339406,0.0751441,0.902889,0.00710094,0.744144,0.21306,0.0415239,0.610626,0.555105,0.79495,0.533104,0.199422,0.81281,0.782276,0.920487,0.0168139,0.20716,0.893888,0.818553,0.595448,0.11331,0.904645,0.557531,0.654553,0.681283,0.531038,0.0626193,0.200395,0.51129,0.340063,0.584248,0.850695,0.415207,0.487138,0.857796,0.159351,0.700197,0.89932,0.769977,0.255302,0.69427,0.303081,0.454724,0.50708,0.0853563,0.375211,0.523894,0.292516,0.269099,0.342447,0.887964,0.382409,0.247092,0.445494,0.0369622,0.928375,0.976532,0.0995815,0.12877,0.487822,0.439645,0.713018,0.338517,0.854852,0.200156,0.196313,0.0142026,0.900353,0.0956331,0.78418,0.155655,0.789903,0.0872602,0.610379,0.296983,0.172617,0.98559,0.820877,0.465132,0.254689,0.163324,0.353096,0.637098,0.410416,0.798591,0.67406,0.338791,0.775123,0.773641,0.467561,0.262944,0.213286,0.18058,0.601461,0.0681376,0.380736,0.797774,0.0823402,0.281089,0.893407,0.86652,0.436745,0.68331,0.95378,0.0471239,0.980294,0.126397,0.0327138,0.801171,0.591529,0.287402,0.964495,0.944625,0.9245,0.374911,0.743216,0.59856,0.713702,0.518338,0.372201,0.181264,0.781282,0.585487,0.361844,0.382743,0.653624,0.74258,0.180518,0.735965,0.0236691,0.0739247,0.602484,0.460414,0.757235,0.556264,0.507538,0.737529,0.682661,0.540252,0.538699,0.27419,0.827654,0.503194,0.218815,0.752154,0.878105,0.962031,0.350714,0.591808,0.480369,0.722915,0.773072,0.261652,0.308402,0.134915,0.644395,0.962026,0.877495,0.824913,0.697991,0.901164,0.898837,0.300475,0.361578,0.656072,0.856739,0.869116,0.393601,0.5394,0.409368,0.9323,0.81359,0.237022,0.435494,0.0324051,0.989176,0.3136,0.994436,0.33989,0.905407,0.474805,0.0628049,0.678479,0.736456,0.371206,0.813395,0.380851,0.333232,0.69089,0.205764,0.0312229,0.592054,0.104601,0.331698,0.953633,0.760673,0.188437,0.822749,0.154274,0.727838,0.232117,0.0865743,0.541428,0.469139,0.522069,0.573833,0.458316,0.835668,0.568268,0.798206,0.741076,0.0430732,0.861011,0.419555,0.77953,0.232217,0.232949,0.160381,0.565449,0.923839,0.366145,0.596672,0.515893,0.470746,0.92837,0.469526,0.231419,0.116807,0.292275,0.385693,0.844645,0.524392,0.472268,0.386072,0.993531,0.994337,0.959905,0.451847,0.830005,0.528174,0.250052,0.571081,0.571247,0.111063,0.990636,0.350776,0.34328,0.223585,0.511157,0.908729,0.147424,0.877302,0.505402,0.663318,0.348048,0.433772,0.132843,0.579467,0.550579,0.425118,0.965161,0.395224,0.94951,0.437429,0.781296,0.943041,0.431765,0.741202,0.394888,0.26177,0.
269375,0.64494,0.832851,0.840622,0.756003,0.823487,0.191398,0.0992834,0.047072,0.702555,0.00801283,0.194496,0.579858,0.513415,0.857814,0.927906,0.947186,0.990657,0.507373,0.497765,0.415776,0.472534,0.892989,0.365286,0.909963,0.674286,0.308327,0.341728,0.415487,0.703215,0.603499,0.684862,0.348155,0.43635,0.525484,0.104158,0.259837,0.716882,0.203442,0.306909,0.419438,0.211455,0.501405,0.999295,0.724869,0.359219,0.927201,0.672055,0.349876,0.434574,0.169821,0.765652,0.907108,0.0628102,0.130938,0.817071,0.737096,0.439264,0.1588,0.152583,0.142479,0.762298,0.837446,0.490634,0.198648,0.36293,0.594792,0.458485,0.0798121,0.798234,0.765394,0.49925,0.00968889,0.2668,0.498545,0.734558,0.626018,0.425746,0.406613,0.975895,0.86032,0.576434,0.741547,0.767428,0.639244,0.872484,0.5845,0.37634,0.311748,0.743299,0.528924,0.454228,0.505598,0.366369,0.944862,0.704246,0.729299,0.539654,0.162732,0.809111,0.337888,0.928126,0.308361,0.347577,0.194925,0.806906,0.0821353,0.820944,0.232652,0.488749,0.796839,0.0929719,0.0651828,0.538385,0.8604,0.704427,0.410869,0.4449,0.0807673,0.722618,0.188199,0.609691,0.176845,0.693797,0.97606,0.121707,0.398043,0.705359,0.661361,0.560775,0.51447,0.999249,0.488901,0.822831,0.346827,0.683826,0.629737,0.428962,0.50477,0.862388,0.917711,0.301609,0.95536,0.982894,0.839994,0.815761,0.687321,0.250863,0.260661,0.768088,0.973481,0.44886,0.377779,0.150326,0.142657,0.353839,0.272033,0.5407,0.0591977,0.933394,0.101474,0.573668,0.932643,0.590375,0.396498,0.27947,0.274201,0.0262349,0.708432,0.778971,0.888623,0.626143,0.08058,0.843983,0.609037,0.920574,0.659744,0.296357,0.171437,0.920405,0.064445,0.144918,0.369264,0.442224,0.295244,0.511921,0.796063,0.567277,0.0526209,0.85526,0.50067,0.154095,0.428928,0.433314,0.74447,0.825426,0.712784,0.0186716,0.851661,0.421216,0.797643,0.740284,0.0473588,0.878223,0.584268,0.656395,0.798797,0.244012,0.952752,0.970234,0.164417,0.0171975,0.115152,0.533681,0.459421,0.410396,0.0456021,0.255484,0.977673,0.0982231,0.110744,0.478343,0.252318,0.539672,0.911657,0.996789,0.365098,0.624441,0.0154605,0.216759,0.0456565,0.813103,0.957044,0.0930153,0.691326,0.541312,0.749411,0.490123,0.785323,0.702163,0.460357,0.94974,0.71936,0.575509,0.483421,0.178782,0.985905,0.529023,0.434265,0.963578,0.627246,0.545009,0.441921,0.879565,0.0846814,0.353577,0.876354,0.449779,0.978018,0.891814,0.666539,0.0236744,0.704917,0.623582,0.11669,0.396244,0.164894,0.8661,0.886367,0.950217,0.568263,0.346724,0.899957,0.287624,0.922233,0.383378,0.466405,0.908138,0.912401,0.900671,0.871716,0.539648,0.44568,0.313636,0.419212,0.530362,0.667214,0.295566,0.980141,0.645231,0.18738,0.64668,0.668906,0.892297,0.270262,0.785595,0.288541,0.435156,0.651696,0.174908,0.385374,0.219959,0.521632,0.285331,0.507583,0.443865,0.66871,0.973988,0.352003,0.581111,0.874659,0.223719,0.120758,0.32034,0.537355,0.539971,0.850701,0.204568,0.835536,0.830843,0.8498,0.0229158,0.477523,0.518706,0.915213,0.747785,0.304301,0.203754,0.182942,0.955997,0.378661,0.568315,0.175956,0.900293,0.853647,0.683539,0.344158,0.522356,0.657527,0.696161,0.103467,0.532186,0.919879,0.224225,0.852526,0.457234,0.764196,0.703227,0.661802,0.599732,0.53407,0.511602,0.622648,0.0115928,0.0303081,0.537861,0.759378,0.334609,0.741614,0.94232,0.290606,0.120275,0.510635,0.466562,0.0205682,0.364281,0.150101,0.364726,0.886637,0.807628,0.0608863,0.990104,0.339814,0.980766,0.21433,0.19234,0.437999,0.978526,0.895568,0.0998019,0.578258,0.429638,0.611404,0.200906,0.44123,0.641712,0.738767,0.200608,0.976322,0.480381,0.142928,0.266928,0.600656,0.653563,0.73349,0.621225,0.0178441
,0.883591,0.98595,0.904481,0.69122,0.0468368,0.894586,0.0310339,0.0276023,0.108915,0.223374,0.465602,0.0874412,0.118942,0.565404,0.665699,0.548579,0.176808,0.866605,0.98981,0.81852,0.605371,0.190418,0.794842,0.0857523,0.333346,0.0617701,0.686409,0.986908,0.79526,0.307633,0.00475232,0.678852,0.293584,0.909234,0.370071,0.340421,0.80382,0.401105,0.368023,0.912735,0.624479,0.833625,0.000176232,0.743421,0.399028,0.665875,0.292,0.575836,0.53248,0.28181,0.394357,0.137851,0.472228,0.189199,0.223604,0.805573,0.250969,0.910012,0.792481,0.046229,0.217646,0.797234,0.725081,0.51123,0.706468,0.0951523,0.85165,0.510287,0.496258,0.219673,0.423022,0.120737,0.0532975,0.423198,0.864158,0.452326,0.0890737,0.156158,0.0281618,0.621554,0.437968,0.422518,0.759405,0.910196,0.611717,0.983009,0.715769,0.862686,0.893021,0.50825,0.908915,0.110667,0.305484,0.633995,0.621897,0.0119517,0.729148,0.473547,0.522239,0.225405,0.69322,0.945261,0.346142,0.746517,0.36846,0.2103,0.198843,0.457533,0.366458,0.227005,0.0790869,0.804426,0.649523,0.838492,0.714622,0.26124,0.821501,0.430391,0.123926,0.714522,0.938641,0.0328403,0.825189,0.244125,0.666836,0.447086,0.256077,0.395983,0.920633,0.778316,0.621389,0.613853,0.723577,0.967531,0.360371,0.0920363,0.177831,0.559214,0.54957,0.544289,0.786218,0.628657,0.348716,0.435742,0.467149,0.0633376,0.696982,0.288649,0.493728,0.820908,0.00317174,0.43237,0.853748,0.828361,0.676495,0.520584,0.275448,0.932572,0.916567,0.196081,0.710887,0.537956,0.809934,0.434464,0.505487,0.170305,0.5265,0.683318,0.729518,0.07607,0.227607,0.515737,0.704727,0.576323,0.951478,0.171875,0.63966,0.64846,0.460525,0.133389,0.469368,0.463696,0.565758,0.323116,0.292058,0.242253,0.843699,0.567505,0.174825,0.760266,0.763586,0.885712,0.298222,0.57352,0.320176,0.803709,0.743824,0.846677,0.487027,0.473343,0.922747,0.714634,0.989079,0.627473,0.290957,0.940557,0.799348,0.930617,0.589018,0.259873,0.0640061,0.0583854,0.723569,0.629764,0.381501,0.0156266,0.872018,0.2252,0.583132,0.0468422,0.985467,0.346717,0.932554,0.283689,0.920237,0.25273,0.0873985,0.664062,0.099407,0.574426,0.137404,0.0221536,0.28906,0.126484,0.649627,0.580017,0.067041,0.448975,0.510634,0.656059,0.708848,0.57464,0.714444,0.432417,0.204404,0.095945,0.448043,0.076422,0.321145,0.0311749,0.123264,0.306612,0.377892,0.0558184,0.590301,0.29813,0.308549,0.6777,0.962192,0.407956,0.252125,0.0995959,0.430109,0.541185,0.226079,0.0797361,0.121202,0.29312,0.528711,0.631835,0.949179,0.237559,0.206475,0.663623,0.669975,0.41088,0.759568,0.118019,0.487302,0.0807133,0.149194,0.610566,0.387325,0.527086,0.666384,0.977627,0.825216,0.974933,0.655327,0.787407,0.382889,0.907452,0.887003,0.812999,0.448637,0.113083,0.892735,0.569838,0.406203,0.421446,0.201674,0.355382,0.659005,0.408149,0.0190051,0.32898,0.819029,0.778573,0.446999,0.306331,0.859286,0.596192,0.916897,0.246612,0.123278,0.583281,0.224239,0.948494,0.558214,0.879565,0.735901,0.941104,0.787017,0.622904,0.754102,0.235654,0.735987,0.646837,0.805493,0.14219,0.0682825,0.00716645,0.497572,0.727287,0.415316,0.516577,0.0562669,0.234345,0.29515,0.503266,0.540675,0.154436,0.0994577,0.457572,0.401048,0.222736,0.0408533,0.625287,0.17123,0.599068,0.504852,0.907131,0.540171,0.291869,0.530035,0.294273,0.527524,0.266022,0.94111,0.333016,0.408212,0.00939249,0.340183,0.905784,0.736679,0.755498,0.422361,0.792946,0.989843,0.717511,0.296212,0.530518,0.871947,0.39567,0.988091,0.272995,0.618405,0.028944,0.898282,0.789635,0.628012,0.403134,0.696766,0.168183,0.695004,0.226801,0.462456,0.222527,0.492823,0.403566,0.555544,0.901035,0.412959,0.895726,0.806819,
0.149638,0.651225,0.22918,0.942585,0.641068,0.94669,0.238796,0.171586,0.818637,0.634466,0.159677,0.0916325,0.252871,0.188621,0.989915,0.0425067,0.816633,0.393049,0.739273,0.984816,0.0880529,0.966074,0.447272,0.31058,0.458897,0.850839,0.866124,0.359933,0.263798,0.761851,0.166751,0.413436,0.413075,0.395931,0.356021,0.0541433,0.342621,0.594817,0.22573,0.161258,0.229283,0.385407,0.252891,0.482154,0.574028,0.242806,0.524661,0.39066,0.635855,0.263934,0.375476,0.723908,0.230008,0.822749,0.034488,0.688905,0.673587,0.900612,0.048838,0.937385,0.662463,0.215589,0.350821,0.0755382,0.611521,0.706841,0.129681,0.954142,0.301658,0.355411,0.1154,0.530941,0.740818,0.368291,0.0130954,0.314845,0.611097,0.537756,0.705506,0.246952,0.80169,0.0809823,0.970859,0.031698,0.903731,0.00534722,0.720603,0.577318,0.905959,0.769441,0.514703,0.568422,0.985031,0.865524,0.64396,0.596551,0.572365,0.773642,0.550693,0.874023,0.129053,0.666093,0.404965,0.869871,0.0343843,0.41806,0.184716,0.645481,0.955816,0.890222,0.892433,0.757506,0.971204,0.863292,0.789204,0.874935,0.868639,0.509808,0.452254,0.774598,0.279249,0.966957,0.343021,0.26428,0.832481,0.986981,0.860831,0.404846,0.760623,0.411524,0.27887,0.889676,0.0776173,0.683834,0.759547,0.112002,0.101894,0.944263,0.757483,0.0577104,0.834485,0.649915,0.815217,0.80569,0.513207,0.604421,0.680625,0.381846,0.114229,0.132879,0.156445,0.393478,0.0998357,0.499465,0.657757,0.932316,0.486446,0.518588,0.337162,0.247069,0.930113,0.616032,0.136746,0.00772985,0.299866,0.896292,0.119731,0.40176,0.840556,0.877214,0.459471,0.675041,0.527129,0.274687,0.480731,0.0403366,0.879109,0.161356,0.422183,0.993337,0.294235,0.578627,0.386815,0.39407,0.0780927,0.0445725,0.326387,0.564539,0.563161,0.663549,0.811609,0.493274,0.279581,0.948354,0.501004,0.579447,0.844647,0.620735,0.981208,0.685202,0.497949,0.440678,0.360243,0.0250784,0.715366,0.840974,0.065415,0.594474,0.00232964,0.487598,0.587812,0.296564,0.0662252,0.974627,0.690635,0.144318,0.0191994,0.0170218,0.708857,0.58236,0.680571,0.520466,0.0756341,0.960153,0.46882,0.576638,0.5396,0.313466,0.197372,0.520808,0.998669,0.695321,0.961486,0.358912,0.7204,0.676852,0.199885,0.785815,0.271326,0.202215,0.273413,0.859138,0.498779,0.339638,0.833765,0.189414,0.483956,0.852964,0.206436,0.192813,0.435325,0.887007,0.713279,0.510959,0.84716,0.182099,0.0875962,0.38676,0.495565,0.284969,0.907568,0.494234,0.98029,0.869054,0.853145,0.70069,0.545905,0.0530306,0.486505,0.817232,0.255246,0.759918,0.67637,0.754025,0.0995555,0.510134,0.943439,0.583511,0.363099,0.149876,0.776324,0.798423,0.0368828,0.489603,0.309382,0.884043,0.671702,0.396978,0.270803,0.167267,0.681947,0.17837,0.6615,0.662237,0.0474237,0.514646,0.362927,0.593329,0.567676,0.849432,0.410561,0.822922,0.609349,0.0869303,0.576947,0.708905,0.597065,0.520386,0.292416,0.960163,0.670262,0.0687406,0.758586,0.707144,0.558344,0.0679681,0.591187,0.230045,0.464946,0.86199,0.397312,0.146893,0.04036,0.0588123,0.80913,0.0877838,0.573458,0.172056,0.681113,0.141134,0.0214882,0.0916736,0.964056,0.630838,0.178604,0.541003,0.339742,0.775669,0.0613887,0.632159,0.735832,0.73165,0.700899,0.494418,0.438795,0.259243,0.562386,0.0299821,0.489288,0.0273323,0.891972,0.8866,0.174225,0.932332,0.945413,0.983355,0.0201158,0.51887,0.155411,0.701229,0.660004,0.1769,0.792902,0.62406,0.807737,0.971506,0.165063,0.14748,0.747175,0.226451,0.779638,0.483006,0.958102,0.480538,0.977424,0.396896,0.739781,0.539811,0.426879,0.229069,0.567143,0.318851,0.115669,0.741368,0.251183,0.061082,0.724723,0.271298,0.579952,0.880134,0.972527,0.239957,0.0570338,0.765429,0.86401
7,0.864771,0.736936,0.0290795,0.0122505,0.48411,0.255531,0.791889,0.967117,0.213632,0.272426,0.944541,0.610529,0.012207,0.484352,0.0374074,0.241276,0.0514945,0.356258,0.356945,0.792862,0.607441,0.418027,0.517585,0.878739,0.99798,0.39772,0.851266,0.237937,0.454753,0.616696,0.101954,0.319524,0.353631,0.131033,0.331775,0.837742,0.386564,0.123663,0.804859,0.600196,0.39609,0.7494,0.210725,0.408297,0.233751,0.248132,0.649573,0.285246,0.60439,0.00651834,0.0781082,0.211831,0.424546,0.595693,0.09057,0.422526,0.993413,0.941836,0.660462,0.448166,0.558532,0.762416,0.76769,0.912163,0.893449,0.0994651,0.749905,0.280013,0.223129,0.554764,0.880209,0.619218,0.304163,0.0909335,0.0275154,0.537915,0.339066,0.677088,0.82316,0.943456,0.683607,0.901269,0.155287,0.108152,0.496962,0.245857,0.530678,0.490375,0.187693,0.19114,0.938541,0.746225,0.953556,0.706232,0.658389,0.847005,0.805697,0.408294,0.127018,0.0288255,0.963058,0.00722657,0.648044,0.267221,0.0981601,0.675559,0.805136,0.437226,0.352648,0.628296,0.380682,0.0362543,0.529565,0.535969,0.144407,0.0265267,0.781826,0.675085,0.516902,0.969519,0.866225,0.455443,0.715744,0.819781,0.161675,0.374133,0.666786,0.967372,0.782427,0.793804,0.996197,0.745484,0.801031,0.644241,0.0127056,0.899191,0.319801,0.817841,0.336417,0.672448,0.446137,0.717099,0.708703,0.975702,0.253067,0.853109,0.0022287,0.0348932,0.528194,0.51913,0.00441207,0.394419,0.974573,0.720156,0.2142,0.136248,0.0942891,0.880987,0.10362,0.876716,0.674791,0.0998172,0.6222,0.475822,0.744058,0.634906,0.375013,0.063859,0.452747,0.711429,0.736307,0.898885,0.428528,0.44501,0.874587,0.681596,0.298119,0.876815,0.716489,0.826313,0.395946,0.720901,0.220732,0.370519,0.441057,0.434932,0.506768,0.535346,0.315919,0.610388,0.412062,0.99071,0.710205,0.0342626,0.466532,0.454263,0.669169,0.841545,0.518122,0.121916,0.552974,0.254429,0.0208005,0.981502,0.699439,0.895387,0.663098,0.997558,0.772203,0.379587,0.823871,0.168148,0.100488,0.044603,0.538668,0.541545,0.479535,0.0454351,0.076891,0.795455,0.655823,0.488953,0.786165,0.366027,0.523216,0.252697,0.820291,0.192384,0.0942411,0.338413,0.3143,0.647215,0.592842,0.3351,0.628717,0.292281,0.230488,0.291815,0.289839,0.00269016,0.671402,0.11371,0.170839,0.77189,0.158313,0.709506,0.313434,0.637848,0.754941,0.390325,0.433303,0.410764,0.879278,0.219468,0.776791,0.402494,0.472164,0.597082,0.594878,0.566405,0.935494,0.909178,0.21362,0.528336,0.244279,0.842338,0.820617,0.474766,0.134153,0.110456,0.477457,0.805555,0.224166,0.648295,0.577445,0.382479,0.357801,0.890879,0.0203274,0.112742,0.281204,0.45363,0.523506,0.160483,0.673098,0.300298,0.562977,0.145262,0.89738,0.157855,0.711667,0.832874,0.0670335,0.925287,0.361211,0.311312,0.767625,0.181828,0.786079,0.901778,0.292284,0.263535,0.707333,0.516451,0.91183,0.284777,0.89893,0.269631,0.175656,0.919257,0.382374,0.456861,0.372887,0.90588,0.617343,0.045985,0.206178,0.18032,0.191247,0.103557,0.338175,0.902914,0.936431,0.405209,0.828201,0.297642,0.716521,0.595826,0.47947,0.5026,0.497603,0.771754,0.766135,0.204936,0.288205,0.677965,0.489713,0.187134,0.947596,0.66537,0.106391,0.32997,0.12223,0.479279,0.23585,0.739573,0.525264,0.442028,0.919894,0.71651,0.545586,0.258069,0.619424,0.482017,0.663277,0.447625,0.779659,0.379798,0.0434506,0.259129,0.882398,0.541054,0.0308834,0.648533,0.74599,0.319088,0.326497,0.235704,0.506223,0.274094,0.901073,0.612614,0.604064,0.0233035,0.0918925,0.839914,0.762877,0.617156,0.281943,0.68277,0.333667,0.827528,0.940839,0.953091,0.309545,0.604117,0.400716,0.0892046,0.983915,0.444166,0.348334,0.866313,0.98522,0.379217,0.514846,0.731
21,0.698305,0.841343,0.966914,0.204528,0.115437,0.867987,0.817142,0.719501,0.891291,0.909034,0.559415,0.654168,0.52619,0.841358,0.336938,0.859857,0.668886,0.277777,0.812947,0.978431,0.881894,0.213663,0.0676358,0.865809,0.657829,0.41597,0.732122,0.643049,0.795187,0.246968,0.37426,0.493492,0.0883112,0.341174,0.69802,0.203748,0.209161,0.515162,0.923249,0.100452,0.424196,0.482664,0.75462,0.950386,0.324021,0.0915579,0.810243,0.992907,0.369335,0.62319,0.971338,0.25123,0.836853,0.0389741,0.117039,0.494683,0.454944,0.849161,0.137732,0.25013,0.0961293,0.511992,0.743622,0.18444,0.853166,0.441642,0.388189,0.0623269,0.956804,0.311437,0.162779,0.381,0.794101,0.917398,0.331386,0.118123,0.00895626,0.141629,0.11103,0.378292,0.764819,0.0823681,0.629521,0.601672,0.121342,0.74656,0.096355,0.576286,0.595721,0.234087,0.826416,0.691851,0.746079,0.570039,0.876291,0.599245,0.0116812,0.26448,0.661572,0.968485,0.575917,0.82435,0.349485,0.370019,0.741749,0.680871,0.488141,0.750705,0.8225,0.599171,0.128997,0.587319,0.681539,0.758518,0.188991,0.802882,0.505078,0.285346,0.379167,0.100799,0.519433,0.205584,0.79265,0.265512,0.775623,0.668941,0.864756,0.787304,0.933421,0.526328,0.755789,0.509338,0.350678,0.105274,0.879357,0.0924272,0.786145,0.367498,0.843132,0.608644,0.966669,0.972129,0.195963,0.648208,0.730647,0.384954,0.45109,0.235725,0.6703,0.830257,0.336524,0.189733,0.0358412,0.129174,0.455244,0.811464,0.798115,0.320001,0.598768,0.731536,0.846329,0.354556,0.240874,0.197007,0.45983,0.120231,0.289435,0.245975,0.487729,0.132567,0.854619,0.454398,0.104695,0.0505819,0.102607,0.835342,0.435536,0.553697,0.0710666,0.105835,0.383954,0.40759,0.295568,0.419795,0.536764,0.750812,0.231259,0.334879,0.0708133,0.830027,0.0664149,0.917142,0.184583,0.307289,0.11415,0.644413,0.42752,0.403584,0.890388,0.915248,0.536151,0.745007,0.369646,0.640846,0.795589,0.472253,0.476188,0.231125,0.0259495,0.547255,0.33696,0.409904,0.954845,0.632528,0.829699,0.49161,0.383341,0.0609578,0.826489,0.454154,0.890984,0.892904,0.371296,0.0755673,0.200192,0.485446,0.719981,0.627712,0.88903,0.610369,0.54296,0.425181,0.355376,0.912607,0.0660271,0.150966,0.38486,0.542216,0.382091,0.410809,0.0894706,0.719051,0.820713,0.0443161,0.351579,0.650412,0.535926,0.73492,0.711369,0.362414,0.189074,0.602354,0.255318,0.56037,0.677921,0.45551,0.0458154,0.397902,0.0832225,0.934845,0.00827039,0.626183,0.360026,0.363647,0.53879,0.426053,0.514612,0.923649,0.968269,0.896703,0.334459,0.0577391,0.615753,0.155171,0.102055,0.967333,0.805583,0.637981,0.702252,0.516952,0.000395435,0.891326,0.119306,0.255713,0.451695,0.797227,0.711224,0.497511,0.195129,0.794446,0.432356,0.203399,0.420629,0.792382,0.567046,0.959419,0.218435,0.0816579,0.883068,0.186704,0.978361,0.217527,0.244443,0.594114,0.372698,0.346498,0.561447,0.178281,0.984479,0.263699,0.695234,0.984874,0.155025,0.81454,0.240588,0.60672,0.611767,0.951812,0.104231,0.806896,0.746258,0.536587,0.0102952,0.166887,0.328969,0.577341,0.126306,0.547404,0.658999,0.00937469,0.734107,0.637359,0.226902,0.97855,0.231473,0.5996,0.325048,0.79292,0.777881,0.309527,0.0566186,0.473115,0.294401,0.211643,0.287656,0.534989,0.818363,0.899423,0.486801,0.922594,0.706319,0.233059,0.459181,0.716614,0.399947,0.788149,0.293955,0.526253,0.335553,0.952954,0.535628,0.0696605,0.590313,0.762529,0.0482106,0.821786,0.362129,0.373259,0.614706,0.140011,0.682786,0.671325,0.613126,0.977187,0.882968,0.900782,0.512176,0.701331,0.800205,0.998977,0.623925,0.506523,0.232036,0.0831056,0.223138,0.631983,0.871255,0.517093,0.158236,0.206808,0.470046,0.693864,0.276469,0.0603594,0.456393
,0.324679,0.882146,0.818523,0.697938,0.496852,0.958534,0.380723,0.168177,0.57166,0.35791,0.0511453,0.472442,0.870086,0.752477,0.272646,0.869064,0.376401,0.77917,0.1011,0.459507,0.00230738,0.733083,0.330762,0.5194,0.891319,0.53757,0.989446,0.585183,0.814039,0.0498057,0.0415763,0.138718,0.931952,0.860099,0.836655,0.428804,0.818633,0.217379,0.596981,0.390292,0.575289,0.648126,0.862734,0.445376,0.400603,0.13538,0.314439,0.777004,0.91455,0.415539,0.236511,0.916858,0.148622,0.567273,0.436258,0.0399411,0.104843,0.425704,0.625124,0.918882,0.47551,0.6667,0.0575996,0.407461,0.526799,0.894255,0.836265,0.345432,0.111634,0.433246,0.735724,0.686923,0.0813722,0.598459,0.132299,0.481975,0.733839,0.446738,0.258979,0.648389,0.862277,0.495491,0.565247,0.0108994,0.0627639,0.0015043,0.0508405,0.167607,0.427208,0.675965,0.0864891,0.902718,0.342665,0.144089,0.310179,0.869464,0.0383438,0.146444,0.214896,0.149978,0.57969,0.950621,0.836901,0.661062,0.549079,0.9692,0.143037,0.282918,0.415938,0.402016,0.931307,0.278215,0.897507,0.496554,0.289114,0.960271,0.498058,0.339955,0.127878,0.925266,0.0159196,0.214367,0.827984,0.358584,0.358456,0.138163,0.228049,0.3968,0.284606,0.442945,0.546777,0.864296,0.393566,0.383678,0.525358,0.942645,0.352878,0.668395,0.225563,0.768815,0.0704106,0.15687,0.0470305,0.967917,0.653424,0.336145,0.928188,0.151483,0.6761,0.0560657,0.076749,0.692019,0.270433,0.904733,0.0506039,0.628888,0.0428958,0.278653,0.025688,0.327502,0.721598,0.572465,0.191798,0.115163,0.956143,0.717156,0.0578083,0.309021,0.38555,0.283371,0.0778365,0.455961,0.440241,0.124867,0.423878,0.0936657,0.461012,0.352066,0.245148,0.137112,0.408132,0.321897,0.829131,0.678565,0.22663,0.879735,0.307453,0.269526,0.158388,0.333141,0.597028,0.879985,0.905606,0.788827,0.995149,0.861749,0.505982,0.0529572,0.17077,0.891533,0.336328,0.248607,0.347494,0.77657,0.373474,0.771372,0.870236,0.834486,0.123438,0.115384,0.971598,0.53157,0.437281,0.800729,0.210135,0.663911,0.680464,0.517588,0.933437,0.838851,0.850729,0.530466,0.718837,0.756335,0.319292,0.713986,0.618085,0.825275,0.766943,0.788855,0.716808,0.103271,0.037462,0.0643016,0.879841,0.410936,0.835674,0.750077,0.245422,0.959112,0.865461,0.217019,0.490683,0.302742,0.017748,0.700818,0.966653,0.698212,0.218405,0.900091,0.537063,0.0691344,0.430557,0.2559,0.825469,0.749849,0.969886,0.443554,0.575123,0.736829,0.232409,0.291931,0.8401,0.269871,0.356233,0.719941,0.680807,0.191907,0.470018,0.926229,0.151019,0.335479,0.143248,0.641702,0.638221,0.160996,0.342519,0.604874,0.859208,0.560925,0.504965,0.396271,0.630059,0.935521,0.652171,0.455529,0.68537,0.622056,0.899083,0.260494,0.358885,0.131492,0.552425,0.198985,0.401363,0.908657,0.918926,0.0821702,0.100564,0.388945,0.00839915,0.251583,0.724423,0.151647,0.893285,0.362644,0.312644,0.235804,0.967518,0.171851,0.796729,0.472483,0.568122,0.426788,0.408004,0.220293,0.882317,0.0933744,0.842349,0.7814,0.353868,0.201234,0.912892,0.906293,0.400219,0.314255,0.81495,0.319146,0.396425,0.915514,0.70809,0.404824,0.167097,0.432514,0.556472,0.0603824,0.795158,0.869115,0.296187,0.762676,0.0409668,0.092916,0.235159,0.609089,0.519704,0.643164,0.829382,0.402022,0.736538,0.671732,0.183422,0.0904057,0.872966,0.0963134,0.996698,0.273185,0.410568,0.811648,0.592331,0.806993,0.727162,0.300421,0.211818,0.89426,0.732934,0.768289,0.954642,0.528092,0.637405,0.250829,0.290768,0.678371,0.343745,0.525927,0.287461,0.863449,0.169091,0.116843,0.265471,0.905629,0.788574,0.448892,0.996034,0.66154,0.545206,0.992733,0.934725,0.955774,0.804381,0.527056,0.762767,0.531543,0.827477,0.974585,0.425803,0.
560411,0.742874,0.380445,0.0885036,0.380279,0.631274,0.379272,0.0586503,0.975019,0.905199,0.346111,0.838468,0.0742902,0.462954,0.103939,0.979919,0.251528,0.552832,0.975953,0.913068,0.0980375,0.968686,0.847793,0.0538116,0.773067,0.374849,0.816579,0.304611,0.202326,0.791164,0.730414,0.762738,0.534038,0.110859,0.851241,0.914317,0.742133,0.230513,0.972968,0.717152,0.135713,0.319078,0.55562,0.210003,0.782032,0.65956,0.189922,0.0335596,0.212391,0.165875,0.946627,0.310429,0.134561,0.794421,0.36424,0.907629,0.16927,0.180819,0.212239,0.371597,0.971984,0.942653,0.134334,0.506022,0.053512,0.985576,0.420339,0.795645,0.216089,0.393307,0.512797,0.351802,0.712385,0.0684177,0.561804,0.494417,0.727977,0.751726,0.527976,0.940369,0.917602,0.474604,0.250798,0.0521631,0.269025,0.615038,0.959792,0.438295,0.795857,0.172031,0.809891,0.767841,0.114684,0.944226,0.273863,0.168196,0.929801,0.694202,0.963841,0.14589,0.0875088,0.476638,0.497692,0.799894,0.545056,0.0594965,0.294311,0.273033,0.811223,0.822287,0.213402,0.728824,0.296891,0.4642,0.780988,0.565916,0.0792378,0.740779,0.00421092,0.875095,0.91281,0.814102,0.642936,0.0274942,0.758328,0.916799,0.19569,0.68813,0.611001,0.159531,0.83402,0.69851,0.636169,0.331712,0.498404,0.181225,0.391209,0.792715,0.454259,0.202431,0.615002,0.667661,0.931256,0.911893,0.131861,0.712243,0.477809,0.211099,0.453023,0.48202,0.086194,0.365833,0.296122,0.72913,0.393327,0.0544507,0.64593,0.589017,0.74258,0.256931,0.748548,0.5766,0.955441,0.384718,0.908313,0.453845,0.565943,0.299521,0.24656,0.0202018,0.501953,0.861562,0.687863,0.433209,0.773455,0.819724,0.145452,0.251264,0.0308225,0.598475,0.733284,0.117016,0.964307,0.0294069,0.846147,0.357634,0.0838575,0.492076,0.946651,0.826438,0.749007,0.6952,0.403038,0.704448,0.0799172,0.311351,0.158294,0.64586,0.610872,0.404854,0.666062,0.112825,0.266416,0.353925,0.546033,0.0398708,0.173649,0.691485,0.291135,0.204471,0.28996,0.0244194,0.321488,0.254267,0.0538263,0.167634,0.611902,0.137684,0.659711,0.558553,0.964122,0.408718,0.253753,0.36716,0.113167,0.33367,0.678511,0.27146,0.97953,0.289383,0.676314,0.645592,0.402208,0.942729,0.999517,0.948241,0.9826,0.173165,0.639726,0.273735,0.377637,0.929686,0.298155,0.699124,0.183954,0.351981,0.866759,0.795855,0.489665,0.526469,0.354408,0.453787,0.935187,0.608161,0.820946,0.0483538,0.94183,0.499457,0.319814,0.92136,0.78884,0.996128,0.566952,0.191048,0.938857,0.566469,0.139289,0.921458,0.739634,0.779015,0.195193,0.117271,0.708702,0.493348,0.816395,0.892655,0.845329,0.683154,0.68851,0.334993,0.209623,0.0429185,0.78878,0.14481,0.651079,0.609726,0.193164,0.592909,0.109184,0.512978,0.51427,0.898024,0.509106,0.0812219,0.0890714,0.447963,0.647691,0.22836,0.369421,0.387325,0.00737571,0.564614,0.504596,0.716077,0.0579614,0.320992,0.608733,0.90329,0.00414524,0.297243,0.238284,0.213768,0.340162,0.0270636,0.358578,0.991241,0.63679,0.551742,0.58415,0.745974,0.0647205,0.0984199,0.643997,0.573826,0.179642,0.733069,0.0217898,0.827333,0.961429,0.391211,0.214658,0.968805,0.955825,0.719255,0.684882,0.0137861,0.0402461,0.293615,0.917076,0.0443914,0.590858,0.15536,0.25816,0.93102,0.182423,0.616738,0.92226,0.819213,0.16848,0.506411,0.565187,0.233201,0.60483,0.209184,0.807027,0.784472,0.942253,0.828817,0.611805,0.903682,0.220028,0.826463,0.872487,0.175852,0.545718,0.557369,0.189639,0.585964,0.850984,0.106715,0.630355,0.441842,0.262074,0.888515,0.372862,0.444498,0.505253,0.295122,0.263711,0.673733,0.801533,0.828898,0.906934,0.406363,0.0380823,0.713961,0.190836,0.980335,0.542778,0.802641,0.884017,0.762806,0.629104,0.756504,0.938658,0.174821
,0.313873,0.128297,0.760785,0.164858,0.235012,0.39114,0.6067,0.497086,0.279655,0.979562,0.941584,0.784908,0.274684,0.205295,0.458641,0.0762175,0.034193,0.365574,0.482581,0.0722753,0.0795353,0.673417,0.0526106,0.622313,0.476057,0.936628,0.385119,0.105161,0.693132,0.323778,0.279983,0.00700581,0.452075,0.0407677,0.171863,0.687087,0.431908,0.778563,0.184173,0.711563,0.758125,0.125757,0.496471,0.0328099,0.331052,0.955112,0.109027,0.365245,0.320686,0.591609,0.43752,0.400221,0.265025,0.490131,0.0225346,0.741083,0.426759,0.407654,0.846244,0.119891,0.731432,0.126226,0.126897,0.183507,0.166994,0.29876,0.870594,0.598902,0.0773236,0.0547667,0.310465,0.835449,0.180524,0.806936,0.868259,0.511575,0.762048,0.977287,0.87682,0.0827335,0.568895,0.31434,0.482955,0.83392,0.804471,0.505489,0.575003,0.23123,0.913143,0.421246,0.351121,0.644575,0.547473,0.478017,0.828082,0.714467,0.776778,0.698676,0.313369,0.854101,0.753443,0.623834,0.68955,0.933966,0.43077,0.557809,0.445542,0.192818,0.535096,0.322362,0.275551,0.103991,0.636702,0.758506,0.937911,0.441173,0.263995,0.512914,0.672403,0.177139,0.93416,0.0235232,0.821714,0.481633,0.50154,0.649796,0.196099,0.278318,0.348473,0.509468,0.132419,0.101916,0.133302,0.821969,0.035882,0.564072,0.379778,0.481424,0.75689,0.914874,0.803786,0.0324409,0.0188649,0.440488,0.790947,0.956776,0.881661,0.0549419,0.46969,0.554064,0.23208,0.40385,0.577587,0.0537943,0.885482,0.0791273,0.703591,0.0815817,0.357445,0.0520632,0.59105,0.489864,0.153979,0.724351,0.311834,0.189861,0.288423,0.691612,0.671284,0.045313,0.606486,0.47507,0.0777539,0.625351,0.915558,0.868701,0.582127,0.797219,0.923642,0.0518166,0.351283,0.155723,0.455666,0.92887,0.209517,0.341149,0.00799727,0.913108,0.42273,0.365443,0.965171,0.0137799,0.855307,0.11915,0.738131,0.16714,0.30901,0.0265547,0.858753,0.980295,0.0718678,0.465239,0.455365,0.149622,0.09059,0.370923,0.0183222,0.672717,0.168142,0.941965,0.724534,0.519425,0.0976875,0.1802,0.448295,0.307205,0.521348,0.456293,0.220312,0.944079,0.821735,0.185483,0.957859,0.677042,0.304633,0.69599,0.844182,0.613643,0.722545,0.702935,0.593938,0.794412,0.168174,0.0493029,0.944034,0.258764,0.420226,0.962356,0.931481,0.588368,0.904321,0.656015,0.107794,0.00200858,0.836214,0.556089,0.309213,0.357563,0.0123815,0.529526,0.301642,0.834117,0.715009,0.2595,0.511159,0.0196419,0.95549,0.355341,0.633285,0.678035,0.0582759,0.227223,0.472447,0.22645,0.276526,0.416481,0.485213,0.696752,0.378838,0.416694,0.285121,0.283159,0.0727087,0.392914,0.285167,0.908923,0.949003,0.594381,0.266486,0.961385,0.123906,0.568128,0.795501,0.838915,0.827628,0.30666,0.858557,0.783118,0.662001,0.491842,0.461153,0.720277,0.719065,0.9336,0.946726,0.995592,0.350081,0.43194,0.692344,0.728919,0.848634,0.977464,0.0120777,0.921343,0.370379,0.297245,0.830266,0.319382,0.891626,0.0967516,0.280767,0.0155317,0.664879,0.0762681,0.854447,0.492507,0.382928,0.713004,0.275625,0.0449291,0.204846,0.736777,0.765206,0.923912,0.670377,0.711932,0.919503,0.0204587,0.143872,0.611847,0.749378,0.992506,0.589312,0.761455,0.913849,0.95969,0.0587004,0.744115,0.279072,0.950326,0.840866,0.559839,0.965858,0.505745,0.636107,0.820305,0.998252,0.019035,0.533308,0.273877,0.0639641,0.738155,0.0106541,0.82917,0.662066,0.681032,0.541103,0.58157,0.70149,0.684975,0.193417,0.450868,0.677481,0.782728,0.212323,0.59133,0.742419,0.271024,0.335444,0.0214906,0.22135,0.176311,0.581329,0.187207,0.682056,0.217436,0.00751197,0.680308,0.236471,0.54082,0.954185,0.300435,0.278975,0.964839,0.129606,0.941041,0.64587,0.670708,0.522611,0.347361,0.355683,0.716028,0.798228,0.0331637,0.4
98756,0.0105517,0.624493,0.241175,0.281576,0.959938,0.262665,0.502925,0.136248,0.843995,0.690133,0.818304,0.0614309,0.697645,0.498612,0.297902,0.238465,0.452797,0.598338,0.51744,0.417636,0.727943,0.458482,0.0635058,0.398651,0.981093,0.410866,0.754334,0.69712,0.209095,0.787498,0.195877,0.219646,0.411991,0.437051,0.501222,0.371929,0.699717,0.00414726,0.508177,0.543712,0.69428,0.326481,0.605142,0.391925,0.825094,0.903045,0.63039,0.27789,0.501382,0.14783,0.695526,0.229325,0.606311,0.759032,0.627976,0.587404,0.169898,0.38231,0.284524,0.378993,0.169808,0.480401,0.598639,0.581799,0.917453,0.0998611,0.953728,0.617169,0.104008,0.461905,0.160881,0.798288,0.788386,0.766023,0.190213,0.61348,0.669068,0.820603,0.89137,0.17045,0.968432,0.586896,0.399775,0.574744,0.345927,0.0277516,0.162148,0.515825,0.410062,0.446672,0.894818,0.579869,0.927073,0.493457,0.161668,0.844526,0.593318,0.115396,0.461695,0.697326,0.577301,0.622576,0.495615,0.365687,0.388599,0.685827,0.979167,0.0576672,0.50643,0.870537,0.228117,0.474862,0.457433,0.627892,0.0496062,0.80336,0.655644,0.211754,0.319185,0.0657058,0.658426,0.214003,0.645575,0.585499,0.707459,0.807243,0.430025,0.300777,0.922639,0.89172,0.998103,0.49994,0.514296,0.493718,0.865627,0.902895,0.179545,0.844793,0.960562,0.685975,0.71533,0.188679,0.160838,0.172763,0.816572,0.210444,0.976123,0.472216,0.422198,0.295308,0.537922,0.0806241,0.50931,0.183497,0.666123,0.216769,0.99074,0.096148,0.517547,0.91338,0.987868,0.51565,0.413319,0.502164,0.00936818,0.278946,0.405059,0.188914,0.123739,0.365621,0.874889,0.83907,0.5543,0.0357269,0.0118324,0.370872,0.246171,0.987955,0.843088,0.668369,0.283263,0.38101,0.748993,0.792573,0.564507,0.415116,0.00934208,0.555248,0.511264,0.526889,0.468627,0.499132,0.042539,0.881947,0.00129587,0.0519072,0.160892,0.406355,0.240821,0.284632,0.771975,0.11571,0.123701,0.326276,0.151437,0.135534,0.697148,0.397608,0.123489,0.540236,0.0659769,0.406751,0.921247,0.81497,0.199324,0.485754,0.230086,0.208666,0.0410017,0.741351,0.735555,0.509629,0.240483,0.778094,0.391576,0.241779,0.830001,0.552468,0.648133,0.0708218,0.8371,0.420109,0.186532,0.960801,0.746385,0.337968,0.096335,0.443533,0.735576,0.219824,0.983769,0.801553,0.626575,0.905015,0.616523,0.825899,0.390769,0.846609,0.0345651,0.431771,0.58796,0.77012,0.9414,0.828443,0.548214,0.332976,0.070222,0.378215,0.885444,0.718355,0.449036,0.722543,0.138464,0.635568,0.683345,0.884849,0.973536,0.77968,0.328381,0.709112,0.999503,0.31215,0.510665,0.626079,0.217166,0.127188,0.451978,0.607935,0.973798,0.486543,0.039706,0.561758,0.256663,0.981106,0.390201,0.804876,0.314082,0.460423,0.183091,0.199525,0.178779,0.632128,0.922069,0.317243,0.267696,0.605413,0.202091,0.241232,0.385093,0.530473,0.950344,0.384596,0.842623,0.46101,0.010675,0.0597887,0.588198,0.462653,0.667724,0.561996,0.949196,0.70743,0.123754,0.205858,0.688536,0.513955,0.0107347,0.00261733,0.974378,0.193826,0.202142,0.153157,0.825953,0.124211,0.4704,0.0936489,0.729624,0.672491,0.334881,0.114717,0.202964,0.285225,0.499313,0.0455868,0.746235,0.509988,0.105376,0.334433,0.972641,0.773099,0.896429,0.921837,0.480529,0.0201827,0.127695,0.169065,0.534138,0.13843,0.171682,0.508516,0.332256,0.373825,0.661673,0.158209,0.498036,0.132073,0.251858,0.22766,0.804564,0.586739,0.342377,0.00752743,0.871964,0.84169,0.0531142,0.618198,0.351679,0.15849,0.952631,0.32432,0.931589,0.84906,0.246157,0.412118,0.869242,0.373852,0.581183,0.40338,0.512282,0.752865,0.911896,0.844537,0.12669,0.573569,0.00274616,0.624725,0.705642,0.254604,0.852385,0.510206,0.841343,0.194762,0.517733,0.713306,0.036452,0.5
70847,0.331504,0.388131,0.729337,0.284135,0.71245,0.660926,0.133195,0.958607,0.0730442,0.00243725,0.332459,0.654227,0.405817,0.84474,0.407092,0.317714,0.689278,0.533782,0.891283,0.692024,0.158507,0.596925,0.946628,0.0108919,0.107131,0.78797,0.205654,0.624864,0.501276,0.242106,0.195711,0.832781,0.630236,0.925048,0.116916,0.342687,0.585974,0.250111,0.301294,0.659019,0.252548,0.633752,0.313246,0.658366,0.478493,0.720338,0.976079,0.16777,0.25412,0.867362,0.859794,0.412626,0.464288,0.806422,0.423518,0.571418,0.594392,0.629172,0.196282,0.0956682,0.871278,0.391993,0.928449,0.501514,0.317042,0.0453651,0.844201,0.903016,0.295476,0.145495,0.562035,0.548024,0.779247,0.87528,0.20639,0.25774,0.595618,0.18247,0.42551,0.849738,0.0498321,0.285304,0.262364,0.51412,0.0917256,0.685883,0.0855381,0.686117,0.315055,0.28182,0.781786,0.186333,0.673814,0.710235,0.687847,0.990855,0.7556,0.532048,0.893872,0.0510759,0.677542,0.455906,0.5991,0.456789,0.331187,0.80549,0.714529,0.926805,0.98796,0.140039,0.776543,0.037792,0.425343,0.0389067,0.551912,0.517069,0.724789,0.63745,0.203186,0.0398439,0.91927,0.984972,0.226176,0.593084,0.695206,0.914023,0.583939,0.450806,0.446071,0.477811,0.501882,0.123613,0.933717,0.100982,0.580402,0.264904,0.906473,0.294931,0.191709,0.894433,0.434971,0.968251,0.932225,0.860314,0.00715777,0.484136,0.377382,0.731947,0.121586,0.580569,0.771791,0.0408564,0.56554,0.997967,0.63394,0.260747,0.911991,0.217879,0.711553,0.358061,0.69569,0.213435,0.481675,0.629407,0.314417,0.0620769,0.894311,0.22089,0.357008,0.0860195,0.115323,0.791979,0.0542706,0.0475474,0.652293,0.0614284,0.531684,0.0296752,0.793375,0.65327,0.610244,0.565166,0.694127,0.175784,0.563134,0.328067,0.436531,0.475124,0.545946,0.148084,0.833186,0.241636,0.361519,0.31486,0.871044,0.675936,0.376937,0.765355,0.896826,0.733946,0.851374,0.0121483,0.525925,0.905645,0.0596957,0.178217,0.967073,0.59138,0.207893,0.760449,0.24465,0.818136,0.325615,0.938776,0.99392,0.888749,0.266843,0.430451,0.363873,0.812789,0.578535,0.197059,0.0544259,0.940053,0.511919,0.92547,0.615989,0.888857,0.690825,0.512815,0.622802,0.542199,0.524963,0.148727,0.447844,0.584659,0.326945,0.414917,0.176038,0.534837,0.175366,0.420688,0.352973,0.500981,0.359465,0.346894,0.38973,0.626308,0.777345,0.753603,0.439097,0.35588,0.950662,0.493523,0.295933,0.462582,0.418993,0.911922,0.351439,0.109817,0.424737,0.974241,0.652016,0.9497,0.122968,0.0998598,0.534359,0.449913,0.514777,0.710397,0.98475,0.690143,0.131085,0.337723,0.191124,0.49055,0.684617,0.580854,0.116857,0.461962,0.334457,0.555955,0.817842,0.285119,0.0494774,0.113775,0.747701,0.46847,0.0256975,0.0991392,0.578287,0.450435,0.0733803,0.230303,0.400134,0.196348,0.330163,0.934493,0.646261,0.84494,0.64489,0.631011,0.535083,0.775975,0.968735,0.726207,0.266525,0.653352,0.30706,0.383383,0.115314,0.641517,0.939337,0.933156,0.926636,0.988814,0.0469315,0.674337,0.457285,0.072629,0.773476,0.0355717,0.523063,0.846856,0.265875,0.923198,0.0432049,0.596038,0.857691,0.689466,0.440978,0.502581,0.320477,0.97606,0.278557,0.289212,0.702267,0.545082,0.942564,0.00932746,0.928464,0.0578781,0.650845,0.867801,0.991034,0.577481,0.856616,0.0379657,0.251818,0.3139,0.110595,0.0252941,0.349472,0.633658,0.872151,0.615347,0.556856,0.915356,0.211385,0.414547,0.604822,0.652362,0.917128,0.925299,0.628423,0.195685,0.214511,0.33069,0.740767,0.157075,0.340017,0.669231,0.214953,0.990862,0.537032,0.205988,0.568343,0.393648,0.243953,0.820161,0.707548,0.354548,0.845455,0.0570198,0.988206,0.717606,0.672367,0.545062,0.632961,0.883751,0.95961,0.237783,0.536114,0.876738,0.163082,0
.164537,0.0724231,0.377593,0.495227,0.81319,0.534669,0.835244,0.48242,0.749622,0.826106,0.0194522,0.955609,0.394449,0.4131,0.199563,0.21461,0.120648,0.554111,0.0600653,0.177667,0.542317,0.777671,0.850034,0.0873793,0.410632,0.733786,0.046989,0.648415,0.269899,0.923727,0.811497,0.434436,0.99615,0.189091,0.929663,0.80934,0.723759,0.764907,0.29176,0.473381,0.591013,0.311212,0.428991,0.985462,0.724312,0.628553,0.200073,0.84496,0.182664,0.260138,0.0226273,0.724981,0.0378088,0.872661,0.81236,0.448441,0.606447,0.859349,0.0968562,0.876346,0.783076,0.908354,0.310782,0.779227,0.0974444,0.240445,0.588566,0.821204,0.00535189,0.880327,0.294585,0.596365,0.191539,0.723576,0.581827,0.915851,0.352129,0.7819,0.760811,0.534793,0.0420377,0.783439,0.259774,0.0798465,0.6561,0.0721344,0.528287,0.262547,0.931484,0.625144,0.138893,0.71456,0.533497,0.449675,0.493787,0.630942,0.690121,0.0823532,0.452145,0.695472,0.96268,0.746731,0.291837,0.154219,0.470307,0.873665,0.0700704,0.822436,0.655565,0.830882,0.357229,0.697602,0.61432,0.617003,0.777449,0.27042,0.689138,0.305736,0.532967,0.620621,0.93088,0.67186,0.335181,0.464377,0.121536,0.828968,0.0953186,0.811656,0.911321,0.547464,0.507129,0.874001,0.294195,0.798966,0.0282199,0.764501,0.672631,0.0982904,0.586937,0.328195,0.929172,0.944166,0.0257976,0.543492,0.561169,0.803246,0.813913,0.250307,0.108983,0.34688,0.870928,0.0398625,0.0187403,0.206109,0.504239,0.140276,0.0350768,0.599558,0.951933,0.946398,0.147022,0.459061,0.820399,0.441217,0.258027,0.848619,0.205718,0.930658,0.946909,0.792655,0.258854,0.876081,0.736821,0.284651,0.419574,0.29799,0.0878976,0.233486,0.548296,0.19688,0.580366,0.419224,0.236743,0.599106,0.625333,0.740982,0.739383,0.66041,0.34054,0.691315,0.606808,0.487562,0.150376,0.427206,0.928779,0.408404,0.275825,0.134497,0.339062,0.222734,0.927152,0.597916,0.0988154,0.663972,0.882567,0.518389,0.961962,0.970464,0.751875,0.510258,0.167344,0.332241,0.929483,0.404087,0.931348,0.554816,0.145069,0.67073,0.215225,0.485609,0.362046,0.822033,0.973172,0.512422,0.24924,0.901951,0.920826,0.525065,0.0364476,0.259888,0.747799,0.963599,0.857804,0.846614,0.627572,0.74037,0.365003,0.589534,0.710835,0.116878,0.0997923,0.878179,0.44912,0.0292748,0.282266,0.380467,0.58409,0.427335,0.0511979,0.799316,0.912944,0.413244,0.621349,0.886116,0.925666,0.870588,0.788067,0.846492,0.395653,0.824514,0.10638,0.143452,0.788114,0.964183,0.990066,0.415685,0.704553,0.355069,0.00521932,0.415388,0.471948,0.105012,0.293567,0.921067,0.134286,0.575833,0.301535,0.718377,0.00316787,0.352733,0.517693,0.916112,0.765976,0.139041,0.802228,0.691642,0.00962972,0.590295,0.538133,0.405283,0.41481,0.644513,0.548735,0.202923,0.608696,0.538801,0.618609,0.31325,0.89387,0.623828,0.728638,0.365818,0.72884,0.0222045,0.286885,0.863126,0.598037,0.58842,0.581503,0.601205,0.941152,0.0991956,0.517317,0.707128,0.238237,0.319546,0.39877,0.247867,0.909841,0.936903,0.653149,0.324651,0.581416,0.201884,0.527574,0.190113,0.740685,0.146183,0.503362,0.634555,0.770011,0.232,0.000372606,0.498851,0.254205,0.287258,0.361977,0.852242,0.875677,0.94348,0.453447,0.81683,0.0426753,0.970765,0.523958,0.280912,0.29031,0.922728,0.528779,0.200151,0.859632,0.181928,0.524802,0.441048,0.383812,0.052376,0.631161,0.124497,0.198559,0.134523,0.759052,0.96857,0.366523,0.759425,0.46742,0.620728,0.0466823,0.829397,0.47297,0.92236,0.772877,0.926417,0.739189,0.815552,0.897182,0.263147,0.0964643,0.187492,0.185875,0.625243,0.387644,0.0455069,0.807172,0.912446,0.486555,0.190984,0.964822,0.117716,0.315481,0.163381,0.252239,0.0745333,0.13195,0.618762,0.833958,0.599
37,0.239491,0.88064,0.428767,0.712461,0.803,0.201644,0.638878,0.542189,0.0171959,0.53606,0.805336,0.11366,0.723552,0.991212,0.738904,0.111195,0.0367188,0.546075,0.0236413,0.523274,0.737059,0.988463,0.64099,0.0525406,0.151844,0.893229,0.127074,0.283794,0.511991,0.961032,0.883164,0.751482,0.841672,0.311931,0.463942,0.644672,0.513575,0.10282,0.186861,0.530771,0.63888,0.992197,0.644432,0.362432,0.983409,0.383335,0.473627,0.0201281,0.92941,0.497268,0.543402,0.66647,0.485731,0.184391,0.71901,0.637575,0.07762,0.846084,0.921369,0.589611,0.807116,0.804533,0.341093,0.648788,0.116465,0.805035,0.293459,0.63004,0.907855,0.48032,0.160811,0.546735,0.472518,0.805243,0.909167,0.455927,0.188578,0.382794,0.476055,0.117988,0.880062,0.0194568,0.784458,0.365794,0.203848,0.503468,0.00336864,0.281468,0.349552,0.924738,0.871079,0.156668,0.729271,0.212172,0.805455,0.845735,0.0172073,0.0989148,0.475775,0.925063,0.579235,0.636587,0.471798,0.0517527,0.44183,0.380964,0.50768,0.630407,0.763758,0.983734,0.748396,0.64382,0.00319122,0.532854,0.00961397,0.207039,0.0363216,0.0129826,0.488508,0.385874,0.93772,0.359587,0.542541,0.666991,0.571759,0.347997,0.512726,0.588966,0.446912,0.988501,0.514029,0.0261466,0.625088,0.985827,0.0778993,0.0669176,0.366791,0.585579,0.697325,0.13055,0.569313,0.445721,0.77437,0.572505,0.978574,0.783984,0.779544,0.0148959,0.796966,0.268052,0.400769,0.734687,0.627638,0.943311,0.401678,0.199398,0.291308,0.914404,0.788364,0.738219,0.902905,0.302393,0.764366,0.527993,0.28822,0.842265,0.594911,0.655011,0.427844,0.292236,0.78556,0.997157,0.737957,0.55993,0.569662,0.716531,0.343914,0.349206,0.731427,0.140881,0.617257,0.132196,0.875567,0.244896,0.0755073,0.277245,0.444293,0.366815,0.191649,0.232657,0.105034,0.0945539,0.53505,0.8694,0.622547,0.823269,0.711665,0.217458,0.47828,0.139509,0.509694,0.263841,0.136666,0.247651,0.823771,0.706328,0.964182,0.167685,0.0555334,0.695609,0.308566,0.672791,0.827805,0.184133,0.917686,0.903313,0.461378,0.36198,0.270128,0.653027,0.594637,0.375161,0.747581,0.129687,0.244561,0.370128,0.952956,0.956226,0.587586,0.431237,0.0957345,0.09728,0.695077,0.2324,0.344931,0.518848,0.938728,0.309113,0.686533,0.994262,0.00472165,0.995099,0.667052,0.832527,0.179232,0.584739,0.73584,0.640611,0.946719,0.00596731,0.293637,0.541355,0.381129,0.0412181,0.671042,0.62569,0.411346,0.623998,0.581916,0.998932,0.0552349,0.67765,0.0962119,0.750312,0.910051,0.441143,0.26916,0.848779,0.750255,0.955694,0.843041,0.754977,0.950793,0.510093,0.587504,0.130025,0.0948317,0.323344,0.770636,0.0415503,0.329311,0.064273,0.582906,0.71044,0.105491,0.253948,0.33613,0.516837,0.877946,0.918046,0.515769,0.933181,0.595696,0.611981,0.683493,0.505747,0.0531237,0.952653,0.354526,0.803379,0.908347,0.197566,0.558356,0.85914,0.707659,0.14586,0.989165,0.802491,0.469204,0.7598,0.844041,0.798516,0.824073,0.426947,0.508956,0.929564,0.680894,0.845086,0.446401,0.558841,0.763131,0.962171,0.492021,0.358827,0.574152,0.175515,0.864574,0.627275,0.128168,0.219099,0.430654,0.036515,0.416665,0.989011,0.895655,0.124325,0.134871,0.884819,0.926815,0.604075,0.64462,0.770856,0.402591,0.468693,0.197803,0.911547,0.398257,0.878698,0.756632,0.844659,0.437538,0.519763,0.806829,0.92956,0.87859,0.380981,0.105074,0.743164,0.00825601,0.233242,0.962264,0.43891,0.269757,0.378929,0.427921,0.165412,0.503254,0.562792,0.0502314,0.430069,0.166868,0.694851,0.200925,0.569459,0.163544,0.398729,0.481005,0.561801,0.277426,0.237638,0.40646,0.714964,0.757401,0.213289,0.644524,0.635992,0.59427,0.749598,0.379156,0.602526,0.982841,0.341419,0.0414364,0.252598,0.720348,0.4693
58,0.41801,0.223602,0.0321498,0.468241,0.653671,0.199017,0.163092,0.854596,0.768476,0.326636,0.253325,0.249482,0.888438,0.530751,0.487119,0.294898,0.245716,0.24452,0.508187,0.89024,0.880512,0.102457,0.639838,0.259668,0.704983,0.622679,0.601087,0.746419,0.875276,0.321435,0.215777,0.293286,0.545037,0.247927,0.761527,0.198708,0.446944,0.92462,0.0533041,0.21542,0.251256,0.306629,0.464902,0.139694,0.83738,0.952021,0.434592,0.083096,0.196542,0.942779,0.973336,0.0770539,0.0452355,0.613174,0.336722,0.750218,0.235852,0.937809,0.496638,0.111129,0.259244,0.712415,0.404415,0.804281,0.960341,0.165943,0.00298909,0.407285,0.0905624,0.0562932,0.622706,0.341819,0.362922,0.0876075,0.481513,0.200302,0.0396288,0.916104,0.283398,0.236171,0.858883,0.256734,0.313224,0.904118,0.869908,0.649946,0.654337,0.10576,0.587755,0.150974,0.216889,0.846999,0.863389,0.621304,0.65128,0.82373,0.787247,0.654269,0.231015,0.877809,0.710563,0.853721,0.219628,0.0734848,0.941328,0.70114,0.273787,0.980957,0.617244,0.557186,0.217128,0.476127,0.81392,0.530352,0.380245,0.683828,0.180298,0.0345819,0.789588,0.768053,0.185556,0.0064776,0.615052,0.048945,0.627782,0.266333,0.872675,0.415029,0.920602,0.10369,0.292838,0.631165,0.957411,0.512466,0.704649,0.89874,0.213606,0.978437,0.879697,0.83085,0.535622,0.0968244,0.306978,0.349542,0.627177,0.687223,0.0333702,0.807475,0.721805,0.822959,0.575528,0.907361,0.829436,0.190581,0.956306,0.457218,0.456913,0.828981,0.872247,0.377515,0.932671,0.165085,0.00868001,0.890083,0.67755,0.713329,0.788822,0.891156,0.691766,0.668519,0.722007,0.227388,0.765343,0.0289841,0.576931,0.39252,0.716207,0.610301,0.199995,0.438012,0.433259,0.775523,0.345373,0.262695,0.966104,0.301679,0.719913,0.423017,0.130659,0.59216,0.800532,0.0633308,0.757245,0.809212,0.953413,0.434795,0.522542,0.742235,0.325951,0.214308,0.410754,0.0479576,0.441696,0.176097,0.0769417,0.0186266,0.568617,0.793149,0.628927,0.768612,0.23116,0.0621864,0.544135,0.576533,0.324882,0.510239,0.878211,0.0447951,0.933256,0.00887096,0.636955,0.733788,0.0722017,0.3942,0.543001,0.0256151,0.828995,0.0655426,0.767851,0.154946,0.27985,0.178605,0.202903,0.721546,0.354702,0.279845,0.740173,0.92332,0.0729935,0.3691,0.691932,0.304154,0.431287,0.236067,0.880687,0.756168,0.746305,0.758898,0.800964,0.679561,0.767769,0.437919,0.41335,0.839971,0.832119,0.95635,0.865586,0.661113,0.0218929,0.633437,0.816059,0.301743,0.812042,0.0189617,0.0232898,0.166744,0.298807,0.763463,0.0900634,0.3718,0.132563,0.781995,0.675954,0.56385,0.0180616,0.556641,0.320018,0.764367,0.315539,0.120982,0.443928,0.0833085,0.558901,0.857278,0.92328,0.391019,0.813628,0.788866,0.0521323,0.835521,0.422303,0.868191,0.137264,0.234344,0.887153,0.160554,0.401088,0.185959,0.924017,0.491151,0.557759,0.0565801,0.273146,0.233713,0.62043,0.291208,0.790354,0.940448,0.0555749,0.105893,0.0614304,0.499503,0.189202,0.620331,0.356781,0.112481,0.0113504,0.170409,0.901347,0.0634827,0.00593062,0.32365,0.931674,0.143195,0.557994,0.818826,0.303749,0.959081,0.00478553,0.227766,0.450233,0.562545,0.284346,0.723379,0.796258,0.904776,0.0145867,0.586612,0.845225,0.0701616,0.692505,0.906655,0.569665,0.881706,0.526986,0.926446,0.994187,0.538337,0.0968556,0.895534,0.60182,0.102786,0.219184,0.533493,0.245981,0.777178,0.35232,0.549731,0.736259,0.357105,0.777497,0.186492,0.91965,0.0618432,0.909871,0.715908,0.96662,0.924457,0.302519,0.811844,0.994619,0.995024,0.7185,0.564284,0.87673,0.245486,0.49073,0.870917,0.783823,0.587586,0.766452,0.385642,0.690372,0.985636,0.919136,0.936353,0.762813,0.271455,0.486084,0.499073,0.62856,0.26358,0.685564,0.54821
,0.325424,0.595435,0.264118,0.292043,0.519892,0.566637,0.103888,0.514511,0.561661,0.822387,0.0787945,0.438391,0.0678735,0.569524,0.309308,0.851696,0.15711,0.0757599,0.237339,0.847482,0.0613958,0.156474,0.783835,0.824209,0.427929,0.269918,0.323282,0.0564896,0.533499,0.00884594,0.6047,0.858923,0.604281,0.868817,0.150966,0.124173,0.435454,0.254854,0.638683,0.997114,0.0772411,0.717478,0.435505,0.145115,0.287002,0.744813,0.996811,0.444112,0.820573,0.23415,0.291594,0.881969,0.390624,0.0754289,0.706178,0.818553,0.345347,0.0294597,0.875043,0.878846,0.0383057,0.479743,0.737769,0.642586,0.34856,0.888735,0.766759,0.784014,0.143588,0.405442,0.781128,0.220829,0.12292,0.216633,0.365944,0.409923,0.961446,0.362755,0.854035,0.782019,0.596905,0.145629,0.663988,0.987529,0.221058,0.370166,0.806082,0.566406,0.399626,0.681125,0.445252,0.437932,0.160868,0.183021,0.0805179,0.509428,0.0717554,0.847277,0.293441,0.215344,0.252719,0.0745694,0.436173,0.37564,0.291203,0.802117,0.785563,0.252649,0.164872,0.639598,0.0346683,0.761776,0.785227,0.698656,0.749305,0.00628551,0.0688225,0.555387,0.572691,0.468448,0.236512,0.0179431,0.90638,0.39738,0.200964,0.986898,0.906808,0.272719,0.834175,0.200249,0.488063,0.0868941,0.274818,0.924236,0.462534,0.566021,0.726353,0.248097,0.81867,0.891224,0.887694,0.853338,0.653001,0.672922,0.551994,0.402306,0.679207,0.620817,0.957693,0.251898,0.0892653,0.194205,0.269842,0.995645,0.591585,0.470805,0.982543,0.498392,0.743525,0.816718,0.698641,0.231587,0.903612,0.97346,0.155823,0.366146,0.53948,0.882176,0.614242,0.35815,0.7734,0.501937,0.211488,0.426401,0.174859,0.763482,0.828707,0.854066,0.384299,0.786399,0.105964,0.473565,0.980604,0.375806,0.46921,0.572189,0.846611,0.451753,0.0705816,0.590136,0.268471,0.769223,0.821723,0.172082,0.742682,0.977547,0.538228,0.282163,0.859723,0.152471,0.640313,0.633123,0.654408,0.851801,0.0595239,0.829266,0.615283,0.888231,0.683332,0.999583,0.67463,0.789296,0.473147,0.655234,0.165102,0.942357,0.227423,0.0117134,0.39411,0.298005,0.601849,0.662581,0.067228,0.423573,0.834663,0.809911,0.401119,0.372891,0.0920734,0.260842,0.525362,0.732386,0.893965,0.179769,0.584187,0.953488,0.00903545,0.199471,0.841719,0.692367,0.199053,0.516349,0.481664,0.672201,0.171583,0.646766,0.614558,0.399007,0.658479,0.00866766,0.697012,0.260328,0.671248,0.76424,0.683901,0.505911,0.574151,0.0850201,0.878802,0.666224,0.345862,0.404164,0.39861,0.239826,0.583934,0.982797,0.193315,0.592969,0.182268,0.0350338,0.285336,0.381321,0.551383,0.767,0.0535218,0.722966,0.413766,0.66808,0.121973,0.0722453,0.676747,0.818985,0.332574,0.347995,0.583225,0.0164747,0.853907,0.157375,0.101495,0.732709,0.823599,0.447357,0.136873,0.222209,0.687183,0.720807,0.205007,0.880498,0.313776,0.387275,0.915532,0.599112,0.768596,0.466914,0.366112,0.822118,0.189881,0.779878,0.490197,0.311854,0.852124,0.166945,0.130839,0.184697,0.51494,0.714064,0.201172,0.368847,0.871439,0.302667,0.101556,0.695039,0.750024,0.238429,0.917248,0.437207,0.959235,0.122255,0.317704,0.273011,0.50953,0.233236,0.872123,0.278126,0.70015,0.238236,0.100244,0.890031,0.0181142,0.590441,0.201885,0.870238,0.757386,0.332724,0.0549353,0.272326,0.0467876,0.256107,0.641172,0.918227,0.558774,0.742728,0.613266,0.308798,0.981157,0.530514,0.746005,0.940392,0.652769,0.0637087,0.213404,0.162299,0.296945,0.085527,0.440425,0.997095,0.323763,0.540669,0.887126,0.341877,0.13111,0.0890107,0.212115,0.888496,0.421734,0.26705,0.160821,0.468522,0.523157,0.801994,0.386749,0.0819319,0.544722,1.47875e-05,0.39073,0.525879,0.530529,0.136734,0.466271,0.183298,0.200443,0.679675,0.345597,0.49
7388,0.765202,0.786022,0.494482,0.0889642,0.32669,0.381608,0.430841,0.4578,0.470619,0.642956,0.346296,0.892353,0.910006,0.507117,0.360875,0.433163,0.309111,0.747624,0.515095,0.853833,0.747639,0.905825,0.379712,0.278168,0.0425594,0.845983,0.461466,0.243002,0.525657,0.807063,0.74039,0.290859,0.593084,0.234873,0.379823,0.919775,0.616481,0.810664,0.377575,0.0870997,0.45362,0.72387,0.979453,0.363626,0.230987,0.340329,0.796789,0.540098,0.0879531,0.311884,0.393931,0.835592,0.21771,0.773643,0.11376,0.260269,0.619625,0.575226,0.503271,0.145282,0.382289,0.243661,0.436141,0.975374,0.478534,0.815964,0.895148,0.0950148,0.626627,0.272723,0.182115,0.0802472,0.996593,0.161568,0.443873,0.227581,0.501896,0.240662,0.767679,0.589849,0.552547,0.16161,0.425442,0.770256,0.935253,0.539202,0.0305249,0.554878,0.114428,0.533796,0.70016,0.496717,0.777458,0.1363,0.472091,0.255992,0.952264,0.367239,0.351007,0.578891,0.639962,0.533121,0.659138,0.636556,0.694689,0.103011,0.864136,0.196585,0.343673,0.631815,0.786434,0.89622,0.793425,0.211876,0.666476,0.728678,0.751078,0.697001,0.283555,0.865506,0.230797,0.983715,0.362224,0.00825506,0.120016,0.834315,0.264247,0.0722795,0.201554,0.615254,0.651171,0.841516,0.148375,0.310309,0.478072,0.843063,0.413321,0.342208,0.0396484,0.756994,0.974023,0.826083,0.653214,0.767449,0.0379586,0.31969,0.496126,0.789036,0.0166908,0.779682,0.654543,0.247488,0.763397,0.0167663,0.255743,0.883413,0.851081,0.51999,0.955692,0.0526346,0.135243,0.606863,0.894151,0.283618,0.917172,0.372222,0.126681,0.330493,0.71443,0.16633,0.0874867,0.688454,0.992413,0.740701,0.455902,0.0303711,0.0603906,0.952029,0.819408,0.0770814,0.73171,0.47395,0.324569,0.495107,0.490717,0.580312,0.37852,0.341797,0.100302,0.334212,0.394432,0.235546,0.941075,0.288583,0.519164,0.858247,0.660805,0.645845,0.188739,0.375235,0.812175,0.276226,0.0636887,0.804588,0.0169267,0.519591,0.834959,0.0773174,0.471619,0.654367,0.154399,0.20333,0.128317,0.478968,0.698437,0.619033,0.0592807,0.076957,0.960831,0.159583,0.411169,0.355263,0.395129,0.352244,0.643845,0.914293,0.21049,0.30465,0.560139,0.39923,0.679885,0.372314,0.675456,0.743574,0.176902,0.692383,0.263165,0.0118608,0.7697,0.734784,0.666227,0.924099,0.938114,0.794544,0.403067,0.636551,0.413578,0.462347,0.713508,0.374409,0.621931,0.124677,0.729671,0.0170597,0.476921,0.373517,0.931353,0.687411,0.678167,0.491491,0.086641,0.358052,0.863805,0.762097,0.101627,0.0407067,0.454479,0.364791,0.0525675,0.224179,0.0995759,0.718795,0.148278,0.03769,0.513339,0.551345,0.674241,0.926917,0.013692,0.38775,0.301326,0.635623,0.512427,0.0309971,0.652682,0.989348,0.404514,0.584035,0.676759,0.082681,0.0755261,0.7634,0.440733,0.939331,0.525497,0.54236,0.980038,0.979976,0.907152,0.0326054,0.204155,0.00672737,0.7514,0.352433,0.0444174,0.264739,0.903778,0.718659,0.191656,0.91747,0.106409,0.492982,0.553092,0.618836,0.523979,0.205774,0.608184,0.928493,0.789809,0.284943,0.0111741,0.865335,0.0483437,0.451908,0.804667,0.573841,0.994268,0.784704,0.553817,0.901419,0.81731,0.757973,0.908147,0.56871,0.110406,0.952564,0.83345,0.0141832,0.671223,0.0251061,0.931653,0.777631,0.518088,0.484745,0.396467,0.0420673,0.690519,0.00465117,0.97056,0.480329,0.289595,0.981734,0.345664,0.337938,0.433642,0.150331,0.911779,0.42791,0.935035,0.465596,0.329329,0.752345,0.223569,0.237475,0.321055,0.333975,0.190039,0.154505,0.348158,0.861262,0.179611,0.279811,0.638893,0.697699,0.764555,0.0353607,0.739766,0.455075,0.0400119,0.710327,0.935403,0.329606,0.692061,0.281067,0.667545,0.125703,0.431398,0.579324,0.553613,0.366433,0.0449201,0.882941,0.118778,0.268489
,0.120417,0.439833,0.602463,0.310456,0.594338,0.950621,0.171718,0.773948,0.230432,0.810612,0.471647,0.994987,0.845972,0.211413,0.450062,0.885984,0.92174,0.385465,0.215591,0.613801,0.666533,0.883135,0.739504,0.0979304,0.462459,0.293117,0.464363,0.507379,0.176058,0.583141,0.775868,0.296475,0.0229745,0.378332,0.606931,0.617312,0.328953,0.778649,0.391261,0.559385,0.589261,0.862908,0.554372,0.435233,0.0743214,0.00443373,0.321217,0.996061,0.389899,0.536808,0.609862,0.0564315,0.419943,0.349366,0.154362,0.882402,0.642483,0.618725,0.389781,0.818541,0.201867,0.165649,0.115016,0.224841,0.543981,0.721948,0.842153,0.872934,0.500597,0.233414,0.432318,0.0898578,0.0963221,0.98669,0.525091,0.170643,0.991124,0.846309,0.166705,0.381023,0.383117,0.776567,0.437454,0.80306,0.125934,0.591816,0.685462,0.768417,0.210541,0.0752434,0.586958,0.412408,0.240893,0.701974,0.63725,0.784874,0.423922,0.479403,0.657807,0.924519,0.712817,0.0901255,0.0143766,0.809139,0.0768156,0.539468,0.979783,0.0679393,0.385776,0.146488,0.448962,0.768893,0.923055,0.886416,0.571953,0.0489885,0.478232,0.257415,0.817405,0.688774,0.332658,0.404363,0.101182,0.573551,0.106338,0.738432,0.358424,0.53026,0.217835,0.0162316,0.454778,0.930652,0.106357,0.469155,0.739791,0.183173,0.00862271,0.719574,0.251112,0.394399,0.866061,0.700074,0.163292,0.789116,0.58649,0.735244,0.838105,0.0647226,0.992659,0.65551,0.753496,0.325317,0.0598733,0.854678,0.898868,0.166211,0.59311,0.257292,0.69647,0.810944,0.273524,0.151249,0.741596,0.379881,0.620404,0.481387,0.563054,0.629027,0.200961,0.814166,0.0234255,0.0670224,0.51424,0.186717,0.856139,0.10073,0.921962,0.694243,0.165453,0.914621,0.349753,0.918949,0.239938,0.409627,0.773627,0.138805,0.575838,0.366737,0.396098,0.272308,0.177682,0.669621,0.423557,0.919278,0.0495021,0.0439607,0.400665,0.612556,0.672987,0.601626,0.426721,0.696413,0.668648,0.940961,0.88313,0.524787,0.0416908,0.805092,0.21903,0.207143,0.719712,0.568784,0.126092,0.95965,0.978411,0.899719,0.0984551,0.554248,0.266457,0.494553,0.826556,0.444138,0.164174,0.250113,0.363416,0.213676,0.294074,0.764081,0.826232,0.967061,0.365707,0.252953,0.663474,0.0343556,0.193914,0.546604,0.559143,0.235605,0.351695,0.778173,0.442748,0.0714074,0.346957,0.56884,0.0310572,0.325368,0.468559,0.129512,0.879616,0.735016,0.624065,0.706172,0.179154,0.788239,0.956286,0.54257,0.0019149,0.250359,0.306652,0.828147,0.217421,0.672359,0.0810996,0.880894,0.706714,0.275013,0.427498,0.265857,0.510618,0.779193,0.0440302,0.953366,0.850601,0.390987,0.522206,0.881658,0.716355,0.990765,0.0111703,0.595971,0.725782,0.635235,0.302143,0.904936,0.423474,0.258429,0.447506,0.425389,0.508788,0.754158,0.253536,0.726209,0.426517,0.334635,0.607103,0.133231,0.609649,0.0346014,0.399088,0.120267,0.813795,0.443119,0.0736325,0.664396,0.834106,0.595838,0.546054,0.550461,0.586604,0.557224,0.146432,0.312385,0.192459,0.448575,0.217321,0.615933,0.707004,0.664828,0.0413219,0.215793,0.418985,0.294857,0.942002,0.845502,0.629493,0.549105,0.978733,0.239141,0.583706,0.377822,0.359408,0.397501,0.82094,0.43304,0.0618966,0.655046,0.0288789,0.60795,0.205507,0.615483,0.165174,0.351939,0.927868,0.357633,0.800514,0.145189,0.973566,0.507519,0.810017,0.0148878,0.723311,0.229002,0.309745,0.665313,0.0745044,0.939238,0.214418,0.0532378,0.178379,0.798124,0.431059,0.537787,0.195625,0.251999,0.970828,0.257522,0.907045,0.999706,0.865472,0.112552,0.615189,0.0306458,0.464491,0.543057,0.388279,0.265005,0.688246,0.361845,0.772524,0.498263,0.376732,0.495835,0.727265,0.686478,0.161148,0.80177,0.625716,0.375566,0.855008,0.804095,0.17369,0.286067,0.3418
82,0.369315,0.538067,0.312709,0.626837,0.445112,0.312416,0.492309,0.557664,0.927605,0.522955,0.0221554,0.470662,0.911233,0.287161,0.158908,0.273078,0.0596844,0.657172,0.64981,0.55552,0.384437,0.336288,0.716668,0.186207,0.962004,0.0922334,0.0412145,0.766098,0.265923,0.327281,0.10798,0.635239,0.865348,0.42069,0.262076,0.31046,0.733105,0.754384,0.868124,0.66071,0.277339,0.89028,0.131372,0.188573,0.17744,0.29028,0.461651,0.237125,0.947452,0.111461,0.792644,0.331889,0.447749,0.509312,0.518096,0.409753,0.601545,0.55931,0.175851,0.867469,0.886592,0.283832,0.502707,0.75194,0.704521,0.764783,0.0623998,0.437627,0.519167,0.930524,0.0983368,0.796506,0.820804,0.229709,0.985079,0.998244,0.519989,0.446729,0.235369,0.467441,0.55819,0.0280127,0.79933,0.00593948,0.537324,0.317426,0.415692,0.13887,0.876737,0.591543,0.00633821,0.763328,0.875375,0.509045,0.515268,0.579896,0.273828,0.577668,0.0175228,0.792995,0.508192,0.11586,0.589502,0.328996,0.345569,0.574581,0.327239,0.865558,0.02131,0.562608,0.332999,0.5795,0.590621,0.13233,0.58544,0.127945,0.449756,0.00113212,0.266815,0.326492,0.592676,0.273153,0.0898208,0.46805,0.782198,0.605089,0.0479466,0.0560265,0.182757,0.0654694,0.849022,0.690949,0.181329,0.438523,0.0199444,0.526898,0.013104,0.347184,0.392456,0.034414,0.909792,0.725455,0.613914,0.500412,0.857785,0.199354,0.628358,0.307541,0.200486,0.895172,0.634033,0.793162,0.168325,0.723854,0.261212,0.950524,0.328943,0.309159,0.00655026,0.5117,0.374628,0.855572,0.202649,0.555957,0.294096,0.222593,0.0828549,0.3072,0.569777,0.47531,0.341614,0.479568,0.200765,0.955528,0.979981,0.0585499,0.154882,0.608338,0.366091,0.355369,0.503511,0.000123613,0.148531,0.671836,0.723978,0.409743,0.62236,0.0529205,0.718902,0.62891,0.56462,0.0935305,0.484482,0.767269,0.649488,0.778578,0.989862,0.732343,0.0857771,0.559639,0.207653,0.427391,0.0392069,0.408418,0.382919,0.0191876,0.466968,0.537801,0.627526,0.833059,0.89317,0.131036,0.833182,0.0417004,0.802872,0.55716,0.451443,0.425232,0.61008,0.170346,0.054142,0.174701,0.263876,0.538624,0.94197,0.913364,0.317202,0.931831,0.645707,0.402979,0.49147,0.85336,0.830369,0.530677,0.261778,0.213288,0.549864,0.728746,0.751089,0.17739,0.561805,0.644258,0.308427,0.394988,0.685959,0.111299,0.952148,0.137402,0.536531,0.562228,0.307748,0.590673,0.736929,0.571624,0.129297,0.678898,0.484988,0.446499,0.61073,0.130695,0.849477,0.1022,0.984054,0.679847,0.632876,0.245833,0.893135,0.182741,0.974579,0.644224,0.360131,0.536384,0.288482,0.668558,0.931372,0.974441,0.779857,0.88352,0.111843,0.316388,0.445748,0.419591,0.907062,0.182676,0.991215,0.0363588,0.861575,0.476203,0.482858,0.472304,0.606898,0.332335,0.574504,0.590952,0.0121819,0.207381,0.836785,0.905317,0.390121,0.811364,0.54954,0.750253,0.347748,0.838022,0.418811,0.27912,0.812463,0.198668,0.162639,0.924306,0.515056,0.608387,0.343897,0.422118,0.791063,0.335112,0.458476,0.652638,0.811315,0.941334,0.124943,0.418213,0.273669,0.699447,0.00916559,0.285851,0.906827,0.84595,0.191168,0.296949,0.657314,0.740708,0.0472011,0.00506229,0.57873,0.466012,0.284182,0.391193,0.664679,0.446822,0.315499,0.179735,0.0552086,0.659396,0.601853,0.846272,0.994508,0.0603294,0.49891,0.805824,0.00166353,0.623853,0.224037,0.275333,0.323299,0.233203,0.561184,0.230127,0.079153,0.752352,0.527075,0.736467,0.49306,0.574276,0.74153,0.0717896,0.0402879,0.0257117,0.462983,0.704967,0.472533,0.778482,0.884703,0.527742,0.437878,0.486556,0.374014,0.432386,0.546885,0.872924,0.238209,0.548549,0.496777,0.462246,0.823881,0.820076,0.695449,0.385065,0.050203,0.774602,0.137417,0.577278,0.511069,0.630476,0.15155
4,0.252599,0.702266,0.191842,0.27831,0.165248,0.896809,0.750844,0.94373,0.781512,0.278586,0.381608,0.268067,0.6526,0.813993,0.814952,0.525524,0.0522028,0.363501,0.0223009,0.514449,0.187382,0.842377,0.209898,0.572447,0.89258,0.9845,0.709864,0.469858,0.495569,0.34034,0.621413,0.748168,0.0426063,0.813255,0.0264782,0.207855,0.710064,0.777322,0.151585,0.491576,0.0559076,0.533192,0.759644,0.708507,0.347186,0.574596,0.234031,0.399388,0.938097,0.256332,0.913838,0.12548,0.0987094,0.123736,0.697927,0.99129,0.108236,0.407791,0.461148,0.603805,0.748131,0.0825612,0.351973,0.790738,0.895816,0.378451,0.998592,0.605881,0.155773,0.150177,0.0974569,0.21168,0.683369,0.857101,0.920188,0.0305551,0.431697,0.154219,0.429944,0.369794,0.410551,0.343781,0.495274,0.50926,0.467517,0.193201,0.50055,0.575753,0.600992,0.961698,0.179558,0.349123,0.0442596,0.53153,0.139861,0.940076,0.909981,0.138453,0.545956,0.0657541,0.28863,0.643413,0.277434,0.972,0.500514,0.197622,0.00255495,0.932211,0.351841,0.432499,0.302005,0.762392,0.77628,0.797279,0.271652,0.243797,0.99048,0.772202,0.81955,0.591472,0.733901,0.999107,0.940595,0.77816,0.530638,0.0804557,0.718236,0.440619,0.218909,0.264192,0.506373,0.507539,0.907605,0.783808,0.479539,0.408119,0.98143,0.482094,0.34033,0.333271,0.914593,0.642336,0.0956623,0.690872,0.439615,0.367314,0.934669,0.430095,0.139516,0.754219,0.0215666,0.873417,0.753326,0.962162,0.651577,0.283964,0.0426173,0.369813,0.724583,0.261526,0.634005,0.230957,0.769066,0.541611,0.0147643,0.248605,0.94973,0.996194,0.730699,0.290061,0.329465,0.645292,0.932396,0.425127,0.336164,0.372011,0.792441,0.270834,0.802106,0.931958,0.0250525,0.823673,0.805375,0.778379,0.785834,0.456951,0.0623429,0.828452,0.826764,0.786926,0.089978,0.46077,0.0178828,0.859044,0.00238017,0.0326471,0.107648,0.95211,0.0288412,0.838347,0.242171,0.358306,0.483639,0.174567,0.783433,0.819803,0.546579,0.575874,0.0906369,0.348685,0.507832,0.115689,0.172358,0.313206,0.894068,0.958192,0.770158,0.956411,0.786644,0.596922,0.743337,0.876622,0.0576916,0.76122,0.735666,0.0600717,0.793867,0.843314,0.012182,0.822708,0.681662,0.254353,0.181014,0.165301,0.42892,0.964447,0.985104,0.975499,0.540321,0.0757413,0.324184,0.0481527,0.191431,0.496542,0.361359,0.085499,0.454734,0.131516,0.0419101,0.241378,0.728438,0.785248,0.118001,0.78613,0.546468,0.853667,0.846202,0.340335,0.696981,0.858384,0.163043,0.378643,0.112737,0.344058,0.543944,0.541657,0.308505,0.529048,0.517156,0.848826,0.604789,0.84134,0.896978,0.79622,0.337882,0.258337,0.881719,0.792616,0.389854,0.923629,0.0339943,0.118292,0.708877,0.151995,0.904422,0.255344,0.00566148,0.750624,0.595679,0.702642,0.609008,0.758723,0.081285,0.721744,0.10278,0.625229,0.263401,0.411285,0.154277,0.780557,0.260111,0.759066,0.621897,0.157089,0.555286,0.959778,0.415426,0.437005,0.752394,0.80528,0.360634,0.786388,0.923572,0.0695111,0.938383,0.827994,0.324855,0.944045,0.578618,0.920535,0.646687,0.187626,0.679257,0.727972,0.90937,0.782038,0.353201,0.172772,0.193323,0.507478,0.953329,0.453433,0.266544,0.575226,0.610522,0.82183,0.535004,0.0259485,0.258835,0.287398,0.831228,0.61947,0.0737863,0.754801,0.688981,0.0121696,0.582795,0.0138362,0.956214,0.161413,0.934371,0.602901,0.349039,0.613628,0.330874,0.258409,0.395666,0.684075,0.431181,0.588989,0.191552,0.38451,0.0424218,0.458096,0.959735,0.652944,0.279926,0.494739,0.678893,0.538761,0.782137,0.510121,0.158231,0.855923,0.264922,0.847212,0.868093,0.847716,0.861048,0.824307,0.00912935,0.795419,0.427209,0.358168,0.409047,0.758082,0.616577,0.804713,0.442157,0.0477583,0.393702,0.633709,0.432268,0.436124,0.
3,0.129721,0.942727,0.278275,0.0329726,0.651407,0.51339,0.571421,0.57274,0.595337,0.0461957,0.975756,0.512026,0.673153,0.220013,0.511852,0.921999,0.984425,0.159386,0.805298,0.135202,0.601358,0.0271962,0.459736,0.198104,0.57363,0.391306,0.466557,0.628607,0.478,0.779523,0.11414,0.607721,0.72225,0.392415,0.640694,0.373657,0.905805,0.212115,0.946397,0.501142,0.258311,0.922153,0.0131678,0.931464,0.142166,0.52502,0.853463,0.12659,0.684407,0.658761,0.261793,0.285764,0.685957,0.721529,0.483869,0.259588,0.112836,0.950426,0.888195,0.590836,0.729949,0.00233475,0.198557,0.452198,0.39475,0.839251,0.825855,0.300555,0.0513666,0.772251,0.801697,0.309678,0.694404,0.814865,0.241142,0.83657,0.339885,0.0946048,0.963161,0.0242916,0.753366,0.224953,0.310056,0.439323,0.946483,0.793925,0.698911,0.0593184,0.74435,0.587106,0.650154,0.474299,0.589441,0.848711,0.926497,0.98419,0.687962,0.752351,0.284745,0.739329,0.524603,0.0864421,0.0490067,0.219007,0.901307,0.290148,0.0555771,0.241192,0.384753,0.0187377,0.265483,0.138119,0.243691,0.575539,0.577442,0.190174,0.369464,0.276353,0.249492,0.113814,0.863459,0.899646,0.588113,0.4529,0.748358,0.51461,0.43709,0.43632,0.266961,0.721836,0.175649,0.791564,0.808278,0.224656,0.0105707,0.709585,0.514804,0.0661478,0.950776,0.899557,0.0848856,0.21626,0.0376765,0.328577,0.791799,0.615119,0.518751,0.161263,0.891472,0.768243,0.275077,0.754932,0.66789,0.86319,0.207832,0.416247,0.377799,0.644922,0.852567,0.64476,0.366758,0.0282167,0.436324,0.175035,0.252873,0.446895,0.88462,0.767677,0.513043,0.835396,0.667234,0.597928,0.0516558,0.704911,0.926505,0.843455,0.32003,0.445256,0.00471745,0.211502,0.213499,0.279794,0.966434,0.881389,0.142984,0.174265,0.297636,0.520784,0.819187,0.150203,0.165544,0.185945,0.17842,0.601868,0.36098,0.431293,0.0487633,0.2456,0.198969,0.561806,0.0809967,0.866204,0.159734,0.132653,0.571115,0.0862394,0.976107,0.891144,0.531495,0.980825,0.102646,0.744994,0.260619,0.0690795,0.626383,0.403603,0.243345,0.924019,0.924387,0.0625319,0.0742218,0.089931,0.248477,0.252642,0.691799,0.609457,0.683934,0.740563,0.855058,0.882904,0.302369,0.936054,0.749108,0.462103,0.068707,0.320222,0.548343,0.0448141,0.211366,0.0798378,0.0256388,0.314012,0.824832,0.286258,0.383092,0.451215,0.689861,0.626437,0.375233,0.614248,0.688968,0.449455,0.704179,0.937445,0.702097,0.395978,0.546903,0.386031,0.136541,0.401961,0.268935,0.43891,0.338015,0.0180425,0.901013,0.406722,0.338265,0.449355,0.451536,0.549631,0.529193,0.477175,0.863643,0.354025,0.763433,0.246735,0.805239,0.453294,0.873172,0.180473,0.0675414,0.56214,0.629927,0.77172,0.499586,0.332024,0.167698,0.0464887,0.718055,0.304239,0.448449,0.98699,0.743149,0.786464,0.00503247,0.644161,0.193186,0.343297,0.0935163,0.644722,0.892928,0.622709,0.121897,0.756572,0.976734,0.88533,0.00330716,0.781973,0.338623,0.876479,0.962446,0.406165,0.43862,0.592373,0.177885,0.938205,0.924397,0.345583,0.984694,0.642453,0.649823,0.433143,0.629443,0.392971,0.219608,0.634475,0.0371326,0.412794,0.977772,0.130649,0.0575161,0.8707,0.753358,0.179413,0.627272,0.730092,0.064743,0.630579,0.512065,0.403366,0.507059,0.474511,0.809531,0.945678,0.0668841,0.987416,0.883884,0.991282,0.333,0.868578,0.633734,0.982822,0.301721,0.263177,0.375794,0.521329,0.897652,0.412926,0.934123,0.875424,0.543575,0.991639,0.746125,0.296933,0.171052,0.373397,0.0270249,0.235795,0.00397622,0.53909,0.639161,0.511035,0.0136008,0.448693,0.456713,0.0804849,0.436109,0.340597,0.0717665,0.769108,0.209174,0.705501,0.751931,0.510895,0.968677,0.127724,0.0322242,0.866329,0.54065,0.966347,0.741753,0.0842256,0.957985,0.487878
,0.381159,0.129037,0.861274,0.408183,0.364832,0.865251,0.947273,0.00399332,0.376285,0.960874,0.452686,0.832998,0.041359,0.888795,0.173595,0.113125,0.657903,0.382769,0.818626,0.409834,0.893665,0.787304,0.537558,0.925889,0.653633,0.0782084,0.892236,0.395386,0.162434,0.850221,0.883263,0.543593,0.979258,0.744538,0.951776,0.34409,0.609788,0.899049,0.348083,0.986074,0.859923,0.800769,0.819072,0.901282,0.689564,0.992667,0.0144079,0.347467,0.375436,0.833034,0.7573,0.269101,0.620337,0.294858,0.19499,0.27397,0.373066,0.0872254,0.669356,0.5355,0.937446,0.552619,0.0790929,0.916704,0.297157,0.0308688,0.260794,0.906945,0.929918,0.608877,0.893019,0.789842,0.409646,0.712091,0.691124,0.0992101,0.704758,0.705532,0.446677,0.0801947,0.538566,0.203977,0.349296,0.158903,0.498835,0.544286,0.432873,0.871901,0.631511,0.102229,0.407402,0.568957,0.654849,0.486495,0.485661,0.952006,0.517364,0.746455,0.858951,0.447282,0.355333,0.75197,0.237123,0.764979,0.464062,0.928247,0.864189,0.16882,0.633779,0.310866,0.249015,0.172345,0.514843,0.59831,0.331248,0.0136777,0.142596,0.764121,0.885579,0.774107,0.866351,0.292981,0.343064,0.5212,0.779476,0.828725,0.473205,0.296839,0.575181,0.332156,0.744121,0.930513,0.0841266,0.981244,0.695493,0.548188,0.909491,0.559682,0.717008,0.54327,0.870548,0.966023,0.715615,0.38539,0.564334,0.0468627,0.399068,0.70693,0.810984,0.284647,0.481036,0.677335,0.577628,0.824101,0.198535,0.357104,0.652826,0.67174,0.653943,0.228007,0.00389606,0.398064,0.15852,0.0880227,0.379308,0.854013,0.636211,0.288799,0.413695,0.353219,0.832069,0.284242,0.319242,0.547684,0.669632,0.883576,0.594547,0.0687006,0.590505,0.405531,0.353348,0.0715419,0.0828662,0.930976,0.895643,0.281401,0.28808,0.548468,0.953141,0.942024,0.776475,0.957037,0.340088,0.934995,0.0450593,0.719396,0.789008,0.68127,0.00819537,0.202702,0.0344896,0.840265,0.486945,0.353732,0.387949,0.156577,0.237308,0.982496,0.225278,0.827813,0.388027,0.578626,0.899355,0.470893,0.509602,0.794998,0.752294,0.797682,0.343466,0.705435,0.739706,0.119941,0.662471,0.0797934,0.0549366,0.70753,0.799189,0.843945,0.388801,0.807385,0.0466471,0.42329,0.64765,0.533592,0.777022,0.0355985,0.690169,0.0143302,0.0180943,0.915447,0.842144,0.406121,0.494072,0.741499,0.877014,0.00367406,0.536497,0.629308,0.801356,0.879963,0.334743,0.541062,0.999904,0.997214,0.620855,0.0548411,0.704745,0.420045,0.898786,0.0935454,0.227429,0.945433,0.516836,0.875079,0.479025,0.293858,0.910677,0.169193,0.308188,0.928772,0.08464,0.150332,0.334893,0.578712,0.891831,0.211907,0.582386,0.428328,0.841216,0.383742,0.308291,0.175959,0.924804,0.308195,0.173173,0.545659,0.363036,0.877917,0.965704,0.261822,0.971463,0.193133,0.207255,0.488299,0.068212,0.68628,0.782157,0.978889,0.855473,0.090345,0.907661,0.940113,0.240677,0.242554,0.518825,0.132508,0.454461,0.101212,0.560836,0.295676,0.484954,0.869127,0.471635,0.409758,0.177322,0.644808,0.955418,0.540358,0.522725,0.921122,0.802181,0.494188,0.114255,0.00943578,0.982486,0.182467,0.695715,0.764643,0.161356,0.551189,0.854988,0.0690167,0.491302,0.0956647,0.31157,0.010127,0.228173,0.766031,0.111339,0.789008,0.0617073,0.596293,0.658135,0.533342,0.00605072,0.835457,0.17815,0.961468,0.375815,0.700875,0.88259,0.177996,0.195062,0.996845,0.187432,0.177548,0.179311,0.883147,0.942191,0.340667,0.434336,0.797179,0.409684,0.925638,0.892844,0.721254,0.935765,0.121016,0.487285,0.0471033,0.910025,0.548993,0.643396,0.56816,0.0823349,0.649447,0.403617,0.260485,0.610915,0.779432,0.961359,0.493505,0.957428,0.156422,0.49035,0.14486,0.33397,0.669661,0.0280077,0.276161,0.0103284,0.462344,0.0733405,0.42
0013,0.387981,0.966184,0.141267,0.323746,0.0872007,0.628552,0.37085,0.997226,0.177545,0.0142454,0.565385,0.25988,0.663692,0.969002,0.520365,0.274607,0.748434,0.481724,0.768112,0.705863,0.638146,0.258461,0.850723,0.972116,0.928122,0.878731,0.248277,0.938451,0.341074,0.321618,0.358463,0.729056,0.287802,0.49973,0.0528021,0.375003,0.128282,0.423652,0.372228,0.305827,0.437897,0.937613,0.565707,0.101589,0.906616,0.0860717,0.376196,0.65505,0.567796,0.144308,0.360913,0.205941,0.402769,0.211636,0.178057,0.330891,0.0903666,0.426334,0.269342,0.431441,0.747951,0.627805,0.160497,0.035753,0.127535,0.213299,0.410756,0.255817,0.636951,0.782984,0.561644,0.0748479,0.720597,0.127352,0.176437,0.627213,0.213423,0.552633,0.282263,0.781219,0.69694,0.643176,0.98716,0.0997093,0.854811,0.165216,0.4306,0.945178,0.59155,0.699942,0.376619,0.339501,0.327747,0.537116,0.375254,0.455282,0.750415,0.78601,0.711099,0.387366,0.568994,0.272743,0.462214,0.289591,0.400095,0.638651,0.916804,0.613518,0.191284,0.199066,0.394737,0.888224,0.842242,0.381896,0.987934,0.697053,0.547113,0.418534,0.642231,0.138663,0.118476,0.0188501,0.478164,0.446223,0.555966,0.853418,0.901504,0.306381,0.639428,0.612603,0.693747,0.208422,0.885346,0.155961,0.498012,0.285441,0.794612,0.414816,0.898959,0.985896,0.613882,0.293695,0.874121,0.456124,0.675592,0.862054,0.153177,0.222704,0.280588,0.795408,0.361367,0.399064,0.814258,0.839531,0.845287,0.370225,0.692949,0.746792,0.676606,0.332377,0.359395,0.370353,0.540799,0.244741,0.526315,0.0388111,0.530182,0.320927,0.453627,0.42914,0.306824,0.0675091,0.722836,0.180944,0.523633,0.398427,0.0429984,0.67681,0.621132,0.323587,0.472218,0.982499,0.722651,0.286477,0.82203,0.567938,0.656701,0.514979,0.31473,0.333307,0.847356,0.674125,0.703661,0.388155,0.918865,0.229975,0.426966,0.449047,0.550903,0.880593,0.878188,0.857726,0.948102,0.601023,0.0386704,0.471735,0.999451,0.0816687,0.148546,0.620583,0.405255,0.620764,0.603082,0.127906,0.907241,0.425112,0.695845,0.563942,0.940091,0.0105744,0.89725,0.787447,0.684699,0.60091,0.175602,0.603564,0.830886,0.602569,0.0526117,0.381788,0.483162,0.930799,0.239515,0.431264,0.531823,0.278185,0.902999,0.531274,0.359854,0.0515451,0.151857,0.765109,0.672309,0.754938,0.893015,0.57955,0.18005,0.58886,0.143493,0.120141,0.599434,0.040742,0.907588,0.284133,0.641652,0.0831901,0.887698,0.472538,0.685759,0.94031,0.854326,0.16892,0.871109,0.0938404,0.600184,0.402932,0.372025,0.503184,0.934206,0.731879,0.554729,0.0860623,0.496988,0.227038,0.841001,0.390003,0.806588,0.0210505,0.978864,0.950081,0.141191,0.578298,0.990823,0.0487791,0.862432,0.632475,0.131969,0.750129,0.105013,0.817728,0.690439,0.959339,0.986648,0.561548,0.053179,0.586832,0.96448,0.425204,0.0900162,0.898686,0.157083,0.644745,0.984748,0.654071,0.871783,0.825749,0.0440748,0.678371,0.846799,0.0229383,0.628452,0.98799,0.601236,0.619275,0.0367695,0.463668,0.25175,0.168739,0.213797,0.356763,0.986467,0.904236,0.316102,0.973115,0.465785,0.369281,0.559947,0.430265,0.794485,0.649964,0.32895,0.951568,0.294709,0.313699,0.605639,0.166492,0.139447,0.649714,0.844863,0.986246,0.672652,0.473315,0.974237,0.273889,0.0925908,0.0110063,0.737557,0.344341,0.179745,0.951354,0.701104,0.166212,0.855591,0.0172057,0.139327,0.321375,0.386486,0.699274,0.75164,0.180971,0.349238,0.0805904,0.132539,0.643946,0.394289,0.738179,0.810438,0.533736,0.387893,0.655301,0.519982,0.0605453,0.128616,0.494219,0.334434,0.221207,0.505226,0.0719909,0.565548,0.684971,0.023345,0.266652,0.851182,0.878936,0.283858,0.990509,0.200311,0.670344,0.689783,0.951951,0.851315,0.0390205,0.0325413,0.9838
55,0.682967,0.42683,0.722033,0.493404,0.960566,0.109926,0.148705,0.480549,0.170471,0.277321,0.974768,0.504906,0.498529,0.479993,0.576896,0.0640768,0.164964,0.600241,0.330729,0.0161464,0.479177,0.614587,0.00665542,0.679488,0.284932,0.696438,0.631439,0.136247,0.735459,0.66398,0.120102,0.418425,0.0908106,0.842135,0.91183,0.0513768,0.952061,0.0605348,0.531926,0.122533,0.337856,0.506693,0.627438,0.836385,0.986687,0.204335,0.900462,0.151651,0.804576,0.231191,0.167797,0.283753,0.845778,0.174453,0.963242,0.13071,0.870891,0.594681,0.266957,0.60635,0.258661,0.387059,0.0247755,0.349472,0.229194,0.936605,0.400848,0.181255,0.99714,0.932774,0.303788,0.334996,0.439467,0.931226,0.171381,0.426154,0.135561,0.0718427,0.577805,0.940137,0.303034,0.745603,0.223891,0.148812,0.920055,0.187132,0.279521,0.790946,0.781813,0.546478,0.397296,0.0404736,0.933537,0.422072,0.389945,0.16273,0.358677,0.790794,0.343986,0.355817,0.723568,0.647774,0.690814,0.163035,0.579,0.862195,0.589189,0.714561,0.934037,0.166995,0.654698,0.237071,0.912597,0.878589,0.385882,0.832652,0.0657206,0.665404,0.623599,0.847533,0.211882,0.0208952,0.888007,0.145418,0.442967,0.277952,0.308149,0.801644,0.0687456,0.652134,0.157462,0.792313,0.299908,0.848275,0.955348,0.878908,0.71047,0.544538,0.593468,0.644507,0.711532,0.248167,0.881578,0.624129,0.126755,0.267461,0.456781,0.192476,0.932864,0.0803802,0.0400089,0.144746,0.101275,0.928016,0.290164,0.544243,0.205968,0.598313,0.345887,0.274713,0.250447,0.503349,0.0670265,0.550354,0.351624,0.0223748,0.429262,0.0620937,0.566912,0.0227306,0.706601,0.278444,0.270897,0.588179,0.902574,0.397652,0.85564,0.359355,0.590128,0.788504,0.439735,0.630137,0.93325,0.541011,0.558153,0.223414,0.0852532,0.76412,0.821727,0.43114,0.0388337,0.0721733,0.934489,0.10586,0.622528,0.286113,0.128235,0.0517899,0.348206,0.695147,0.0745204,0.0548074,0.973592,0.345418,0.642986,0.876165,0.74307,0.498626,0.23552,0.333198,0.28713,0.675256,0.963335,0.22038,0.216266,0.521488,0.443794,0.301519,0.285608,0.265521,0.73266,0.324442,0.337694,0.667148,0.430302,0.960222,0.953261,0.558537,0.0120119,0.301468,0.253684,0.0865323,0.356275,0.227276,0.43195,0.999261,0.103442,0.17502,0.497888,0.338962,0.508218,0.785018,0.0142178,0.471552,0.00539833,0.230484,0.99304,0.449193,0.532003,0.278648,0.714713,0.264663,0.60309,0.0524077,0.931812,0.0333916,0.0126297,0.885073,0.591929,0.0246415,0.18654,0.845613,0.111174,0.542815,0.0728893,0.543124,0.542076,0.176331,0.718143,0.0399641,0.515293,0.226361,0.824982,0.529511,0.697913,0.83038,0.759995,0.690953,0.279573,0.291999,0.969601,0.994287,0.556662,0.572691,0.0466942,0.488473,0.606082,0.0593239,0.373546,0.198011,0.0839654,0.560086,0.043624,0.195139,0.102901,0.116513,0.738263,0.644977,0.292844,0.456406,0.684941,0.808138,0.682767,0.509923,0.337649,0.38068,0.340304,0.0976439,0.0716334,0.619877,0.389642,0.0412346,0.614164,0.946304,0.613925,0.660858,0.434777,0.220008,0.720182,0.808323,0.418019,0.804147,0.368409,0.461643,0.999286,0.47131,0.578156,0.737549,0.116287,0.871001,0.193955,0.801228,0.679138,0.876722,0.311152,0.016787,0.257402,0.651456,0.114431,0.329036,0.271333,0.504073,0.37027,0.885496,0.450377,0.984196,0.546354,0.885155,0.204203,0.266535,0.693477,0.622222,0.0706825,0.0618861,0.0838648,0.0699688,0.533196,0.662021,0.807518,0.649482,0.533021,0.00147304,0.450711,0.21216,0.878195,0.761862,0.228947,0.135598,0.413318,0.343378,0.464633,0.68465,0.847451,0.834904,0.570147,0.297828,0.819099,0.1165,0.182983,0.0233027,0.383036,0.876461,0.645525,0.453718,0.938347,0.72939,0.523687,0.471542,0.39141,0.331205,0.121025,0.924432,0.332678,0.
571735,0.136591,0.210873,0.333597,0.365538,0.346471,0.746915,0.708915,0.811104,0.431566,0.556367,0.646008,0.00171215,0.854195,0.465107,0.118212,0.0371781,0.48841,0.501248,0.913639,0.133935,0.954967,0.851985,0.863324,0.478654,0.323528,0.254735,0.809859,0.444552,0.179166,0.142536,0.0162876,0.315758,0.35341,0.349885,0.681295,0.69988,0.0968004,0.390211,0.510984,0.528366,0.946577,0.156992,0.530078,0.800772,0.622099,0.648291,0.837951,0.110509,0.149539,0.751589,0.244444,0.104505,0.603575,0.107768,0.583159,0.927102,0.362502,0.393018,0.371655,0.541668,0.535554,0.387942,0.857426,0.888964,0.737827,0.538721,0.588844,0.834628,0.928932,0.099828,0.362994,0.87551,0.25682,0.893072,0.676282,0.878919,0.541363,0.514233,0.989428,0.690902,0.265822,0.233872,0.795407,0.869397,0.341639,0.378566,0.796499,0.704141,0.771584,0.168153,0.24581,0.307138,0.556096,0.103236,0.196101,0.293923,0.641957,0.784945,0.128551,0.57089,0.884773,0.491544,0.446399,0.141593,0.384616,0.122681,0.0205123,0.925979,0.636914,0.00994032,0.61688,0.902736,0.243812,0.412288,0.772133,0.585451,0.790854,0.568632,0.289593,0.562437,0.736785,0.535403,0.869575,0.292881,0.638638,0.0656764,0.586804,0.280596,0.850621,0.715354,0.851485,0.735394,0.206899,0.297885,0.876988,0.591515,0.420566,0.8975,0.517494,0.0574803,0.90744,0.134374,0.960216,0.151252,0.546662,0.732349,0.736703,0.337515,0.300981,0.0262959,0.899953,0.037766,0.561698,0.769528,0.330647,0.200337,0.835204,0.91745,0.480933,0.685826,0.632805,0.332418,0.42122,0.839703,0.630302,0.298208,0.431218,0.0508684,0.195708,0.948712,0.108349,0.103148,0.0830858,0.0685651,0.2544,0.629747,0.800914,0.991103,0.967263,0.101895,0.0173992,0.867216,0.139661,0.579098,0.636743,0.470308,0.779435,0.471948,0.387758,0.260367,0.157773,0.0205628,0.592785,0.578994,0.860266,0.223087,0.877202,0.291484,0.273955,0.0729093,0.240195,0.382304,0.176057,0.323281,0.450869,0.430457,0.953029,0.251784,0.421561,0.920292,0.353679,0.43896,0.787507,0.49334,0.0180575,0.424251,0.963648,0.797492,0.896198,0.351406,0.057859,0.0539719,0.371969,0.650644,0.632966,0.232235,0.873731,0.510167,0.523719,0.147686,0.583076,0.763914,0.52999,0.759134,0.0871957,0.98086,0.189591,0.0402244,0.232643,0.611152,0.960516,0.586322,0.0501113,0.748023,0.0796625,0.0681688,0.172274,0.0433106,0.865661,0.0684722,0.394717,0.92352,0.122444,0.766686,0.574163,0.75541,0.998921,0.447894,0.265577,0.52264,0.595581,0.848653,0.286555,0.125571,0.607787,0.37375,0.106431,0.797378,0.413975,0.339074,0.408529,0.374491,0.925397,0.458641,0.122514,0.00505914,0.52681,0.294788,0.0483698,0.39247,0.36326,0.443087,0.31599,0.485704,0.209773,0.890154,0.241114,0.208694,0.338048,0.506691,0.731334,0.933628,0.355344,0.017889,0.0591994,0.963131,0.391639,0.16563,0.760509,0.805614,0.504704,0.169038,0.180105,0.430101,0.627679,0.302619,0.43516,0.154489,0.597406,0.48353,0.546959,0.960666,0.926617,0.862949,0.44637,0.13639,0.753103,0.687484,0.345084,0.0911509,0.194174,0.076418,0.0247793,0.549518,0.094307,0.0839787,0.512649,0.485946,0.249609,0.273158,0.291561,0.754313,0.442196,0.471665,0.184414,0.0698753,0.774284,0.619574,0.224364,0.37169,0.103104,0.771323,0.332356,0.0297209,0.634273,0.778726,0.16611,0.387376,0.46621,0.511194,0.478527,0.660384,0.587612,0.503306,0.209902,0.681919,0.587285,0.722551,0.167865,0.836893,0.995709,0.459426,0.591207,0.437905,0.931091,0.775621,0.50778,0.705375,0.395195,0.732144,0.0770656,0.4983,0.503468,0.409422,0.528021,0.13774,0.188148,0.694131,0.525116,0.654357,0.205325,0.0036427,0.314741,0.792937,0.506949,0.524643,0.474856,0.0942331,0.247194,0.642722,0.931127,0.242902,0.102148,0.522333,0.6
80807,0.0332393,0.297954,0.188587,0.738615,0.69315,0.920731,0.81568,0.191449,0.424199,0.225102,0.71947,0.561939,0.41325,0.413601,0.0870553,0.0676074,0.618926,0.090698,0.382349,0.411863,0.597647,0.906991,0.88672,0.69188,0.154185,0.529441,0.623006,0.397087,0.631589,0.145339,0.077894,0.664829,0.443294,0.266481,0.403443,0.136443,0.187212,0.219123,0.327892,0.611411,0.444225,0.0473622,0.17335,0.857475,0.460963,0.260405,0.925083,0.079889,0.351103,0.307431,0.491752,0.94875,0.214423,0.378472,0.64063,0.368608,0.907913,0.263636,0.765695,0.539502,0.408975,0.843589,0.204331,0.852269,0.11007,0.607774,0.988712,0.297282,0.826897,0.316605,0.908693,0.271123,0.363967,0.0820435,0.128598,0.82493,0.342449,0.0536812,0.904819,0.693552,0.361113,0.396571,0.642302,0.575535,0.775043,0.282932,0.944143,0.682956,0.546568,0.709838,0.222458,0.955543,0.553427,0.426789,0.807812,0.663498,0.0345628,0.796524,0.96078,0.86146,0.113129,0.869473,0.132583,0.477095,0.951517,0.261182,0.302025,0.293966,0.314863,0.206844,0.987518,0.675975,0.603415,0.62982,0.251511,0.378457,0.912752,0.195654,0.0614128,0.45932,0.905493,0.283871,0.414863,0.45892,0.71066,0.222675,0.122418,0.745222,0.019199,0.0831975,0.606683,0.132328,0.952671,0.739266,0.609423,0.904187,0.000447384,0.911448,0.198153,0.31531,0.118292,0.185671,0.991286,0.721707,0.815491,0.242797,0.100164,0.728244,0.438451,0.161577,0.187564,0.343943,0.445447,0.602426,0.802863,0.156107,0.825101,0.925281,0.901329,0.8443,0.00847853,0.508012,0.976628,0.961149,0.247278,0.586051,0.865337,0.247725,0.497499,0.0634897,0.563035,0.615791,0.249161,0.554321,0.337498,0.0646523,0.797117,0.437662,0.792896,0.235568,0.599239,0.98046,0.579512,0.0446865,0.582886,0.382375,0.200793,0.407987,0.307656,0.102123,0.252288,0.316135,0.610134,0.228915,0.277284,0.857412,0.814967,0.142621,0.105137,0.312466,0.20611,0.668172,0.928257,0.455271,0.222493,0.265756,0.519924,0.0196106,0.703418,0.31282,0.255179,0.302657,0.293279,0.834691,0.347343,0.876165,0.217066,0.548137,0.284152,0.524722,0.65026,0.53644,0.840857,0.260394,0.765355,0.118141,0.117806,0.580322,0.260762,0.222943,0.892788,0.466872,0.891115,0.821045,0.922144,0.113609,0.086801,0.442067,0.133219,0.790219,0.754887,0.388398,0.092876,0.0481657,0.223089,0.44022,0.924331,0.440155,0.988356,0.208483,0.964877,0.638616,0.744923,0.805734,0.89901,0.510278,0.923876,0.0168163,0.0905994,0.184637,0.23976,0.983387,0.65151,0.130875,0.804432,0.573653,0.244484,0.891233,0.0157206,0.377703,0.681452,0.770607,0.766101,0.774328,0.818773,0.98919,0.214548,0.743104,0.429345,0.202904,0.951587,0.394222,0.84152,0.696509,0.199956,0.740531,0.206787,0.123832,0.757347,0.297386,0.308469,0.997106,0.280773,0.959979,0.127981,0.0852059,0.533632,0.372465,0.976439,0.549353,0.750168,0.657892,0.31996,0.516269,0.43222,0.138733,0.505458,0.646768,0.881837,0.934803,0.849673,0.833424,0.329025,0.691193,0.529933,0.528981,0.431724,0.736719,0.652812,0.18907,0.0341058,0.961281,0.186177,0.314879,0.92126,0.314158,0.400085,0.454893,0.686623,0.376524,0.00424538,0.436791,0.0344162,0.324206,0.953059,0.466636,0.462939,0.458517,0.113404,0.344776,0.39332,0.963077,0.178199,0.722345,0.65427,0.708132,0.251326,0.0859936,0.444851,0.904138,0.275064,0.478957,0.86542,0.461241,0.793836,0.78668,0.775399,0.193922,0.241572,0.462022,0.570446,0.245818,0.898813,0.604862,0.570023,0.851872,0.0714985,0.0329621,0.310389,0.184903,0.377738,0.70371,0.14798,0.555937,0.426055,0.80225,0.264069,0.677381,0.888244,0.70892,0.581519,0.163308,0.187877,0.446938,0.624549,0.981714,0.233618,0.399948,0.175635,0.475191,0.86197,0.746081,0.721009,0.760782,0.350944,0.291032
,0.612654,0.422442,0.323994,0.923043,0.607345,0.701732,0.626753,0.755325,0.257669,0.0528078,0.557575,0.521738,0.730188,0.445819,0.230658,0.311707,0.609127,0.418535,0.758646,0.233675,0.400249,0.992264,0.633623,0.575884,0.467455,0.495593,0.321966,0.188464,0.256375,0.672909,0.479496,0.869029,0.0953512,0.80349,0.792072,0.702696,0.505222,0.418825,0.458021,0.76289,0.471633,0.0155966,0.284628,0.201821,0.461416,0.515286,0.513529,0.0705424,0.933821,0.272174,0.304218,0.334069,0.264438,0.937841,0.909954,0.731893,0.433433,0.231919,0.920357,0.689808,0.904828,0.399853,0.558837,0.000179436,0.203342,0.350909,0.702876,0.708564,0.769734,0.160897,0.471454,0.241367,0.176494,0.756082,0.443189,0.637909,0.271368,0.956717,0.708452,0.205189,0.228892,0.0126694,0.539258,0.49333,0.95051,0.449212,0.225223,0.383944,0.681131,0.14558,0.0737519,0.585959,0.545433,0.632589,0.586139,0.748775,0.983498,0.289014,0.457339,0.753233,0.449911,0.928794,0.9946,0.626405,0.684876,0.437789,0.264314,0.956244,0.394506,0.972766,0.161433,0.623398,0.985436,0.700691,0.116728,0.935946,0.149903,0.341951,0.319889,0.831034,0.487531,0.393641,0.416993,0.0329637,0.0262302,0.00313184,0.781739,0.00972839,0.292146,0.239078,0.762961,0.742058,0.167872,0.757561,0.368463,0.852748,0.195349,0.632777,0.808992,0.589855,0.605543,0.970425,0.213253,0.590979,0.671116,0.329981,0.526925,0.821019,0.671931,0.846814,0.652053,0.159462,0.240456,0.0690461,0.192426,0.266686,0.0721779,0.974165,0.276414,0.364324,0.213243,0.0393752,0.106382,0.381114,0.796936,0.474845,0.233862,0.992286,0.107622,0.0428539,0.582141,0.713165,0.0132787,0.795394,0.304144,0.684395,0.125374,0.831069,0.505414,0.797306,0.677884,0.157467,0.956768,0.918339,0.226513,0.149194,0.185025,0.298691,0.123359,0.46144,0.663015,0.336602,0.500815,0.769396,0.717716,0.297751,0.244241,0.951578,0.290036,0.351863,0.994432,0.872177,0.0650284,0.00771079,0.667571,0.369173,0.692105,0.792946,0.200242,0.197519,0.590252,0.878126,0.354986,0.54702,0.796465,0.581498,0.696214,0.981491,0.880189,0.819573,0.44293,0.543203,0.156175,0.943745,0.3126,0.873891,0.241496,0.556841,0.825469,0.531533,0.908704,0.819901,0.40371,0.973733,0.827612,0.0712814,0.342905,0.519717,0.864227,0.543148,0.717236,0.454479,0.421274,0.0722218,0.00149893,0.217739,0.65372,0.697713,0.19923,0.533909,0.517286,0.64216,0.0771123,0.67346,0.585905,0.389712,0.547351,0.827401,0.946553,0.37282,0.358934,0.855258,0.192721,0.762644,0.82899,0.0203322,0.833925,0.171896,0.540049,0.698152,0.715043,0.257285,0.152631,0.136317,0.329507,0.15413,0.354056,0.983227,0.851843,0.553285,0.517136,0.369129,0.195445,0.594248,0.0425893,0.78135,0.983961,0.58994,0.608751,0.930514,0.96276,0.967685,0.785772,0.155481,0.730328,0.614762,0.175813,0.564253,0.786657,0.715862,0.262405,0.501701,0.973148,0.415037,0.638017,0.302655,0.569167,0.992073,0.285882,0.42101,0.545359,0.803018,0.790139,0.740804,0.397267,0.832728,0.522154,0.381227,0.422668,0.130905,0.311742,0.385429,0.0985901,0.0975133,0.540909,0.828918,0.712275,0.716722,0.393171,0.498933,0.432585,0.655577,0.000633158,0.405732,0.0706133,0.638651,0.708387,0.63978,0.630724,0.99427,0.0607899,0.176083,0.797288,0.850929,0.916887,0.194555,0.683657,0.439041,0.575782,0.106325,0.569946,0.887524,0.491754,0.668536,0.985037,0.0326632,0.497454,0.697312,0.749386,0.890626,0.196245,0.18197,0.546202,0.196878,0.587703,0.616816,0.835529,0.29609,0.256596,0.466253,0.29036,0.317386,0.642335,0.0876476,0.168314,0.559222,0.282202,0.851971,0.998262,0.857985,0.958296,0.568208,0.745508,0.45005,0.236744,0.730546,0.482713,0.734199,0.427858,0.232099,0.624824,0.624103,0.414069,0.171027
,0.820981,0.00177184,0.787843,0.656509,0.297862,0.0444384,0.122762,0.588221,0.361824,0.765097,0.675869,0.530138,0.324319,0.958071,0.38211,0.322581,0.816056,0.340406,0.890789,0.561564,0.790456,0.127534,0.29211,0.27317,0.861732,0.719968,0.505269,0.486557,0.344071,0.919338,0.657583,0.165052,0.92111,0.445426,0.821561,0.218972,0.489864,0.944323,0.807193,0.851689,0.70942,0.483062,0.381827,0.0337392,0.441134,0.763937,0.35632,0.25719,0.104343,0.24711,0.818754,0.894799,0.374644,0.110864,0.167969,0.236376,0.830832,0.673238,0.722932,0.174903,0.592576,0.380516,0.339954,0.513686,0.825942,0.161515,0.732658,0.315806,0.105839,0.539851,0.167495,0.815259,0.0229137,0.549322,0.848998,0.464047,0.313259,0.205319,0.721237,0.417602,0.452428,0.539991,0.312401,0.827072,0.650855,0.48037,0.0634478,0.481687,0.153608,0.78638,0.65659,0.746184,0.166896,0.996544,0.259871,0.992838,0.15806,0.992529,0.308644,0.263898,0.53238,0.476139,0.0791572,0.555294,0.025461,0.928155,0.0193414,0.33872,0.133474,0.740578,0.756321,0.585903,0.280569,0.0687221,0.412975,0.931424,0.549092,0.476422,0.413111,0.7027,0.262803,0.0697009,0.448885,0.429699,0.066245,0.708755,0.422537,0.224305,0.701284,0.731181,0.488203,0.233665,0.20732,0.56736,0.788959,0.232781,0.495515,0.8083,0.5715,0.628989,0.548878,0.327822,0.214892,0.829448,0.396544,0.627867,0.760872,0.945636,0.104289,0.173983,0.648336,0.367092,0.243684,0.0972206,0.79679,0.309929,0.805976,0.219327,0.534234,0.50726,0.950508,0.0224364,0.740925,0.157828,0.589796,0.529884,0.390608,0.0853118,0.338184,0.962109,0.714301,0.887062,0.28993,0.929193,0.71651,0.686474,0.55706,0.477382,0.63211,0.661349,0.651365,0.280446,0.0284406,0.895049,0.377666,0.825231,0.204978,0.183642,0.0445579,0.739212,0.690903,0.995066,0.761648,0.431827,0.152893,0.351445,0.961711,0.543502,0.436757,0.299895,0.50561,0.151058,0.186957,0.795541,0.0802512,0.903467,0.482015,0.637311,0.380849,0.114125,0.29866,0.0322142,0.39457,0.327101,0.927263,0.772237,0.152332,0.132242,0.955879,0.19689,0.871454,0.646782,0.191955,0.633102,0.0786092,0.344849,0.984547,0.0403202,0.88835,0.421304,0.340215,0.39396,0.572362,0.527172,0.189501,0.652613,0.430639,0.671516,0.289924,0.811488,0.785641,0.588584,0.843702,0.180211,0.915685,0.770966,0.952448,0.0680167,0.903208,0.908327,0.264906,0.774661,0.555109,0.456862,0.407764,0.633718,0.80171,0.392311,0.674038,0.69006,0.813615,0.0142534,0.0840209,0.385976,0.541425,0.273522,0.038589,0.972064,0.945038,0.328513,0.783552,0.730679,0.917097,0.627255,0.91089,0.832782,0.39822,0.863338,0.900799,0.301428,0.771665,0.165705,0.0760894,0.326774,0.622567,0.483853,0.960492,0.424277,0.876164,0.634531,0.114338,0.689779,0.648784,0.198358,0.0757549,0.19021,0.471881,0.114344,0.162274,0.416919,0.442857,0.945826,0.147597,0.359954,0.573081,0.058487,0.192736,0.971301,0.921825,0.0935344,0.272729,0.69349,0.259239,0.348819,0.0202642,0.881806,0.832672,0.980757,0.306083,0.708836,0.615287,0.420421,0.398615,0.264072,0.618779,0.474369,0.454281,0.0906598,0.588713,0.616555,0.507578,0.0315702,0.562382,0.655176,0.391524,0.135463,0.713663,0.58426,0.106764,0.635487,0.677794,0.379493,0.328977,0.937034,0.728312,0.349242,0.81884,0.560984,0.329998,0.124923,0.26982,0.945286,0.545344,0.668434,0.209357,0.164123,0.142804,0.663639,0.254783,0.731517,0.280194,0.762361,0.763087,0.842576,0.417537,0.154611,0.978039,0.1312,0.738871,0.0848028,0.766687,0.416666,0.464296,0.0956644,0.3537,0.192608,0.444906,0.17254,0.753592,0.774904,0.297463,0.0234114,0.72019,0.842807,0.691846,0.929547,0.00693023,0.83465,0.593186,0.261713,0.566167,0.873381,0.0240748,0.329254,0.715957,0.441612,0.4838
65,0.693995,0.572811,0.222737,0.778798,0.339498,0.639403,0.243094,0.435163,0.993102,0.435702,0.880069,0.165642,0.189294,0.654973,0.463105,0.212705,0.375163,0.305912,0.904551,0.304711,0.312842,0.7392,0.897897,0.574555,0.305367,0.771277,0.59863,0.634621,0.487234,0.0402418,0.118486,0.181229,0.613053,0.341223,0.960027,0.952551,0.980626,0.203122,0.387714,0.973728,0.638824,0.267783,0.13937,0.828117,0.922756,0.602474,0.0408225,0.297919,0.908386,0.945373,0.60263,0.221228,0.684574,0.500527,0.795783,0.989941,0.271804,0.394413,0.624562,0.759038,0.434654,0.743048,0.940267,0.0477076,0.0842713,0.900295,0.000259054,0.0648972,0.103416,0.387973,0.0386253,0.74224,0.655756,0.177995,0.570357,0.578512,0.78047,0.61118,0.876432,0.688856,0.556553,0.479061,0.910083,0.241127,0.979588,0.705866,0.231068,0.251392,0.100279,0.855629,0.0104299,0.534933,0.598677,0.950697,0.582641,0.682949,0.850992,0.5829,0.747846,0.954408,0.970873,0.786471,0.696648,0.626629,0.964466,0.267005,0.205142,0.744936,0.878184,0.0815734,0.433791,0.434737,0.560635,0.343875,0.675864,0.540223,0.0497404,0.906932,0.791615,0.150019,0.762561,0.802045,0.684952,0.361238,0.752742,0.267593,0.044187,0.603733,0.850492,0.792033,0.558141,0.821365,0.578504,0.254789,0.447995,0.54297,0.521794,0.653136,0.287906,0.399978,0.73471,0.721697,0.834716,0.295344,0.065572,0.51058,0.835567,0.115312,0.417512,0.627182,0.265331,0.180073,0.429226,0.950284,0.541311,0.181968,0.217876,0.585498,0.785701,0.0683686,0.377531,0.343843,0.889734,0.956035,0.598632,0.337729,0.499005,0.120426,0.990865,0.786912,0.520404,0.725575,0.508609,0.35512,0.020919,0.574181,0.8657,0.856486,0.689493,0.283212,0.483668,0.954825,0.463285,0.912894,0.905108,0.00459592,0.0948619,0.122985,0.590094,0.880563,0.191353,0.967625,0.224406,0.0810873,0.923661,0.823038,0.418816,0.422666,0.943464,0.409681,0.209578,0.463868,0.135255,0.718187,0.818988,0.156174,0.292368,0.684688,0.0126604,0.981861,0.9679,0.496328,0.936686,0.431184,0.409222,0.841795,0.43578,0.504084,0.964779,0.0258743,0.384647,0.156133,0.9935,0.609053,0.23722,0.91716,0.432091,0.656036,0.339826,0.375555,0.0657165,0.549404,0.839423,0.200972,0.267591,0.65841,0.357146,0.559958,0.343098,0.369806,0.54182,0.310998,0.866135,0.478506,0.742182,0.275357,0.3203,0.177962,0.779441,0.28508,0.203836,0.164088,0.441212,0.197336,0.773141,0.678432,0.114496,0.205232,0.334468,0.454322,0.580787,0.400185,0.00372588,0.420209,0.601156,0.271316,0.0786199,0.958302,0.831275,0.421718,0.328109,0.373094,0.732716,0.194243,0.8516,0.474898,0.4696,0.171901,0.652859,0.249041,0.45698,0.856696,0.413128,0.898193,0.0540315,0.186269,0.576625,0.168527,0.391501,0.911093,0.62285,0.972288,0.311278,0.626575,0.392497,0.912434,0.897892,0.471117,0.870737,0.729167,0.892835,0.198846,0.102261,0.625551,0.393089,0.953861,0.100448,0.86269,0.125762,0.753308,0.11173,0.582742,0.610003,0.524858,0.480935,0.664035,0.711128,0.0575605,0.832562,0.102629,0.968654,0.455412,0.0749163,0.279932,0.0819872,0.467413,0.192366,0.979879,0.938531,0.0631034,0.709046,0.831366,0.261949,0.811307,0.456916,0.655039,0.765168,0.557365,0.517728,0.89093,0.310672,0.629459,0.473672,0.920676,0.154317,0.954608,0.58471,0.865445,0.0121681,0.417273,0.968073,0.980822,0.872685,0.0429896,0.260754,0.954672,0.510403,0.45312,0.934551,0.448934,0.516224,0.643597,0.280299,0.778173,0.454903,0.737215,0.433212,0.220071,0.29458,0.95094,0.111001,0.605252,0.580399,0.584674,0.525928,0.734716,0.539281,0.110638,0.600161,0.551449,0.527911,0.568234,0.532271,0.400596,0.611223,0.793025,0.355268,0.121626,0.246145,0.289819,0.57056,0.762369,0.933415,0.850859,0.540542,0.388319,
0.588075,0.973754,0.60839,0.882654,0.924694,0.719391,0.487907,0.505093,0.304065,0.0138342,0.239809,0.843346,0.124472,0.83997,0.394796,0.652384,0.408204,0.927067,0.0529792,0.0194269,0.720092,0.408247,0.141053,0.966238,0.698065,0.711613,0.728607,0.631481,0.562473,0.269149,0.0197992,0.150547,0.242904,0.628189,0.0332015,0.167598,0.347581,0.521108,0.672691,0.651646,0.534942,0.912501,0.494992,0.659415,0.75247,0.889788,0.311798,0.160674,0.816855,0.364777,0.180101,0.536947,0.773024,0.321154,0.503185,0.47109,0.0327676,0.231792,0.10257,0.59524,0.500942,0.122369,0.745787,0.743845,0.750558,0.778989,0.911443,0.098139,0.300097,0.584135,0.749785,0.835039,0.496635,0.244777,0.494454,0.249105,0.134565,0.806252,0.409779,0.95142,0.171029,0.58988,0.488367,0.944054,0.911034,0.991553,0.415143,0.943802,0.223345,0.517713,0.539042,0.724287,0.640083,0.284829,0.468132,0.390641,0.063818,0.379575,0.48878,0.363915,0.963709,0.238565,0.198954,0.460345,0.483342,0.693408,0.70945,0.617907,0.49966,0.119229,0.569327,0.670689,0.709109,0.0576941,0.614743,0.620144,0.0492468,0.0298861,0.563946,0.272592,0.5476,0.102988,0.996879,0.187682,0.387817,0.46501,0.578324,0.451635,0.844585,0.0671041,0.81555,0.808295,0.305669,0.0145037,0.268639,0.789011,0.707911,0.978089,0.406918,0.207571,0.0973184,0.976245,0.87826,0.806428,0.0339386,0.493003,0.426571,0.0831854,0.522889,0.990517,0.355777,0.0704887,0.0935048,0.352656,0.258171,0.481322,0.817666,0.836495,0.932957,0.662251,0.903599,0.748507,0.470546,0.209268,0.76301,0.739185,0.998279,0.470922,0.717274,0.405197,0.678493,0.814593,0.381442,0.556753,0.62102,0.41538,0.0497559,0.0475918,0.498566,0.572645,0.0381089,0.854343,0.643134,0.131614,0.206999,0.901305,0.612936,0.0246646,0.7378,0.545892,0.686916,0.641399,0.294399,0.157462,0.850667,0.0574095,0.896647,0.848946,0.528331,0.613921,0.254143,0.206824,0.428514,0.635585,0.763577,0.049534,0.0509654,0.813333,0.0971258,0.549531,0.385978,0.135235,0.403874,0.0291115,0.266849,0.610873,0.930416,0.879784,0.635537,0.668216,0.425677,0.322453,0.309614,0.720076,0.479915,0.160281,0.777485,0.376562,0.0092273,0.305816,0.990483,0.263371,0.51264,0.418996,0.898956,0.276217,0.46853,0.949921,0.0895503,0.565656,0.499452,0.475528,0.700891,0.903326,0.50464,0.96774,0.514199,0.435056,0.847524,0.149736,0.103272,0.2732,0.472189,0.412886,0.993276,0.952104,0.573167,0.770761,0.328665,0.582394,0.0765776,0.319148,0.845765,0.589218,0.738145,0.744721,0.865435,0.206675,0.694642,0.954986,0.772331,0.194094,0.430514,0.473222,0.0974198,0.935153,0.440962,0.611618,0.370209,0.288485,0.761354,0.473481,0.561685,0.233543,0.886367,0.554961,0.185647,0.459534,0.325722,0.514312,0.0419279,0.4023,0.83346,0.887693,0.991518,0.571605,0.632414,0.856954,0.77828,0.327055,0.811939,0.550611,0.521149,0.242453,0.023833,0.618569,0.177607,0.464795,0.230187,0.547816,0.75328,0.991542,0.0212967,0.314965,0.225085,0.907663,0.869927,0.410732,0.367197,0.195649,0.925044,0.409125,0.597949,0.758504,0.296818,0.589467,0.330109,0.929231,0.446421,0.108389,0.256286,0.25836,0.659,0.777435,0.500814,0.682833,0.396004,0.67842,0.147627,0.626191,0.226236,0.900907,0.617733,0.247533,0.215872,0.842817,0.155196,0.085799,0.253549,0.522393,0.281448,0.178593,0.931518,0.879397,0.937097,0.228336,0.468864,0.267206,0.157567,0.915285,0.375595,0.413853,0.173646,0.0345947,0.191288,0.67446,0.717427,0.587292,0.35288,0.865055,0.213483,0.579116,0.765962,0.831216,0.82665,0.981834,0.674033,0.981846,0.0676333,0.927582,0.504239,0.349081,0.106175,0.435758,0.228479,0.0432716,0.664093,0.697343,0.310478,0.82166,0.612628,0.686073,0.235513,0.786274,0.720667,0.426802
,0.460734,0.438095,0.0140938,0.813614,0.30315,0.227577,0.39273,0.0691117,0.0587926,0.21938,0.0509461,0.732826,0.201226,0.118579,0.660407,0.705465,0.467661,0.766582,0.141223,0.696139,0.809854,0.805316,0.393482,0.120331,0.626976,0.00611096,0.806404,0.86249,0.792385,0.527072,0.289291,0.253119,0.965167,0.303385,0.0667329,0.268316,0.530962,0.459463,0.337428,0.589755,0.678843,0.388374,0.32258,0.880069,0.506954,0.982988,0.585534,0.974614,0.74957,0.726757,0.670754,0.559423,0.532074,0.0642363,0.679754,0.15905,0.0703473,0.486158,0.0215398,0.862733,0.01323,0.310831,0.115852,0.978397,0.614216,0.182585,0.246713,0.145179,0.642048,0.584141,0.734933,0.320891,0.972515,0.0575137,0.20096,0.479469,0.0405014,0.786494,0.454083,0.790071,0.513252,0.124837,0.349494,0.0453251,0.189073,0.0292487,0.204375,0.259421,0.515407,0.225915,0.122153,0.528637,0.536746,0.238005,0.507034,0.150963,0.420589,0.753747,0.296141,0.0626371,0.337888,0.0310747,0.383528,0.310403,0.0885885,0.584487,0.789871,0.12909,0.370982,0.243955,0.919161,0.884233,0.368792,0.268655,0.929558,0.557865,0.297904,0.133933,0.817286,0.813311,0.359848,0.939439,0.341948,0.896595,0.177444,0.848982,0.0475575,0.598033,0.602729,0.343699,0.66067,0.940616,0.374774,0.0441981,0.251019,0.463362,0.628686,0.0408901,0.592452,0.999667,0.284845,0.511613,0.8839,0.653636,0.780268,0.813459,0.211501,0.0781718,0.947392,0.028787,0.891483,0.307241,0.968226,0.233431,0.203836,0.14567,0.0824128,0.251393,0.743703,0.685141,0.595092,0.404373,0.625757,0.969865,0.448571,0.876776,0.433227,0.0772569,0.917666,0.0256793,0.0769241,0.202511,0.537292,0.960825,0.856147,0.31756,0.774283,0.0676485,0.395732,0.721676,0.0964355,0.287215,0.0289165,0.0646614,0.520646,0.232752,0.210331,0.603059,0.484145,0.954034,0.2882,0.0792369,0.358407,0.913957,0.0491024,0.806979,0.790733,0.48233,0.884236,0.7084,0.508009,0.96116,0.910911,0.0453013,0.921984,0.767058,0.362862,0.696268,0.834706,0.758594,0.417943,0.931142,0.0458084,0.44686,0.995803,0.566454,0.679612,0.206134,0.169513,0.163757,0.160168,0.457713,0.242994,0.518576,0.37167,0.292096,0.325554,0.162403,0.774426,0.20979,0.870803,0.282435,0.17095,0.781714,0.327736,0.0929338,0.548772,0.690598,0.789201,0.383478,0.449192,0.207145,0.31462,0.495,0.654005,0.310423,0.0614543,0.333617,0.516558,0.230967,0.497373,0.676726,0.68868,0.740367,0.195302,0.0603496,0.0324636,0.520856,0.222753,0.80689,0.730646,0.0935562,0.0893248,0.901596,0.87527,0.417061,0.99453,0.424042,0.107659,0.783731,0.80752,0.556851,0.990876,0.12214,0.051851,0.644881,0.432564,0.113305,0.978497,0.949122,0.344272,0.475871,0.625848,0.0329519,0.216238,0.82115,0.0933015,0.248702,0.342006,0.316054,0.0555912,0.0726523,0.409611,0.144916,0.974248,0.284881,0.561977,0.968778,0.708923,0.669637,0.752509,0.516443,0.226487,0.743385,0.638583,0.278339,0.388265,0.0711473,0.391644,0.366763,0.020269,0.735916,0.842633,0.646117,0.768868,0.0588712,0.467267,0.86217,0.307573,0.809273,0.178224,0.363164,0.881925,0.587835,0.50808,0.856173,0.872715,0.0700573,0.824951,0.581638,0.739694,0.57746,0.098081,0.966181,0.320844,0.736664,0.24452,0.70911,0.807812,0.636164,0.0758721,0.828081,0.37208,0.918505,0.474198,0.140948,0.977377,0.941464,0.00311739,0.284949,0.750737,0.181341,0.648113,0.632663,0.769176,0.156193,0.488836,0.641891,0.226251,0.313787,0.22353,0.965945,0.891246,0.321611,0.932126,0.21209,0.058275,0.176646,0.9212,0.866087,0.812809,0.997072,0.694168,0.184889,0.915577,0.168365,0.325837,0.892954,0.10983,0.328954,0.177903,0.860567,0.510296,0.826016,0.49323,0.279472,0.98221,0.982065,0.921363,0.208461,0.295852,0.144893,0.174405,0.187098,0.466503
,0.106531,0.399188,0.524778,0.283177,0.320388,0.390865,0.0959859,0.31746,0.0850325,0.280875,0.233037,0.253398,0.606712,0.125991,0.363228,0.935666,0.303894,0.223795,0.445962,0.12991,0.717024,0.725434,0.11212,0.69909,0.646797,0.320581,0.994941,0.79169,0.494986,0.182039,0.258193,0.601517,0.581227,0.782971,0.884694,0.901616,0.173836,0.98068,0.219075,0.258869,0.261555,0.452113,0.512266,0.868267,0.578103,0.875494,0.803933,0.881997,0.0992886,0.249895,0.0119079,0.816313,0.975328,0.124028,0.515402,0.622125,0.444609,0.510344,0.413815,0.939595,0.692383,0.672008,0.541113,0.273611,0.454979,0.425806,0.175226,0.628815,0.406486,0.394302,0.887683,0.668041,0.846414,0.39995,0.536307,0.424518,0.275444,0.34024,0.306515,0.374732,0.590135,0.318423,0.191045,0.565463,0.442451,0.706448,0.187588,0.887061,0.216791,0.601403,0.826656,0.909175,0.273411,0.367769,0.182785,0.728389,0.793575,0.358012,0.357204,0.200061,0.752313,0.244888,0.868102,0.598728,0.644837,0.404409,0.0232452,0.920281,0.744649,0.32976,0.295013,0.334784,0.648183,0.486059,0.900247,0.0906345,0.192506,0.087835,0.977695,0.409298,0.689238,0.804351,0.318472,0.962649,0.17212,0.501258,0.691038,0.965694,0.859269,0.048242,0.165755,0.611583,0.29313,0.0338572,0.21031,0.937967,0.438266,0.233555,0.858248,0.182915,0.563316,0.153261,0.517699,0.211499,0.63932,0.417946,0.302133,0.831826,0.505781,0.279828,0.241124,0.195019,0.0841792,0.559596,0.157667,0.256299,0.0608537,0.848705,0.221993,0.920123,0.896947,0.387749,0.531706,0.190077,0.421606,0.742016,0.128044,0.859872,0.975571,0.986292,0.0427874,0.538887,0.139553,0.560486,0.750386,0.778873,0.978432,0.0525193,0.610699,0.484213,0.332348,0.851823,0.679232,0.416527,0.411419,0.836899,0.672826,0.472272,0.685604,0.894819,0.392396,0.582551,0.282567,0.924101,0.772628,0.704173,0.666117,0.900672,0.564045,0.641689,0.886964,0.606833,0.180576,0.026517,0.167319,0.930962,0.80539,0.145752,0.983481,0.416089,0.629965,0.315828,0.267912,0.309196,0.732355,0.679331,0.146095,0.405181,0.151603,0.8317,0.3,0.543999,0.414251,0.582567,0.4681,0.186879,0.28674,0.134217,0.0875513,0.850786,0.775906,0.974515,0.457618,0.956482,0.00103207,0.624938,0.887443,0.806422,0.770689,0.870924,0.222511,0.400654,0.186753,0.490423,0.70985,0.919108,0.169754,0.855946,0.324289,0.321357,0.687645,0.624288,0.865356,0.101896,0.206855,0.333456,0.288776,0.493595,0.467673,0.376327,0.344381,0.243579,0.350842,0.801999,0.20006,0.351874,0.426937,0.0875037,0.158296,0.197626,0.958428,0.380808,0.59828,0.145181,0.871231,0.30813,0.0642888,0.040985,0.164075,0.388578,0.362342,0.851721,0.0128659,0.227698,0.953617,0.219721,0.561153,0.242393,0.713317,0.0288259,0.61872,0.0576974,0.272405,0.969562,0.859697,0.472465,0.321436,0.286633,0.559969,0.479732,0.484259,0.518397,0.86054,0.0825388,0.663577,0.731771,0.390668,0.727866,0.772756,0.554744,0.116444,0.135098,0.406464,0.12931,0.362796,0.360081,0.349031,0.923949,0.602474,0.0623473,0.952775,0.221194,0.120045,0.22518,0.190756,0.979741,0.697645,0.512192,0.266375,0.257613,0.991924,0.750634,0.77601,0.852464,0.833173,0.439587,0.584235,0.223841,0.167454,0.356991,0.778585,0.283898,0.492089,0.185049,0.413207,0.854885,0.54513,0.762238,0.778834,0.147604,0.824585,0.731609,0.368798,0.94463,0.956789,0.559554,0.924372,0.654433,0.0717452,0.190746,0.912046,0.0636692,0.941381,0.688056,0.916133,0.774553,0.127644,0.500369,0.998395,0.295097,0.85736,0.77698,0.578995,0.349449,0.962029,0.992202,0.204335,0.50716,0.75444,0.983169,0.654764,0.579026,0.714778,0.0235623,0.523656,0.671567,0.583116,0.448028,0.326,0.654861,0.638774,0.238046,0.71853,0.580154,0.926103,0.634664,0.354708,
0.0537463,0.135032,0.353103,0.348844,0.992392,0.130083,0.927839,0.341841,0.092112,0.920041,0.546176,0.599272,0.674482,0.529345,0.254036,0.253507,0.244123,0.277598,0.777164,0.915689,0.860714,0.225191,0.241689,0.515576,0.863965,0.479735,0.234106,0.44412,0.405838,0.86877,0.798827,0.459584,0.00380189,0.15193,0.808428,0.996194,0.282013,0.736267,0.338035,0.374125,0.656308,0.884211,0.973397,0.330789,0.413556,0.227433,0.584297,0.657679,0.505031,0.36146,0.573368,0.365746,0.586652,0.815057,0.881321,0.450617,0.294792,0.115427,0.894736,0.700629,0.984197,0.693564,0.160213,0.987998,0.845494,0.968641,0.984192,0.127507,0.704908,0.322228,0.501632,0.361215,0.206439,0.475028,0.692005,0.619995,0.702461,0.276302,0.277674,0.207493,0.637762,0.851041,0.573238,0.224414,0.666098,0.454559,0.67503,0.960889,0.569986,0.569767,0.661519,0.554183,0.26333,0.821732,0.542181,0.108824,0.790373,0.526374,0.236331,0.49528,0.848602,0.737962,0.856496,0.055041,0.212991,0.5485,0.675036,0.915452,0.824802,0.95271,0.122945,0.462564,0.803751,0.696183,0.686978,0.469849,0.150742,0.362009,0.430739,0.720728,0.931775,0.0922576,0.274911,0.195105,0.913989,0.817092,0.303929,0.704362,0.343466,0.54026,0.199642,0.192068,0.278222,0.0561379,0.247109,0.491213,0.604638,0.922145,0.406665,0.42944,0.874855,0.529609,0.892005,0.678607,0.225792,0.578983,0.148456,0.376534,0.940991,0.579195,0.0972619,0.872767,0.671452,0.372173,0.0678718,0.585442,0.189265,0.371801,0.289804,0.532731,0.912061,0.489446,0.724799,0.190283,0.545584,0.971908,0.681495,0.150222,0.894053,0.0881599,0.579663,0.768908,0.617769,0.471668,0.447514,0.843561,0.0506507,0.59597,0.220095,0.991642,0.175165,0.317357,0.864409,0.846618,0.689529,0.932281,0.432059,0.878795,0.304082,0.721863,0.411526,0.216142,0.211309,0.136325,0.406425,0.756894,0.108232,0.08792,0.907116,0.00228518,0.17608,0.486779,0.771193,0.793849,0.958447,0.218708,0.63741,0.00909734,0.814678,0.857505,0.000739471,0.989843,0.174861,0.865148,0.836461,0.864391,0.797429,0.268521,0.743185,0.10151,0.990384,0.154711,0.317652,0.201693,0.291036,0.724077,0.958587,0.399268,0.811997,0.865703,0.401553,0.988077,0.352482,0.172746,0.781926,0.310929,0.391454,0.419336,0.320026,0.206132,0.27684,0.320765,0.195975,0.451702,0.185914,0.0324362,0.316092,0.983342,0.300957,0.0592777,0.0848526,0.29134,0.213989,0.402505,0.493034,0.505025,0.126582,0.451621,0.904293,0.938579,0.317324,0.305846,0.926656,0.669806,0.478592,0.708582,0.980734,0.870046,0.127918,0.30076,0.0761781,0.404759,0.621525,0.272153,0.85646,0.807439,0.304589,0.172553,0.790781,0.605546,0.23183,0.875634,0.896887,0.445819,0.278139,0.38992,0.950844,0.404721,0.841541,0.855136,0.343301,0.158865,0.160982,0.269957,0.82867,0.639575,0.97854,0.809404,0.509621,0.106458,0.110164,0.585799,0.511216,0.73169,0.857952,0.367677,0.539128,0.162542,0.540229,0.32991,0.768088,0.77206,0.205544,0.664974,0.217879,0.483682,0.0548948,0.168722,0.888404,0.896436,0.0238588,0.231704,0.0553005,0.184841,0.501661,0.883971,0.824416,0.480201,0.693375,0.334037,0.586659,0.803539,0.919836,0.0978751,0.535229,0.777788,0.465552,0.0743572,0.94033,0.00578086,0.404267,0.708418,0.77784,0.60981,0.373392,0.995719,0.093493,0.428287,0.164441,0.981897,0.324723,0.1883,0.213601,0.380023,0.373141,0.715262,0.263994,0.197557,0.195463,0.957369,0.531594,0.782122,0.760908,0.45143,0.879997,0.296137,0.229218,0.345549,0.370494,0.169547,0.35133,0.774761,0.877965,0.12917,0.384571,0.251357,0.124889,0.478064,0.679644,0.289331,0.459961,0.00436639,0.477631,0.673562,0.38439,0.850772,0.388824,0.648383,0.0483293,0.584287,0.605752,0.579923,0.366409,0.36666,0.0313526,0.24640
6,0.662797,0.26057,0.591955,0.0332908,0.430118,0.943285,0.808052,0.308082,0.0724546,0.192623,0.559439,0.197344,0.670687,0.239083,0.486674,0.130648,0.24345,0.964305,0.80421,0.627839,0.815077,0.193034,0.276222,0.863406,0.777321,0.881975,0.443329,0.14373,0.248635,0.474682,0.390136,0.911432,0.735252,0.982091,0.944723,0.16537,0.925376,0.752774,0.473452,0.997831,0.945397,0.0328917,0.195174,0.616084,0.271975,0.681848,0.746732,0.515424,0.646153,0.550942,0.143263,0.461231,0.743976,0.419486,0.324637,0.521297,0.301461,0.767966,0.665027,0.550096,0.242648,0.055163,0.461527,0.977901,0.0372544,0.40625,0.143271,0.96263,0.159024,0.616723,0.960461,0.104421,0.649615,0.155635,0.720505,0.921589,0.837483,0.467237,0.437014,0.483637,0.0181793,0.580277,0.944867,0.762155,0.999763,0.269504,0.283451,0.301224,0.0374706,0.948478,0.85132,0.280119,0.00364113,0.312847,0.25802,0.0408955,0.719097,0.40129,0.00352588,0.878121,0.0180132,0.963987,0.982542,0.667628,0.119622,0.703047,0.589217,0.957105,0.170285,0.0262308,0.440742,0.188464,0.606508,0.38561,0.950619,0.606271,0.655114,0.23407,0.907495,0.692584,0.182548,0.758815,0.972703,0.18619,0.0716618,0.230723,0.227085,0.790759,0.632014,0.230611,0.668879,0.650027,0.194598,0.651421,0.317654,0.31422,0.354469,0.906872,0.271325,0.524753,0.933102,0.712067,0.713217,0.53961,0.0976766,0.663836,0.145882,0.75279,0.897906,0.0533771,0.445375,0.0804547,0.812192,0.418078,0.266644,0.883854,0.648802,0.493729,0.674612,0.280815,0.72434,0.343492,0.930842,0.918938,0.994913,0.248496,0.233157,0.349382,0.155368,0.504482,0.874135,0.0884704,0.216549,0.587352,0.628081,0.314226,0.251188,0.773963,0.0670164,0.149094,0.82734,0.512391,0.229549,0.639532,0.93047,0.496193,0.523385,0.579271,0.989923,0.197997,0.860086,0.714263,0.541489,0.790928,0.633201,0.536402,0.0394247,0.866358,0.885784,0.194793,0.37084,0.759919,0.283263,0.58739,0.347271,0.911344,0.901615,0.598459,0.685306,0.968632,0.747554,0.512646,0.481023,0.977103,0.152178,0.411493,0.473297,0.675563,0.990764,0.463219,0.87356,0.85085,0.177482,0.41505,0.641778,0.810682,0.951452,0.681203,0.67704,0.837236,0.875996,0.0478806,0.597155,0.159259,0.63527,0.944426,0.0706027,0.536886,0.542886,0.755909,0.505517,0.29044,0.268555,0.98654,0.267543,0.420733,0.398033,0.74084,0.0962958,0.388797,0.204059,0.969856,0.239647,0.381541,0.384906,0.881425,0.192223,0.336358,0.562629,0.869264,0.173594,0.438624,0.917144,0.770749,0.597883,0.552414,0.715176,0.668486,0.0892998,0.258061,0.424395,0.594817,0.548501,0.69295,0.581358,0.816045,0.113683,0.979391,0.556884,0.209979,0.368188,0.760943,0.179835,0.607835,0.142484,0.564741,0.48926,0.334707,0.901099,0.0518888,0.20397,0.0746925,0.490513,0.121115,0.845442,0.0883962,0.673529,0.560617,0.756882,0.762829,0.818678,0.181277,0.357646,0.36718,0.874227,0.939003,0.183224,0.98791,0.918394,0.740109,0.197889,0.286582,0.501051,0.377724,0.894417,0.643535,0.942465,0.383677,0.978242,0.843564,0.435566,0.182212,0.918256,0.926079,0.303327,0.763698,0.0144752,0.976856,0.324315,0.771357,0.739684,0.142993,0.952634,0.09733,0.510173,0.826861,0.0363333,0.693398,0.814771,0.954727,0.433506,0.0126605,0.241309,0.934558,0.390385,0.135726,0.578093,0.33285,0.519403,0.556335,0.176414,0.954969,0.738547,0.0946702,0.881048,0.0418742,0.858368,0.895523,0.0187299,0.182683,0.666881,0.758414,0.325676,0.619515,0.855744,0.83585,0.446376,0.892077,0.529247,0.261147,0.846805,0.962753,0.273808,0.0881142,0.897311,0.664193,0.22384,0.475404,0.997043,0.743244,0.0317386,0.173456,0.698213,0.770286,0.268127,0.579261,0.81216,0.126495,0.474784,0.83089,0.309178,0.141665,0.589304,0.634854,0.761179,0.4
45048,0.470704,0.207555,0.337125,0.999951,0.468703,0.18393,0.962704,0.742511,0.272044,0.860015,0.406703,0.495885,0.335419,0.403746,0.239128,0.367158,0.577203,0.937341,0.137443,0.845329,0.516602,0.949603,0.971824,0.991386,0.780493,0.281002,0.133051,0.369797,0.915856,0.89423,0.814845,0.386559,0.101785,0.151971,0.38651,0.570488,0.335901,0.349214,0.312999,0.607946,0.209229,0.719702,0.10383,0.544648,0.123448,0.342959,0.911806,0.700651,0.2803,0.0492492,0.54598,0.796902,0.998853,0.517804,0.788288,0.779346,0.798806,0.921339,0.149143,0.714662,0.815569,0.963989,0.101221,0.917354,0.115959,0.487731,0.487842,0.45186,0.836946,0.800841,0.0598059,0.0461752,0.520543,0.163636,0.590824,0.643991,0.506595,0.502629,0.344642,0.786895,0.551879,0.890623,0.583797,0.550731,0.408427,0.372085,0.330077,0.207233,0.293424,0.47922,0.921895,0.108992,0.443209,0.023116,0.0263461,0.559168,0.510847,0.514188,0.0110286,0.347793,0.315029,0.0708345,0.393968,0.835571,0.234471,0.984792,0.479563,0.741066,0.487421,0.824205,0.527961,0.0392998,0.714828,0.111759,0.590031,0.123255,0.483844,0.920108,0.330488,0.777268,0.399328,0.252383,0.88626,0.842537,0.275499,0.912606,0.401705,0.786346,0.426794,0.412734,0.134139,0.741823,0.483568,0.528108,0.577394,0.718039,0.512899,0.0569567,0.459105,0.000320699,0.881162,0.987067,0.0396205,0.595989,0.0988256,0.629652,0.719244,0.58267,0.54976,0.0497318,0.359937,0.949088,0.302114,0.246197,0.791625,0.577613,0.158804,0.193331,0.363959,0.585598,0.606064,0.498098,0.32742,0.0896329,0.0262056,0.904814,0.807672,0.539105,0.961771,0.266778,0.539426,0.842933,0.253845,0.579046,0.438922,0.35267,0.208698,0.158166,0.93534,0.758457,0.207898,0.295277,0.707546,0.510012,0.541475,0.499171,0.0876253,0.700278,0.692501,0.451584,0.285876,0.298566,0.949682,0.613296,0.388199,0.975888,0.518111,0.195871,0.514993,0.479882,0.462649,0.0544186,0.322814,0.716493,0.633465,0.761736,0.0691633,0.842163,0.919902,0.00450312,0.60062,0.1278,0.29978,0.308166,0.637813,0.841255,0.807336,0.725438,0.541533,0.499838,0.177022,0.827409,0.798403,0.126704,0.440705,0.186602,0.102592,0.958816,0.382473,0.617585,0.438698,0.845121,0.672004,0.761512,0.561614,0.305469,0.523248,0.630778,0.147632,0.44315,0.635281,0.748252,0.570951,0.935061,0.0564175,0.208763,0.776316,0.863754,0.934201,0.317849,0.363591,0.111223,0.145258,0.161995,0.237928,0.585964,0.348597,0.34052,0.54478,0.73107,0.958106,0.983478,0.576191,0.63011,0.74499,0.137805,0.935579,0.268238,0.768583,0.0832101,0.711388,0.403864,0.831462,0.282339,0.338925,0.887879,0.491102,0.115241,0.751633,0.425303,0.43309,0.115225,0.536527,0.578349,0.27722,0.774455,0.164313,0.625816,0.114975,0.709093,0.356886,0.0730808,0.69257,0.933077,0.70319,0.43756,0.070882,0.638769,0.705797,0.839465,0.721979,0.417185,0.243329,0.553441,0.699524,0.582253,0.44132,0.190626,0.697494,0.192954,0.615929,0.130584,0.308178,0.152456,0.708933,0.585398,0.92691,0.873246,0.211214,0.0418854,0.582338,0.5681,0.114966,0.274908,0.501177,0.818157,0.712468,0.572059,0.456925,0.418265,0.411524,0.178905,0.835451,0.654852,0.732346,0.534974,0.237106,0.173666,0.7256,0.9346,0.36662,0.341529,0.0651845,0.674798,0.493985,0.774118,0.260196,0.420895,0.647363,0.471411,0.46278,0.229701,0.0395108,0.577747,0.50461,0.540688,0.395903,0.217078,0.112747,0.852829,0.635343,0.524271,0.0317331,0.470794,0.179123,0.764079,0.00576867,0.416229,0.937745,0.731369,0.350829,0.304364,0.072898,0.416013,0.979162,0.566883,0.190131,0.239359,0.987778,0.837494,0.710769,0.450558,0.0671954,0.75028,0.0283048,0.571805,0.290968,0.424208,0.788883,0.403715,0.277036,0.424227,0.927985,0.308769,0.895021,0.10
7109,0.0728481,0.90079,0.523337,0.0105928,0.632158,0.874166,0.314957,0.705056,0.29018,0.29412,0.271939,0.480311,0.533478,0.259717,0.317805,0.244248,0.710275,0.385,0.994528,0.73858,0.956806,0.285496,0.162788,0.745689,0.68921,0.439824,0.169915,0.617196,0.748594,0.0649363,0.724304,0.821442,0.965726,0.247642,0.832035,0.597884,0.121808,0.146992,0.302941,0.411988,0.441111,0.57488,0.892299,0.97459,0.834597,0.210104,0.218837,0.544872,0.595105,0.213365,0.283452,0.55191,0.49886,0.44624,0.297599,0.188071,0.886064,0.467514,0.805266,0.634657,0.532451,0.529571,0.456099,0.498176,0.777212,0.288133,0.0960605,0.899021,0.435125,0.399001,0.311009,0.876236,0.973881,0.203308,0.850826,0.808478,0.413413,0.0696632,0.353349,0.00851721,0.283028,0.636801,0.560427,0.781888,0.0830407,0.858026,0.969959,0.969104,0.325541,0.775225,0.603762,0.857991,0.304795,0.0598605,0.356167,0.0820079,0.347994,0.452228,0.981029,0.783119,0.851229,0.292038,0.659355,0.82511,0.495346,0.510181,0.633587,0.908759,0.579845,0.986937,0.917276,0.862873,0.623738,0.477703,0.644761,0.706779,0.33573,0.614719,0.675883,0.66127,0.389944,0.279645,0.519261,0.69474,0.339505,0.875429,0.776748,0.687499,0.327657,0.757776,0.470618,0.178886,0.0498141,0.129973,0.00399589,0.54516,0.640155,0.637583,0.453919,0.22,0.62452,0.371195,0.0828721,0.248258,0.848898,0.727633,0.955037,0.184628,0.342352,0.63092,0.845898,0.732296,0.910565,0.365159,0.427036,0.25007,0.240588,0.203784,0.937569,0.568245,0.96156,0.408187,0.747131,0.0113741,0.53816,0.751127,0.556534,0.178315,0.38871,0.0104532,0.398315,0.0132301,0.381648,0.481187,0.261488,0.230546,0.20882,0.216525,0.415174,0.551172,0.847445,0.261072,0.283468,0.75801,0.626231,0.710504,0.00807916,0.866819,0.914288,0.945648,0.435063,0.875848,0.353834,0.182194,0.887222,0.891995,0.933321,0.443756,0.0703097,0.322031,0.45421,0.468624,0.335261,0.835858,0.949811,0.596749,0.0664036,0.158631,0.813274,0.481577,0.709803,0.660719,0.742649,0.993271,0.418729,0.36888,0.703775,0.426808,0.235698,0.618063,0.372456,0.670762,0.493911,0.72629,0.852956,0.381133,0.618285,0.786276,0.82489,0.688594,0.108307,0.279099,0.157219,0.443567,0.114957,0.10703,0.0403162,0.18136,0.265661,0.85359,0.662937,0.975464,0.514309,0.405586,0.968735,0.933038,0.774466,0.67251,0.359845,0.010164,0.290573,0.732301,0.680925,0.784485,0.458591,0.533881,0.165618,0.0768756,0.320157,0.990508,0.76547,0.428464,0.269607,0.922689,0.872031,0.384563,0.0297189,0.912347,0.565923,0.29538,0.765938,0.228861,0.270844,0.280247,0.634447,0.239579,0.213285,0.408913,0.912089,0.57313,0.419077,0.202662,0.305431,0.100002,0.987147,0.764022,0.633883,0.152765,0.840898,0.954041,0.143272,0.606368,0.382504,0.412879,0.529056,0.254536,0.797442,0.558775,0.166883,0.363366,0.854155,0.932821,0.592227,0.124999,0.213068,0.226674,0.364578,0.426352,0.635587,0.276666,0.999482,0.0546639,0.479329,0.304913,0.154666,0.466475,0.0689356,0.78855,0.61924,0.909833,0.74259,0.762512,0.516201,0.125095,0.175391,0.0452573,0.379631,0.972833,0.604033,0.546514,0.336199,0.458188,0.479334,0.928426,0.583187,0.692402,0.155099,0.947764,0.118754,0.790686,0.224431,0.118236,0.84535,0.703759,0.42315,1.65561e-05,0.170235,0.492085,0.788566,0.789475,0.401919,0.531157,0.551987,0.918119,0.656252,0.727379,0.963377,0.0358825,0.700212,0.567409,0.582396,0.0364111,0.0255971,0.0617306,0.964837,0.608784,0.754132,0.119936,0.556548,0.872886,0.910622,0.780979,0.991123,0.755973,0.484738,0.414273,0.755989,0.654973,0.906358,0.544556,0.444448,0.308277,0.0757125,0.996435,0.226396,0.731964,0.723814,0.189773,0.767847,0.424026,0.757182,0.350243,0.460437,0.782779,0.411974,0.4252
74,0.391563,0.166106,0.54521,0.948111,0.0389927,0.455832,0.72909,0.0301155,0.211805,0.213829,0.444388,0.967794,0.868802,0.350746,0.51235,0.31325,0.659023,0.588062,0.309685,0.885419,0.320027,0.0334989,0.0751919,0.0878736,0.457525,0.832374,0.438117,0.917962,0.615154,0.850091,0.343235,0.00671699,0.0161968,0.888445,0.954829,0.0551895,0.344277,0.683919,0.085305,0.556082,0.897748,0.529693,0.523876,0.766549,0.880439,0.036226,0.079799,0.539462,0.624288,0.389484,0.424881,0.944315,0.422983,0.500073,0.0321883,0.880508,0.332447,0.470305,0.79847,0.947601,0.320396,0.141705,0.954318,0.336592,0.0301502,0.909146,0.391782,0.374428,0.593065,0.477087,0.93051,0.490813,0.00678011,0.454386,0.257362,0.887219,0.490612,0.337161,0.426682,0.1149,0.726645,0.851563,0.0592152,0.149628,0.351636,0.0914036,0.030136,0.684083,0.561709,0.828605,0.631684,0.882104,0.97031,0.586002,0.218697,0.000460631,0.495148,0.610479,0.374888,0.0882135,0.0875655,0.305398,0.579026,0.0943457,0.759785,0.836388,0.981565,0.250397,0.17355,0.408247,0.365297,0.900195,0.25981,0.424512,0.0498229,0.611446,0.515916,0.0799588,0.295529,0.0776247,0.908564,0.927213,0.959729,0.878875,0.513215,0.178426,0.879335,0.00836279,0.788904,0.254224,0.0965763,0.87647,0.559622,0.675603,0.970816,0.319406,0.511991,0.952381,0.569803,0.68554,0.360628,0.9351,0.585735,0.620437,0.359613,0.635558,0.231883,0.875529,0.715517,0.527412,0.953154,0.624081,0.454624,0.912883,0.502956,0.967839,0.0913084,0.382292,0.976202,0.880213,0.636515,0.0727779,0.756683,0.196137,0.74838,0.727498,0.515543,0.260371,0.679879,0.0853467,0.945912,0.0405066,0.0204471,0.531647,0.660944,0.38006,0.167205,0.892827,0.255589,0.882722,0.420238,0.208743,0.506804,0.874863,0.121625,0.00975974,0.842702,0.212934,0.392051,0.818903,0.0931464,0.0285665,0.891681,0.849829,0.224704,0.640062,0.577327,0.740247,0.900433,0.257206,0.825594,0.846345,0.297713,0.846041,0.377992,0.958657,0.226101,0.545198,0.851484,0.48169,0.42792,0.271722,0.690432,0.934724,0.146585,0.812058,0.944483,0.989287,0.0249914,0.336535,0.80819,0.118138,0.365101,0.699871,0.967967,0.589805,0.339933,0.545294,0.330052,0.240366,0.802501,0.155645,0.0867107,0.100214,0.00168622,0.464703,0.0588705,0.227787,0.00990063,0.910354,0.709477,0.437821,0.182077,0.399909,0.372544,0.328662,0.211967,0.317027,0.317948,0.236959,0.653562,0.126138,0.355096,0.0186632,0.826009,0.323063,0.608468,0.165942,0.868358,0.93852,0.406308,0.670858,0.094165,0.493018,0.771072,0.0958512,0.957721,0.829942,0.323638,0.967622,0.740297,0.0331151,0.405443,0.922373,0.433024,0.777987,0.251035,0.644992,0.0950142,0.568983,0.88195,0.748576,0.695122,0.237047,0.76724,0.521131,0.56011,0.375707,0.687073,0.428468,0.314227,0.0933806,0.0993258,0.408392,0.586399,0.870398,0.504243,0.54412,0.70034,0.827881,0.511742,0.440637,0.860996,0.917185,0.36301,0.294021,0.695172,0.614045,0.939012,0.790186,0.183028,0.820962,0.538762,0.87815,0.0580089,0.306002,0.399281,0.618119,0.681709,0.086354,0.0465863,0.995936,0.179735,0.145912,0.404328,0.766134,0.0163098,0.908571,0.310254,0.71665,0.736452,0.821996,0.157286,0.597449,0.739181,0.520296,0.891469,0.434353,0.134341,0.830482,0.224539,0.31737,0.651444,0.763301,0.19552,0.709453,0.0693033,0.594801,0.327572,0.751012,0.681155,0.374158,0.746948,0.860889,0.52007,0.151276,0.627023,0.53638,0.0598473,0.937277,0.25303,0.796299,0.759273,0.410316,0.393748,0.498454,0.930613,0.285217,0.932808,0.0649542,0.115699,0.157347,0.382324,0.767143,0.920648,0.577844,0.476596,0.989951,0.172644,0.804168,0.740964,0.853799,0.178326,0.487912,0.714688,0.698396,0.639189,0.341711,0.234776,0.699036,0.278988,0.487806,0.4
95335,0.0382611,0.898123,0.889083,0.536716,0.828736,0.174301,0.469523,0.89369,0.29,0.62687,0.276014,0.0571426,0.547518,0.853857,0.533739,0.537469,0.0265016,0.337906,0.278433,0.880301,0.516232,0.766345,0.594989,0.214629,0.405533,0.9367,0.449405,0.104569,0.215688,0.937212,0.599905,0.253949,0.835334,0.488988,0.790665,0.66407,0.663288,0.260188,0.55776,0.953288,0.887057,0.833773,0.0104306,0.434575,0.68763,0.544169,0.972044,0.714132,0.882076,0.250476,0.594433,0.398308,0.0168212,0.189422,0.612937,0.422355,0.126122,0.062342,0.526924,0.34181,0.999554,0.126828,0.59576,0.834888,0.615816,0.386424,0.498958,0.279105,0.646612,0.0567171,0.232393,0.533669,0.89049,0.242823,0.968244,0.578121,0.786993,0.940288,0.292253,0.669068,0.190765,0.886685,0.0673764,0.207586,0.0761069,0.680313,0.629941,0.202229,0.742655,0.156864,0.544039,0.742209,0.283693,0.139798,0.577097,0.899509,0.526223,0.0760541,0.178613,0.172835,0.132771,0.411006,0.706504,0.0232615,0.653829,0.674749,0.601382,0.440822,0.615037,0.893635,0.10989,0.805802,0.78032,0.177267,0.0133878,0.856427,0.85758,0.643328,0.0586556,0.600235,0.800193,0.602695,0.342444,0.0838853,0.742493,0.919541,0.983394,0.268716,0.995595,0.162008,0.441551,0.128366,0.573014,0.148055,0.151628,0.226843,0.822804,0.75301,0.667665,0.437841,0.646645,0.777556,0.243643,0.426965,0.954822,0.257031,0.283391,0.812403,0.900359,0.342047,0.412638,0.700552,0.944742,0.755082,0.784437,0.687235,0.674623,0.767831,0.955951,0.670218,0.929839,0.397502,0.798584,0.502853,0.545557,0.950212,0.729696,0.368361,0.703222,0.397361,0.806202,0.349867,0.174917,0.0498456,0.776831,0.129739,0.306877,0.0602225,0.942142,0.207236,0.40227,0.35478,0.907788,0.347011,0.109862,0.692225,0.0342459,0.784485,0.460057,0.990197,0.454704,0.389896,0.387698,0.253288,0.892748,0.933255,0.2035,0.622444,0.301616,0.906722,0.0198051,0.107819,0.256589,0.194722,0.157664,0.0334197,0.324461,0.464541,0.0936422,0.266603,0.671777,0.495912,0.621382,0.579565,0.842923,0.731245,0.27179,0.877169,0.51573,0.731847,0.867365,0.970433,0.121742,0.255064,0.223721,0.0144904,0.188319,0.427221,0.636934,0.489935,0.333943,0.656739,0.597753,0.590532,0.851461,0.755417,0.623952,0.175922,0.219958,0.717594,0.442525,0.891735,0.213506,0.0639073,0.4713,0.0564285,0.795152,0.74309,0.933597,0.310882,0.474937,0.800963,0.281315,0.596679,0.0560264,0.505037,0.611169,0.244345,0.932258,0.248103,0.73428,0.266201,0.904843,0.332033,0.856733,0.756304,0.0874505,0.480685,0.932226,0.307409,0.198279,0.374751,0.199144,0.411785,0.438659,0.670444,0.468213,0.23381,0.413534,0.40181,0.544692,0.88847,0.202773,0.826007,0.485149,0.2588,0.331044,0.0963179,0.503145,0.263302,0.344421,0.237424,0.529503,0.249264,0.569458,0.386237,0.0055685,0.656908,0.866922,0.937795,0.964317,0.0652008,0.312546,0.163461,0.476985,0.751205,0.833904,0.945198,0.985015,0.247438,0.347009,0.529707,0.135908,0.549782,0.355715,0.621057,0.808582,0.686759,0.717375,0.311726,0.950061,0.0617962,0.549151,0.479564,0.311061,0.118609,0.865801,0.316629,0.775517,0.732723,0.254424,0.739833,0.797924,0.56697,0.903294,0.274909,0.318175,0.737199,0.220108,0.30319,0.984637,0.567117,0.832897,0.120545,0.116899,0.188612,0.741602,0.92548,0.875371,0.458976,0.237207,0.825432,0.520773,0.786358,0.304997,0.831833,0.904966,0.170798,0.148462,0.680483,0.903521,0.402886,0.420316,0.701445,0.969856,0.323611,0.976354,0.288031,0.0608091,0.196462,0.591221,0.0454456,0.763578,0.424118,0.16599,0.880477,0.61273,0.907592,0.805957,0.488102,0.366568,0.0431641,0.313534,0.887341,0.829522,0.618531,0.719174,0.734488,0.789329,0.867636,0.414971,0.69285,0.270522,0.835288,0.394294,0.
240378,0.158898,0.370648,0.528408,0.219707,0.56711,0.119629,0.265153,0.330688,0.543747,0.431143,0.211165,0.156477,0.338735,0.0171221,0.644579,0.705303,0.0602862,0.958113,0.592644,0.889808,0.576643,0.311817,0.624297,0.365972,0.179453,0.0392678,0.0588219,0.449975,0.874555,0.453116,0.690353,0.0334537,0.823765,0.218761,0.253161,0.390875,0.33839,0.518314,0.721563,0.882137,0.949457,0.932728,0.0386141,0.288192,0.94985,0.683193,0.993495,0.0101359,0.641306,0.586139,0.899944,0.217949,0.897956,0.524241,0.583921,0.077409,0.563508,0.642743,0.527384,0.438064,0.0958595,0.217737,0.471518,0.919624,0.436498,0.724679,0.310499,0.774888,0.242993,0.0320616,0.657025,0.19245,0.964789,0.695639,0.480641,0.914639,0.378832,0.474136,0.924775,0.0201371,0.0602748,0.824719,0.238086,0.958231,0.34896,0.822007,0.0356397,0.912468,0.464751,0.563024,0.350532,0.56061,0.78076,0.82205,0.480234,0.217258,0.546728,0.790733,0.992146,0.789721,0.822795,0.64917,0.98217,0.787584,0.344809,0.462812,0.702223,0.72364,0.936948,0.626998,0.743778,0.997223,0.451717,0.981864,0.955454,0.800676,0.803871,0.991093,0.713144,0.268622,0.554117,0.0636764,0.829232,0.334877,0.885726,0.309466,0.552135,0.432454,0.100199,0.544281,0.222175,0.922993,0.193451,0.204346,0.710577,0.53826,0.667157,0.4128,0.2619,0.604105,0.0397979,0.00567783,0.601328,0.491515,0.987541,0.556782,0.292191,0.791413,0.547875,0.00533523,0.0600341,0.101992,0.0690116,0.889266,0.43687,0.954738,0.198732,0.989005,0.387192,0.29893,0.533286,0.609367,0.221924,0.726737,0.813713,0.932501,0.264997,0.48087,0.345301,0.526897,0.0849758,0.385099,0.532575,0.686304,0.876614,0.520116,0.243086,0.168805,0.311529,0.790961,0.17414,0.371563,0.892954,0.243152,0.260829,0.329823,0.197889,0.45956,0.318828,0.585081,0.758491,0.852114,0.194449,0.980415,0.578851,0.00816162,0.912916,0.843848,0.489032,0.258217,0.370745,0.574008,0.643316,0.90332,0.260312,0.51993,0.423436,0.503398,0.688735,0.734965,0.294359,0.862875,0.106529,0.187313,0.106027,0.367357,0.517137,0.303916,0.826918,0.835965,0.888997,0.585408,0.688079,0.0834462,0.565823,0.26693,0.0916078,0.478739,0.110778,0.58064,0.736956,0.481523,0.154648,0.380272,0.384843,0.414959,0.900202,0.80828,0.918358,0.588937,0.543245,0.212717,0.451812,0.649774,0.40003,0.557839,0.0171311,0.917167,0.861755,0.844049,0.753132,0.750753,0.429457,0.441211,0.834199,0.99528,0.708142,0.925807,0.474019,0.81892,0.506447,0.210974,0.300443,0.661094,0.591246,0.685287,0.0760538,0.491448,0.493566,0.994411,0.0803858,0.0368115,0.207128,0.532198,0.686585,0.607159,0.0900375,0.703716,0.524326,0.951793,0.547765,0.277458,0.702546,0.977222,0.718669,0.536745,0.972502,0.426811,0.462552,0.446521,0.24573,0.968999,0.657495,0.546174,0.630093,0.248742,0.23146,0.706147,0.74019,0.725027,0.700558,0.820576,0.761838,0.907687,0.352774,0.448423,0.514845,0.442812,0.15214,0.0391711,0.394605,0.699905,0.316629,0.0971505,0.677127,0.0352975,0.633896,0.64963,0.462108,0.0964478,0.0961509,0.707838,0.0654467,0.753646,0.254012,0.69554,0.00238802,0.485473,0.401687,0.742578,0.210499,0.102245,0.563154,0.972337,0.00993235,0.915928,0.420761,0.524778,0.35874,0.572901,0.563949,0.753344,0.272806,0.880578,0.850495,0.949933,0.915875,0.48439,0.599562,0.377983,0.580838,0.695713,0.0858215,0.646285,0.44936,0.339834,0.341825,0.451748,0.825306,0.743512,0.194326,0.0358055,0.845757,0.75748,0.00814293,0.85569,0.673408,0.428904,0.380467,0.0321475,0.00180444,0.944416,0.785492,0.27461,0.824994,0.635986,0.224543,0.740869,0.120377,0.824105,0.118852,0.701215,0.519819,0.204673,0.3475,0.969178,0.544507,0.689325,0.420926,0.369813,0.432836,0.615252,0.405619,0.27859
4,0.372732,0.413762,0.134283,0.0461397,0.842665,0.514751,0.0782872,0.84447,0.459167,0.863779,0.11908,0.284161,0.499765,0.343623,0.0250295,0.620142,0.167728,0.143881,0.321357,0.687546,0.348555,0.668857,0.656725,0.893061,0.358182,0.0776508,0.262875,0.791018,0.692903,0.668493,0.0696114,0.0656347,0.0822551,0.203894,0.111774,0.924921,0.718645,0.190062,0.769391,0.177812,0.0538405,0.88847,0.461972,0.553606,0.232093,0.487002,0.173748,0.399821,0.630883,0.495105,0.087367,0.979438,0.163962,0.744092,0.872499,0.522144,0.821742,0.135374,0.313162,0.514645,0.803867,0.382773,0.58028,0.886123,0.586668,0.692054,0.811043,0.305313,0.882116,0.580434,0.483124,0.935957,0.468904,0.945097,0.489562,0.700997,0.432099,0.66331,0.100818,0.062982,0.158416,0.188185,0.0424197,0.322378,0.932276,0.914919,0.844522,0.754019,0.0502927,0.157683,0.268664,0.85416,0.540456,0.848944,0.740283,0.127124,0.540998,0.551326,0.432436,0.423115,0.131759,0.915561,0.359071,0.600663,0.860658,0.848633,0.301661,0.292756,0.511944,0.402478,0.355738,0.67036,0.590663,0.398158,0.992737,0.522939,0.313077,0.837259,0.276958,0.36337,0.994942,0.545622,0.21753,0.535399,0.394566,0.957812,0.662523,0.935565,0.509138,0.0949592,0.358679,0.640898,0.0105201,0.71775,0.241561,0.871178,0.566384,0.543222,0.163934,0.0783277,0.9457,0.519673,0.748687,0.536363,0.917831,0.741425,0.059302,0.230908,0.578684,0.33626,0.594277,0.573626,0.881882,0.811807,0.109025,0.276448,0.76962,0.771548,0.212013,0.278758,0.866507,0.570692,0.919655,0.877027,0.288443,0.161216,0.748205,0.854826,0.704438,0.912139,0.933154,0.650138,0.431812,0.681841,0.1865,0.349642,0.423266,0.245802,0.58055,0.00194989,0.582062,0.174828,0.575576,0.463945,0.986635,0.684601,0.740393,0.756254,0.456149,0.952406,0.0350121,0.322656,0.523098,0.954667,0.199683,0.811541,0.115884,0.947888,0.666368,0.820322,0.860027,0.599522,0.470459,0.291839,0.281363,0.65696,0.641481,0.704629,0.902762,0.222031,0.706579,0.484825,0.396859,0.282155,0.948769,0.383494,0.966756,0.689162,0.139748,0.422905,0.641568,0.17476,0.745561,0.164667,0.129427,0.945244,0.976208,0.245311,0.893132,0.642575,0.065633,0.753159,0.242097,0.536092,0.044998,0.52346,0.193052,0.686479,0.228089,0.0958144,0.90851,0.934669,0.580639,0.305369,0.216824,0.529408,0.688863,0.18358,0.218571,0.828611,0.606486,0.860139,0.00337063,0.352047,0.0248058,0.132798,0.297291,0.00101373,0.378109,0.190424,0.643589,0.443742,0.943583,0.885686,0.979835,0.988581,0.409147,0.172887,0.67506,0.637236,0.268702,0.583571,0.571905,0.849341,0.88894,0.788729,0.378749,0.577802,0.972309,0.59732,0.406413,0.578794,0.457459,0.409784,0.930841,0.482264,0.542582,0.228133,0.483278,0.920691,0.418557,0.126867,0.364434,0.36214,0.0125536,0.344269,0.350721,0.4217,0.517156,0.0257814,0.0589362,0.785857,0.609352,0.630841,0.635198,0.498292,0.419569,0.0139472,0.0760944,0.391878,0.611267,0.482507,0.970673,0.0687252,0.892291,0.901514,0.55099,0.434873,0.129647,0.0342676,0.355564,0.548203,0.161135,0.719998,0.910343,0.173688,0.0642668,0.261064,0.595389,0.581423,0.286845,0.654325,0.36728,0.896197,0.285166,0.00247832,0.394489,0.704735,0.0164255,0.470584,0.0966133,0.627692,0.953091,0.0672859,0.696417,0.845382,0.9688,0.247407,0.280256,0.0984464,0.281675,0.63582,0.64665,0.442809,0.355818,0.556993,0.616498,0.420085,0.818057,0.211886,0.00150746,0.104902,0.866211,0.368788,0.00109931,0.151377,0.371266,0.395589,0.856112,0.387691,0.866172,0.952725,0.0153836,0.819264,0.020011,0.711801,0.664646,0.988811,0.959208,0.944902,0.0872571,0.240883,0.580721,0.733907,0.683692,0.93654,0.290899,0.30019,0.356624,0.108956,0.512076,0.358132,0.213858,0.378287,0
.726919,0.214957,0.529664,0.0981853,0.610545,0.385776,0.485877,0.476718,0.338501,0.50126,0.295981,0.358512,0.213061,0.960627,0.347322,0.172269,0.905529,0.434579,0.413152,0.486251,0.168486,0.096844,0.42279,0.459385,0.397034,0.779414,0.568341,0.90911,0.137546,0.782198,0.287397,0.864466,0.997155,0.817061,0.962651,0.607701,0.202837,0.448528,0.0844184,0.541337,0.949788,0.3804,0.899849,0.162849,0.341027,0.247171,0.335119,0.246556,0.681751,0.748271,0.732807,0.850237,0.845115,0.155597,0.309622,0.242149,0.935011,0.877963,0.151258,0.0725574,0.660161,0.438656,0.937023,0.657317,0.255716,0.899674,0.265017,0.458553,0.348202,0.349436,0.99989,0.29799,0.729836,0.899739,0.460839,0.0708628,0.146911,0.795958,0.317419,0.828662,0.544228,0.050226,0.678899,0.389343,0.205823,0.988521,0.631492,0.140834,0.866484,0.78275,0.213391,0.526645,0.221406,0.150414,0.183962,0.477122,0.0500884,0.448979,0.935675,0.39829,0.798415,0.935566,0.696279,0.52825,0.835305,0.157118,0.599113,0.982215,0.953076,0.916532,0.810877,0.497304,0.966758,0.489776,0.886648,0.172581,0.478296,0.518139,0.313415,0.34478,0.30089,0.526807,0.871426,0.522296,0.677221,0.0553876,0.999418,0.72731,0.504367,0.935093,0.1256,0.302782,0.870659,0.821879,0.831032,0.705963,0.978997,0.430146,0.688179,0.932073,0.346678,0.499056,0.429378,0.313436,0.988832,0.316025,0.486017,0.467128,0.834165,0.799433,0.811908,0.135055,0.326239,0.683334,0.65735,0.00346062,0.738721,0.656768,0.73077,0.243088,0.591861,0.85637,0.54587,0.46252,0.678249,0.376902,0.168483,0.657246,0.807048,0.856662,0.589319,0.153726,0.355718,0.0186971,0.467162,0.34455,0.334723,0.953179,0.811677,0.168887,0.752612,0.623586,0.303942,0.0788514,0.306919,0.961292,0.082312,0.0456405,0.61806,0.813082,0.288729,0.209921,0.669452,0.834599,0.672441,0.347701,0.211501,0.840924,0.00494667,0.0185484,0.697586,0.594266,0.172274,0.0533042,0.612963,0.639436,0.397854,0.947686,0.592615,0.209531,0.116573,0.345227,0.833117,0.420515,0.424079,0.140036,0.381808,0.506391,0.185676,0.999868,0.319473,0.474405,0.209789,0.988925,0.309004,0.88223,0.336625,0.520504,0.723154,0.341572,0.539053,0.42074,0.935838,0.711327,0.474044,0.548801,0.350763,0.871898,0.496487,0.943378,0.0814292,0.61306,0.288605,0.914546,0.0335754,0.712683,0.0545815,0.415383,0.219074,0.240258,0.415251,0.538547,0.714663,0.62504,0.527472,0.0236665,0.50727,0.864097,0.544171,0.230424,0.205669,0.0832236,0.651164,0.141507,0.79455,0.125208,0.690309,0.145313,0.997106,0.186796,0.0886908,0.0785357,0.799856,0.377296,0.993081,0.833431,0.0899792,0.047663,0.248814,0.309053,0.287921,0.664065,0.8476,0.00258374,0.289105,0.375072,0.0262502,0.796374,0.239169,0.570421,0.0267982,0.444838,0.653645,0.677962,0.586346,0.448195,0.803171,0.276654,0.593508,0.800277,0.46345,0.682199,0.878813,0.263306,0.0594945,0.871894,0.0967367,0.149474,0.919557,0.345551,0.458527,0.207478,0.00961551,0.306127,0.210062,0.29872,0.681199,0.236312,0.0950944,0.920368,0.806733,0.121893,0.365206,0.460378,0.799855,0.951552,0.908573,0.603026,0.228206,0.502081,0.403303,0.691656,0.18428,0.282116,0.954962,0.243774,0.15401,0.0516986,0.393248,0.0735675,0.397249,0.851775,0.281046,0.406865,0.157902,0.491108,0.705585,0.839101,0.72742,0.800679,0.759469,0.534153,0.922572,0.124675,0.994531,0.722427,0.0762269,0.903104,0.325453,0.304433,0.405185,0.728756,0.996089,0.589464,0.0108714,0.951051,0.833239,0.164882,0.00274978,0.226487,0.238449,0.399999,0.0782615,0.519495,0.806864,0.236163,0.0106022,0.512449,0.0752643,0.738022,0.313128,0.834733,0.272175,0.2357,0.959408,0.266706,0.958127,0.0356349,0.169809,0.28358,0.340068,0.574994,0.0123352,0.336157,0.164
459,0.0232066,0.287209,0.997697,0.188088,0.289958,0.224184,0.426537,0.689957,0.302446,0.946032,0.496821,0.538609,0.956634,0.00927022,0.613873,0.694656,0.322398,0.448606,0.966831,0.558099,0.408014,0.233536,0.516226,0.443649,0.403346,0.799805,0.783717,0.97834,0.812141,0.119875,0.142798,0.835347,0.407083,0.140496,0.0234354,0.697042,0.36468,0.449973,0.386999,0.667125,0.396004,0.88382,0.205734,0.352639,0.893091,0.819607,0.0472947,0.215489,0.268214,0.0141256,0.773588,0.676228,0.247662,0.289813,0.119877,0.651008,0.0896186,0.903595,0.629348,0.901759,0.0234694,0.772146,0.737106,0.430553,0.912642,0.760542,0.127594,0.277321,0.210514,0.514593,0.944447,0.606519,0.398414,0.150181,0.959157,0.291504,0.969788,0.00645218,0.506993,0.238002,0.0205778,0.280581,0.91423,0.26824,0.570394,0.0341079,0.919248,0.660013,0.937703,0.548595,0.561772,0.961172,0.320741,0.298878,0.391725,0.233383,0.0594199,0.519319,0.510704,0.269934,0.0339121,0.455151,0.876453,0.432326,0.605332,0.835611,0.72383,0.57512,0.842063,0.230823,0.813122,0.86264,0.511404,0.727353,0.13088,0.0817984,0.761461,0.0501279,0.741811,0.699163,0.598723,0.303583,0.660335,0.919465,0.602461,0.0520599,0.152847,0.661881,0.571379,0.663552,0.931815,0.605291,0.118703,0.808268,0.0376167,0.724034,0.643879,0.761447,0.299154,0.485941,0.99227,0.112277,0.348582,0.503675,0.839629,0.479462,0.585473,0.60109,0.52959,0.327284,0.300253,0.128313,0.630867,0.960588,0.0477778,0.233328,0.0126481,0.200625,0.895209,0.584027,0.864177,0.827024,0.189318,0.98288,0.635292,0.226934,0.706914,0.279171,0.988381,0.00606859,0.765113,0.980652,0.118345,0.113694,0.484326,0.957974,0.593157,0.0697992,0.559064,0.122747,0.397083,0.859317,0.25106,0.0279502,0.819905,0.298838,0.261278,0.832554,0.499463,0.156487,0.41658,0.36364,0.983511,0.605898,0.34652,0.618804,0.832833,0.0534342,0.897975,0.821214,0.0595028,0.663087,0.801866,0.177848,0.776781,0.286192,0.135823,0.369938,0.355991,0.694887,0.492685,0.753074,0.554204,0.743745,0.781024,0.37411,0.0425826,0.0423025,0.206663,0.542046,0.19879,0.623244,0.905686,0.182301,0.229142,0.252206,0.801104,0.0619743,0.30564,0.699079,0.883188,0.365143,0.362166,0.685054,0.542991,0.138947,0.971245,0.678814,0.508885,0.327236,0.373701,0.00156994,0.0803104,0.927905,0.745315,0.861335,0.302014,0.787897,0.903637,0.508678,0.329943,0.102427,0.131921,0.235629,0.284728,0.361063,0.487835,0.0858318,0.423037,0.793475,0.784911,0.306225,0.158618,0.147076,0.991279,0.701609,0.286024,0.962524,0.380423,0.794909,0.289761,0.754124,0.796479,0.370071,0.682029,0.541793,0.231406,0.984043,0.329691,0.135043,0.492721,0.659634,0.23747,0.624642,0.895262,0.522197,0.985705,0.383097,0.608029,0.408742,0.176573,0.39294,0.714967,0.335191,0.540016,0.706246,0.0368004,0.82604,0.668771,0.417224,0.620949,0.958532,0.171347,0.417427,0.328603,0.853376,0.959221,0.560009,0.837419,0.288912,0.695052,0.33014,0.948545,0.932521,0.954781,0.843808,0.454719,0.940486,0.226905,0.0627479,0.349228,0.403478,0.455688,0.0641953,0.738669,0.995704,0.770442,0.775469,0.821743,0.439213,0.192692,0.442692,0.397744,0.36404,0.86012,0.726347,0.217416,0.81934,0.286355,0.0548345,0.108252,0.981407,0.384974,0.0567972,0.913928,0.339755,0.900605,0.368647,0.280242,0.12751,0.431395,0.62947,0.530988,0.887082,0.693665,0.269656,0.882786,0.464107,0.0451251,0.70453,0.903319,0.237818,0.147222,0.301063,0.601857,0.00734107,0.0274101,0.819273,0.826681,0.313765,0.874108,0.934933,0.295172,0.259082,0.991731,0.209101,0.598837,0.892336,0.577748,0.879079,0.0198454,0.00914254,0.508548,0.550833,0.896225,0.202213,0.820489,0.779011,0.66632,0.865614,0.483541,0.569639,0.103432,
0.630762,0.870703,0.705289,0.638103,0.898113,0.524562,0.464785,0.211878,0.39867,0.399718,0.50705,0.657751,0.391449,0.716151,0.256588,0.283785,0.293899,0.135667,0.30363,0.303041,0.644216,0.854463,0.199266,0.846429,0.674952,0.978277,0.512749,0.540566,0.461818,0.0823885,0.643998,0.0925804,0.953091,0.349287,0.730684,0.851204,0.87385,0.195469,0.0630819,0.272519,0.595187,0.570132,0.930271,0.986636,0.286283,0.186859,0.270421,0.580182,0.322527,0.574051,0.883223,0.966742,0.428514,0.0824895,0.813171,0.103466,0.0607669,0.32592,0.644032,0.522585,0.408309,0.28803,0.615165,0.3614,0.637318,0.345849,0.212604,0.511167,0.541318,0.275686,0.783687,0.136505,0.845818,0.713957,0.12314,0.132102,0.900817,0.393561,0.712284,0.223343,0.967612,0.595507,0.190085,0.396125,0.677997,0.0032565,0.499591,0.738763,0.329177,0.143623,0.261348,0.737486,0.431653,0.876514,0.0988859,0.0689706,0.222363,0.31149,0.580138,0.763681,0.587176,0.363824,0.900185,0.432994,0.0777818,0.0233256,0.565095,0.978598,0.416887,0.277379,0.201942,0.384498,0.872886,0.392027,0.780623,0.550883,0.395283,0.280214,0.289646,0.72446,0.423837,0.550995,0.461946,0.85549,0.427508,0.560832,0.924461,0.649871,0.872322,0.504598,0.413552,0.459497,0.868423,0.313737,0.892491,0.946205,0.337063,0.457587,0.924803,0.753949,0.734966,0.126744,0.138447,0.607852,0.518771,0.919071,0.158735,0.914055,0.199285,0.448381,0.638515,0.623122,0.999376,0.100461,0.478612,0.426884,0.661293,0.403072,0.0767552,0.533615,0.907671,0.490307,0.993112,0.776093,0.804044,0.885604,0.722298,0.141106,0.34319,0.647101,0.895056,0.0781565,0.773845,0.033503,0.686009,0.292617,0.952574,0.844744,0.206671,0.151859,0.293125,0.845186,0.77498,0.2925,0.945647,0.253592,0.719384,0.60694,0.656664,0.796139,0.140554,0.564335,0.286446,0.133666,0.340428,0.0904903,0.01927,0.0627262,0.231597,0.36246,0.709827,0.126652,0.440617,0.483672,0.160155,0.126626,0.776289,0.112729,0.971369,0.98296,0.264588,0.264494,0.828146,0.039568,0.556994,0.773793,0.29316,0.276378,0.380732,0.949824,0.0725178,0.521287,0.514159,0.358964,0.654953,0.854587,0.449454,0.674223,0.917314,0.681051,0.0366837,0.627141,0.807703,0.477301,0.110813,0.967859,0.603926,0.887102,0.0805878,0.575296,0.870062,0.345175,0.839789,0.698208,0.384743,0.396784,0.472001,0.677903,0.673162,0.852733,0.627728,0.74568,0.37402,0.141887,0.104644,0.0289731,0.996474,0.554099,0.703196,0.913788,0.23515,0.73988,0.540929,0.0428532,0.217181,0.651742,0.0107119,0.821107,0.538843,0.0912997,0.396402,0.408905,0.436475,0.236192,0.107113,0.821218,0.632976,0.579114,0.499122,0.306138,0.431847,0.126849,0.0518175,0.805867,0.268736,0.156462,0.83484,0.265211,0.71056,0.538036,0.178999,0.94571,0.277916,0.719927,0.988563,0.495097,0.371669,0.999275,0.316204,0.910512,0.0905746,0.712606,0.319418,0.52705,0.948798,0.426531,0.348268,0.581773,0.00564439,0.84739,0.887911,0.437491,0.974239,0.939728,0.243358,0.242975,0.0961901,0.0781975,0.508186,0.80675,0.616234,0.687184,0.75246,0.89415,0.407111,0.741023,0.389246,0.77878,0.740298,0.70545,0.689292,0.830873,0.418056,0.0087099,0.357922,0.366854,0.435241,0.70619,0.948627,0.440885,0.55358,0.836538,0.878376,0.527819,0.776266,0.121734,0.770794,0.872456,0.199931,0.278979,0.679207,0.816165,0.966164,0.431667,0.710315,0.373275,0.17269,0.0995608,0.152055,0.912988,0.805011,0.841348,0.74386,0.223066,0.850058,0.101782,0.58992,0.285298,0.807972,0.538547,0.726183,0.361552,0.375085,0.604559,0.889371,0.151351,0.726293,0.660165,0.0238073,0.926224,0.939144,0.703014,0.742389,0.905308,0.134681,0.452704,0.278583,0.30737,0.552265,0.430638,0.220358,0.357275,0.271986,0.964218,0.580342,0.122043,0.
0660004,0.170262,0.407341,0.873973,0.708808,0.133524,0.235525,0.083893,0.738084,0.124896,0.235244,0.464377,0.78506,0.259051,0.390601,0.724204,0.962065,0.132991,0.629512,0.0967456,0.585694,0.908095,0.404116,0.137959,0.338733,0.624474,0.495234,0.610718,0.588692,0.0755761,0.732762,0.654692,0.245838,0.140103,0.528665,0.954646,0.273628,0.76419,0.0385391,0.0117111,0.889086,0.273783,0.476088,0.674146,0.532834,0.866689,0.39835,0.494899,0.99968,0.0278624,0.591645,0.585374,0.935957,0.995761,0.723333,0.27469,0.620234,0.218568,0.885409,0.208926,0.294144,0.61817,0.863618,0.539981,0.758273,0.392283,0.494628,0.0319009,0.156473,0.533167,0.043612,0.0455584,0.80695,0.5197,0.719704,0.339784,0.386389,0.118055,0.834683,0.386068,0.145917,0.426328,0.971442,0.0818745,0.422089,0.694775,0.356565,0.0423229,0.913343,0.241973,0.251249,0.207487,0.860143,0.114867,0.747468,0.618417,0.50715,0.242096,0.650317,0.663623,0.775262,0.693929,0.709181,0.582212,0.213629,0.428886,0.921996,0.600018,0.54694,0.756679,0.986086,0.692858,0.183007,0.957528,0.774732,0.605095,0.652304,0.131297,0.647418,0.565647,0.37327,0.898667,0.773133,0.233413,0.0135341,0.520601,0.85183,0.520684,0.762697,0.502147,0.184307,0.537959,0.196076,0.893488,0.120171,0.409706,0.322374,0.0421669,0.00972357,0.869314,0.798846,0.99581,0.562172,0.981852,0.953338,0.336904,0.586948,0.605641,0.468201,0.234366,0.171288,0.841471,0.133033,0.944421,0.0748836,0.146567,0.465022,0.926713,0.667251,0.227719,0.42886,0.851558,0.765679,0.624937,0.745046,0.88585,0.0346425,0.0674193,0.928017,0.044366,0.936733,0.726862,0.0401757,0.498905,0.708715,0.993514,0.835809,0.295662,0.599155,0.30401,0.530028,0.770443,0.145481,0.66306,0.714864,0.220364,0.809627,0.179886,0.147077,0.476878,0.407606,0.575938,0.328436,0.173284,0.200874,0.0734812,0.0591342,0.235517,0.1409,0.987151,0.279883,0.0776338,0.714013,0.320059,0.576539,0.422728,0.313572,0.412348,0.71839,0.912727,0.716358,0.248417,0.68317,0.861839,0.911477,0.398034,0.0822028,0.721105,0.57792,0.22928,0.197982,0.985526,0.805218,0.526418,0.158811,0.00609231,0.599899,0.217945,0.241609,0.7408,0.205096,0.521492,0.818433,0.919109,0.841551,0.394972,0.341836,0.155123,0.80732,0.0602258,0.0678502,0.523678,0.308643,0.75102,0.385517,0.22012,0.149054,0.46772,0.941225,0.726975,0.697,0.139207,0.712501,0.502218,0.665625,0.871312,0.50831,0.265524,0.0892567,0.749919,0.00632405,0.294352,0.271411,0.824757,0.213461,0.112962,0.21973,0.555297,0.268085,0.0270498,0.615523,0.335936,0.550728,0.924166,0.0869559,0.936245,0.144286,0.23601,0.403965,0.0855115,0.962985,0.100964,0.224719,0.675486,0.603182,0.890344,0.546798,0.111492,0.155869,0.636055,0.861411,0.162193,0.930407,0.132822,0.98695,0.143868,0.245785,0.20668,0.699165,0.51387,0.23373,0.314688,0.849806,0.784458,0.238854,0.936762,0.720703,0.383141,0.172772,0.124667,0.468652,0.135757,0.225632,0.693371,0.811243,0.828814,0.583715,0.358041,0.940305,0.739584,0.994096,0.801716,0.901777,0.924503,0.934539,0.888727,0.0683706,0.180324,0.0954065,0.767536,0.694194,0.329136,0.0822239,0.544,0.113594,0.321078,0.480762,0.834296,0.704219,0.653534,0.958964,0.172871,0.78929,0.184595,0.866242,0.600534,0.0134087,0.449957,0.958575,0.953714,0.189541,0.95267,0.755431,0.091318,0.877173,0.689969,0.980045,0.945544,0.870293,0.0754513,0.713079,0.564487,0.404587,0.795303,0.108487,0.518181,0.116381,0.589248,0.352477,0.8206,0.242782,0.311441,0.993471,0.0320723,0.496036,0.859713,0.632606,0.509445,0.309671,0.591181,0.463159,0.499212,0.543851,0.218589,0.59053,0.421024,0.908559,0.570575,0.366567,0.778852,0.646026,0.0796466,0.343339,0.0506135,0.87495,0.451826,0.
568795,0.991331,0.0410743,0.921272,0.811931,0.283856,0.232713,0.805403,0.315928,0.728749,0.665116,0.948534,0.238194,0.974787,0.539715,0.701353,0.473999,0.0835659,0.919943,0.0645286,0.50459,0.828502,0.635103,0.871157,0.607354,0.28113,0.950804,0.950693,0.331743,0.825754,0.402519,0.900538,0.817085,0.443593,0.82181,0.629016,0.727449,0.0545229,0.434418,0.0433779,0.783272,0.0995344,0.991912,0.0214666,0.0743209,0.531627,0.72282,0.54832,0.615193,0.642763,0.612848,0.119783,0.471264,0.247952,0.99094,0.0786181,0.529081,0.941744,0.029311,0.860824,0.767497,0.43183,0.761362,0.584582,0.875423,0.583172,0.213598,0.602873,0.637695,0.648016,0.646251,0.420967,0.747551,0.638163,0.442434,0.821872,0.16979,0.165253,0.370191,0.784983,0.808016,0.983039,0.904766,0.27928,0.230991,0.895707,0.357898,0.760072,0.83745,0.387209,0.620896,0.604948,0.819039,0.382258,0.18953,0.694462,0.96543,0.403128,0.297335,0.603125,0.051144,0.943586,0.0240916,0.798695,0.581749,0.466525,0.620566,0.751539,0.631779,0.990758,0.536522,0.439795,0.973797,0.441289,0.719075,0.204788,0.336995,0.0769731,0.96486,0.174446,0.464182,0.585756,0.779393,0.283222,0.968014,0.968923,0.977684,0.933444,0.372051,0.275019,0.536569,0.423195,0.218605,0.56066,0.221889,0.800353,0.0271852,0.842456,0.551892,0.658964,0.833213,0.0884144,0.0987583,0.80701,0.529703,0.817833,0.0117975,0.866698,0.894806,0.976657,0.0411437,0.358989,0.562413,0.820537,0.64221,0.530427,0.78946,0.619894,0.463871,0.161511,0.894914,0.000439881,0.584705,0.113519,0.5611,0.806594,0.913872,0.588285,0.64905,0.465764,0.247249,0.482263,0.554179,0.346007,0.289273,0.0838818,0.16384,0.30107,0.95058,0.0586464,0.277728,0.991724,0.417635,0.840141,0.812261,0.0598452,0.370568,0.601721,0.67974,0.834439,0.763231,0.574653,0.834879,0.347936,0.688172,0.395979,0.154531,0.602044,0.984264,0.80358,0.0678083,0.231514,0.285843,0.621987,0.577521,0.575116,0.705869,0.741361,0.876186,0.656449,0.800008,0.153914,0.648172,0.217642,0.994055,0.460433,0.277488,0.364623,0.0621536,0.957227,0.199062,0.825385,0.53188,0.0339414,0.173321,0.220052,0.429921,0.327851,0.822096,0.414185,0.131432,0.889905,0.645699,0.417275,0.511892,0.22322,0.992391,0.21776,0.964581,0.868578,0.874209,0.764588,0.0224917,0.522382,0.982231,0.0165465,0.982815,0.259718,0.381169,0.0449683,0.216945,0.580232,0.870353,0.748826,0.614173,0.0436739,0.968878,0.0440937,0.371525,0.790974,0.458279,0.502957,0.680879,0.103978,0.920232,0.192771,0.327197,0.912624,0.410531,0.291778,0.781201,0.28474,0.056366,0.803693,0.807122,0.0385966,0.82024,0.789937,0.298315,0.201409,0.834905,0.51526,0.78164,0.705258,0.264086,0.395813,0.748932,0.232964,0.439907,0.120457,0.0239385,0.898186,0.623414,0.704817,0.00216358,0.543647,0.897588,0.329361,0.45627,0.308119,0.621139,0.237472,0.59286,0.677504,0.0411649,0.399982,0.716101,0.861404,0.189918,0.0144159,0.0628134,0.0248232,0.529676,0.844454,0.730081,0.793762,0.240267,0.479013,0.0267262,0.680174,0.59947,0.0506647,0.57836,0.222884,0.755482,0.580524,0.766531,0.65307,0.909885,0.222802,0.961189,0.531023,0.460274,0.554049,0.208528,0.501438,0.954031,0.924629,0.362843,0.143949,0.939045,0.425656,0.168772,0.468721,0.27011,0.898853,0.262483,0.510377,0.377866,0.289209,0.190552,0.977336,0.339874,0.768912,0.200221,0.0953558,0.349436,0.966752,0.748426,0.25932,0.189554,0.709616,0.790343,0.649827,0.263665,0.998871,0.151266,0.217695,0.9235,0.514108,0.361644,0.862544,0.939765,0.530416,0.331265,0.209875,0.429269,0.593748,0.720252,0.807135,0.882957,0.910803,0.784471,0.22283,0.679715,0.984692,0.318186,0.0291508,0.951444,0.0666123,0.288471,0.140998,0.776228,0.0788144,0.790825,
0.0398924,0.0776854,0.94209,0.257588,0.00118508,0.456199,0.619232,0.863729,0.395963,0.149648,0.194994,0.605838,0.578917,0.788742,0.326089,0.386052,0.671699,0.236893,0.170524,0.894529,0.916608,0.155216,0.212715,0.945759,0.10666,0.279328,0.23423,0.247657,0.0555555,0.313044,0.0384818,0.0954479,0.390729,0.980572,0.353035,0.391914,0.43677,0.972267,0.255644,0.832734,0.121915,0.450638,0.438571,0.700832,0.23938,0.764661,0.0868844,0.911079,0.00155319,0.257408,0.805608,0.918161,0.412624,0.0183236,0.863919,0.519284,0.297651,0.0981489,0.766941,0.353207,0.411193,0.805423,0.448655,0.801922,0.785995,0.80169,0.193836,0.222765,0.773957,0.44948,0.0554987,0.895872,0.900118,0.49407,0.596704,0.139498,0.25873,0.683589,0.0505773,0.260284,0.940997,0.856185,0.178445,0.353621,0.874509,0.042364,0.872904,0.17216,0.140513,0.639845,0.525367,0.551706,0.445268,0.974022,0.353628,0.231262,0.775712,0.547464,0.454027,0.549669,0.996945,0.509526,0.445541,0.897063,0.00359614,0.0422455,0.0365611,0.262327,0.725834,0.0871385,0.52261,0.666831,0.943324,0.701055,0.0204512,0.817833,0.743419,0.893355,0.989994,0.883932,0.5332,0.515361,0.435637,0.978468,0.489383,0.789265,0.20973,0.265095,0.33673,0.663758,0.814764,0.333674,0.173284,0.260305,0.230737,0.17688,0.302551,0.267298,0.439207,0.028385,0.354437,0.961817,0.695216,0.297761,0.662872,0.715667,0.115594,0.40629,0.609022,0.105587,0.290222,0.142222,0.620948,0.72586,0.12069,0.110331,0.515125,0.33042,0.375425,0.851855,0.994178,0.19019,0.185529,0.167462,0.450495,0.416266,0.344342,0.753046,0.683564,0.783548,0.781431,0.0380006,0.745365,0.476647,0.335761,0.408237,0.192314,0.451355,0.814528,0.801336,0.556942,0.10475,0.943558,0.17789,0.830609,0.0642483,0.288221,0.345734,0.394669,0.663646,0.197589,0.388847,0.853836,0.383118,0.556309,0.304331,0.799384,0.900651,0.0573769,0.482948,0.684199,0.838808,0.520949,0.429564,0.315455,0.85671,0.837801,0.507768,0.308065,0.652329,0.309104,0.865007,0.757079,0.252662,0.0428966,0.587688,0.31691,0.331117,0.933423,0.711579,0.994763,0.131012,0.100426,0.848599,0.51413,0.656735,0.15293,0.313514,0.557385,0.210307,0.796462,0.241584,0.0491149,0.31741,0.671149,0.36457,0.17412,0.50895,0.872338,0.482185,0.161279,0.181442,0.347191,0.918358,0.434104,0.390088,0.506046,0.751014,0.721205,0.439468,0.462593,0.715968,0.57048,0.563019,0.564568,0.0846097,0.219753,0.717498,0.398123,0.777139,0.927805,0.194585,0.0187229,0.97692,0.511995,0.689872,0.341489,0.686116,0.198822,0.213827,0.1683,0.360101,0.395269,0.515491,0.278459,0.829373,0.905579,0.784504,0.580387,0.626784,0.223973,0.0429796,0.342752,0.794453,0.605999,0.90732,0.879062,0.825752,0.624818,0.277186,0.602891,0.552623,0.471771,0.621614,0.529542,0.983767,0.311485,0.871032,0.669882,0.510307,0.0848588,0.838182,0.870408,0.480128,0.353673,0.148866,0.3095,0.259252,0.93337,0.889887,0.886036,0.157343,0.932866,0.228788,0.951796,0.538865,0.136108,0.830858,0.364617,0.760926,0.108044,0.967508,0.313548,0.579815,0.589121,0.843091,0.563582,0.900606,0.714122,0.233464,0.410913,0.798981,0.0716456,0.28132,0.279109,0.425319,0.430186,0.588609,0.684571,0.363557,0.478496,0.570607,0.5209,0.411362,0.799394,0.472696,0.950227,0.935502,0.303554,0.314844,0.696428,0.411598,0.282351,0.00997657,0.991413,0.871472,0.853067,0.554994,0.772079,0.56719,0.788458,0.182991,0.366171,0.860103,0.464312,0.64528,0.285422,0.894498,0.233888,0.969993,0.258055,0.712384,0.540599,0.778955,0.123746,0.339994,0.25165,0.0739731,0.275496,0.555204,0.388817,0.971924,0.966802,0.671169,0.981901,0.958214,0.542641,0.834968,0.513208,0.31472,0.402158,0.301666,0.497711,0.768329,0.161769,0.962023,0.41
3608,0.447191,0.856521,0.647497,0.417184,0.114576,0.359881,0.957783,0.89353,0.483627,0.297777,0.145181,0.5576,0.573273,0.700385,0.946417,0.545198,0.667186,0.617586,0.527099,0.6254,0.160227,0.362067,0.138609,0.474946,0.764225,0.440274,0.972657,0.532554,0.602044,0.93468,0.946162,0.0492346,0.7912,0.593659,0.466419,0.905776,0.95354,0.424202,0.799306,0.437167,0.721979,0.944487,0.994767,0.295252,0.644871,0.941184,0.84045,0.312057,0.558769,0.367549,0.937458,0.718996,0.729616,0.0760664,0.193942,0.493841,0.516341,0.166599,0.0263949,0.118384,0.101279,0.972557,0.167619,0.892479,0.566216,0.634038,0.798255,0.519756,0.0582393,0.597561,0.956922,0.780218,0.542047,0.951689,0.0754701,0.186919,0.892873,0.91592,0.498976,0.451642,0.283468,0.436434,0.170638,0.0130843,0.5125,0.364581,0.506925,0.0288414,0.53118,0.53332,0.147226,0.632459,0.505877,0.314845,0.524938,0.0720931,0.948883,0.323193,0.591849,0.00712188,0.920753,0.548771,0.78734,0.462801,0.50046,0.86281,0.649719,0.393333,0.77873,0.148696,0.844975,0.0621983,0.58513,0.0156139,0.0752826,0.09763,0.380195,0.582208,0.126471,0.911375,0.115528,0.273697,0.543833,0.621405,0.588542,0.0687712,0.693498,0.537425,0.391964,0.285347,0.544547,0.312717,0.834118,0.331887,0.775518,0.334578,0.194697,0.425237,0.727911,0.973426,0.573933,0.572886,0.0356247,0.159062,0.5885,0.110907,0.256692,0.968695,0.693115,0.383164,0.880069,0.808643,0.656861,0.423903,0.430048,0.245403,0.492674,0.123546,0.782828,0.884638,0.408893,0.327374,0.197355,0.243011,0.659261,0.972872,0.577589,0.853958,0.39811,0.3055,0.827384,0.972042,0.878386,0.863009,0.131105,0.466886,0.973916,0.387797,0.435581,0.667031,0.770961,0.315651,0.475674,0.427821,0.739553,0.905722,0.673225,0.232227,0.029268,0.456052,0.116865,0.438161,0.783427,0.31422,0.681171,0.442688,0.287092,0.25876,0.296646,0.685202,0.56426,0.12403,0.657244,0.442646,0.987038,0.788349,0.909532,0.960954,0.176146,0.345113,0.627985,0.947106,0.660764,0.103659,0.374928,0.400317,0.00938107,0.0481525,0.632545,0.038649,0.504205,0.74941,0.47681,0.287632,0.0636297,0.157981,0.73032,0.350722,0.416741,0.0269655,0.0359241,0.981001,0.150995,0.693169,0.423647,0.138033,0.481517,0.333179,0.0989872,0.657663,0.678292,0.726972,0.60477,0.339056,0.830631,0.979698,0.739373,0.840013,0.0278502,0.371918,0.878662,0.532055,0.121328,0.355471,0.819687,0.184957,0.513453,0.550007,0.535679,0.930194,0.576973,0.571604,0.911195,0.727968,0.264772,0.334842,0.866001,0.74629,0.66802,0.964988,0.403953,0.346313,0.69196,0.00872269,0.685368,0.522592,0.98842,0.424742,0.362604,0.0162706,0.796659,0.241266,0.548326,0.917987,0.596737,0.368013,0.102944,0.11019,0.91802,0.638624,0.0403838,0.494993,0.210227,0.951579,0.22296,0.474999,0.28642,0.0889608,0.221289,0.954441,0.0539488,0.625242,0.300753,0.745909,0.633964,0.986122,0.268501,0.622385,0.410863,0.631105,0.638655,0.207523,0.872371,0.186981,0.12551,0.469108,0.554994,0.228454,0.579298,0.473014,0.867077,0.619682,0.968007,0.0773042,0.571261,0.190967,0.552303,0.857681,0.279928,0.773592,0.812122,0.333876,0.398833,0.112875,0.0797855,0.0327976,0.0989973,0.348286,0.655182,0.509861,0.979392,0.293837,0.717383,0.851763,0.480818,0.842893,0.320871,0.0358125,0.0713466,0.900169,0.508827,0.938424,0.519852,0.476833,0.015728,0.0911123,0.6678,0.568031,0.948793,0.947728,0.341623,0.760915,0.281604,0.740457,0.873791,0.36139,0.773254,0.972788,0.709676,0.428436,0.482649,0.689068,0.722274,0.200032,0.54083,0.203092,0.0429253,0.861702,0.238905,0.114272,0.761871,0.747731,0.0526956,0.281722,0.224565,0.0684236,0.372835,0.892365,0.636455,0.321628,0.840093,0.978078,0.0825436,0.121697,0.718535,0.
956335,0.483087,0.491789,0.929123,0.192763,0.920225,0.411772,0.881831,0.642499,0.611804,0.422661,0.845591,0.65473,0.284363,0.0844957,0.769001,0.0462339,0.832227,0.821697,0.327956,0.0567916,0.890121,0.700791,0.949157,0.526576,0.0224191,0.789249,0.504654,0.104963,0.910946,0.223188,0.0612972,0.394033,0.714977,0.99042,0.586797,0.635203,0.402192,0.468628,0.277701,0.0139962,0.891289,0.123293,0.668726,0.175652,0.207788,0.437727,0.221886,0.0400153,0.259424,0.549842,0.0968069,0.149545,0.250633,0.0459634,0.67612,0.273052,0.835213,0.180774,0.378015,0.746159,0.403962,0.439312,0.140193,0.11894,0.429732,0.726989,0.754142,0.831924,0.195617,0.0318438,0.84592,0.0869058,0.155136,0.514646,0.262558,0.362925,0.952373,0.484443,0.40294,0.211797,0.0342854,0.499747,0.361342,0.284918,0.54571,0.0374621,0.557971,0.380923,0.218236,0.935985,0.127082,0.622198,0.375298,0.267275,0.741138,0.80503,0.994264,0.49528,0.636954,0.189881,0.527124,0.482874,0.276787,0.682261,0.99752,0.539344,0.0451851,0.949893,0.0237877,0.448125,0.16169,0.0580731,0.947872,0.523032,0.342992,0.493582,0.560494,0.900962,0.874505,0.77873,0.836948,0.00158699,0.400929,0.212245,0.268862,0.142067,0.0172749,0.263126,0.637347,0.654229,0.453006,0.164471,0.137103,0.729793,0.846732,0.134623,0.269137,0.891917,0.0845163,0.292925,0.340042,0.246207,0.350998,0.287914,0.769239,0.69399,0.781496,0.329733,0.594952,0.656001,0.108464,0.431899,0.657588,0.509393,0.644144,0.92645,0.65146,0.661419,0.189575,0.288807,0.315648,0.642582,0.453278,0.452751,0.372375,0.30001,0.587374,0.641512,0.191927,0.67189,0.934437,0.531969,0.918097,0.285435,0.819883,0.687336,0.979425,0.601379,0.0170694,0.574377,0.25738,0.125533,0.00627622,0.914968,0.634926,0.650421,0.841418,0.286385,0.31184,0.030993,0.575192,0.627488,0.673575,0.0284706,0.0802389,0.0459494,0.328481,0.667613,0.687462,0.520408,0.339503,0.621899,0.0523777,0.2576,0.907334,0.872261,0.944936,0.88676,0.473641,0.962006,0.461137,0.731021,0.087539,0.467413,0.645989,0.722465,0.117833,0.487407,0.00885034,0.429673,0.5184,0.584043,0.0571612,0.191975,0.612513,0.1374,0.237924,0.940994,0.805013,0.925386,0.461402,0.144516,0.547285,0.51378,0.402116,0.454619,0.386041,0.347053,0.341379,0.859682,0.309058,0.802515,0.590703,0.396597,0.269928,0.236692,0.119062,0.387761,0.724099,0.127913,0.817435,0.242498,0.711955,0.874596,0.434473,0.324469,0.011996,0.672397,0.265463,0.817009,0.597783,0.726865,0.961525,0.145067,0.240645,0.363642,0.599686,0.626687,0.710694,0.941065,0.486369,0.0197527,0.74358,0.0770713,0.41635,0.0135079,0.313763,0.535413,0.401269,0.0378617,0.663325,0.218704,0.28036,0.375281,0.0932997,0.714833,0.699749,0.105296,0.38723,0.965212,0.922305,0.985013,0.692078,0.88383,0.13008,0.932723,0.247471,0.729766,0.55941,0.958165,0.670831,0.0457784,0.977918,0.414411,0.12285,0.394268,0.427919,0.436613,0.929681,0.829188,0.474475,0.593006,0.0478917,0.754835,0.968287,0.141191,0.469668,0.668036,0.246487,0.856898,0.633248,0.168792,0.841911,0.325326,0.0526213,0.971991,0.258049,0.300093,0.701757,0.817459,0.258258,0.372587,0.863237,0.236176,0.786998,0.986087,0.630445,0.214917,0.4227,0.560126,0.0441045,0.897174,0.153132,0.0919962,0.652009,0.121419,0.233188,0.121677,0.789455,0.479675,0.978575,0.422704,0.648466,0.820486,0.748029,0.701088,0.792476,0.00607849,0.00118011,0.494233,0.823537,0.259438,0.86682,0.686774,0.495614,0.653818,0.672861,0.126059,0.868735,0.0955609,0.686184,0.912839,0.992735,0.839316,0.00483555,0.644744,0.960735,0.238023,0.766421,0.75019,0.717698,0.744996,0.172893,0.366164,0.565481,0.920923,0.0672516,0.357958,0.927001,0.0684317,0.85219,0.750539,0.32787,0.7
19011,0.437313,0.823484,0.372829,0.110174,0.949543,0.241564,0.205735,0.635727,0.154403,0.19847,0.475043,0.159239,0.843214,0.435778,0.397262,0.609635,0.185968,0.11496,0.35463,0.358861,0.481124,0.920111,0.279784,0.548375,0.278069,0.206785,0.616807,0.13026,0.957324,0.944677,0.84927,0.394637,0.768161,0.222099,0.504811,0.717703,0.463663,0.710546,0.35343,0.618067,0.909016,0.828473,0.777306,0.75223,0.264251,0.174568,0.361865,0.450219,0.289527,0.716495,0.80908,0.770651,0.636607,0.0888638,0.319026,0.914676,0.295649,0.935833,0.0449354,0.252973,0.88051,0.894206,0.647609,0.648671,0.116305,0.15242,0.366374,0.579969,0.862966,0.719805,0.198035,0.771983,0.548278,0.975341,0.524213,0.812529,0.149909,0.886078,0.262748,0.439436,0.602573,0.0718282,0.210087,0.23918,0.160692,0.529114,0.153856,0.456341,0.464947,0.198791,0.709314,0.345457,0.0929967,0.356923,0.994128,0.209302,0.509343,0.360502,0.78927,0.37231,0.0803069,0.987306,0.144292,0.628585,0.962647,0.668505,0.441114,0.112555,0.554583,0.703862,0.551992,0.157156,0.775691,0.762079,0.396336,0.936383,0.291193,0.550192,0.392724,0.75614,0.748983,0.102037,0.101597,0.84198,0.45896,0.0957252,0.0512814,0.968304,0.456227,0.840552,0.340613,0.536534,0.827858,0.484906,0.165119,0.790504,0.153411,0.606233,0.90306,0.707994,0.310096,0.455051,0.865151,0.0857866,0.21713,0.261487,0.0221693,0.508323,0.811679,0.414893,0.264463,0.560662,0.51693,0.36606,0.402642,0.975891,0.461785,0.453923,0.944194,0.918013,0.294475,0.284807,0.454547,0.122332,0.769713,0.619666,0.912837,0.923124,0.2259,0.815896,0.631118,0.535996,0.270948,0.496269,0.621782,0.488078,0.757756,0.643952,0.996401,0.569435,0.0588445,0.260864,0.130097,0.575775,0.626924,0.532739,0.551665,0.0887097,0.986662,0.495859,0.00672261,0.281137,0.780667,0.46127,0.403469,0.55038,0.0809362,0.316306,0.473504,0.306836,0.132202,0.104622,0.842832,0.40315,0.600891,0.464614,0.891228,0.358647,0.108566,0.887629,0.928083,0.16741,0.148493,0.05818,0.743185,0.775418,0.590919,0.29485,0.864127,0.577581,0.79071,0.87085,0.858717,0.571376,0.33212,0.262186,0.121756,0.413056,0.578492,0.595259,0.719892,0.710694,0.699881,0.562724,0.113843,0.300772,0.0273379,0.00507105,0.659419,0.135904,0.8927,0.587502,0.303314,0.0411931,0.645682,0.0464987,0.816611,0.236601,0.341349,0.680738,0.814182,0.132058,0.551588,0.672899,0.703435,0.883707,0.935085,0.82519,0.296763,0.513577,0.42045,0.0166554,0.22427,0.120331,0.579379,0.338114,0.421103,0.606717,0.343185,0.0805221,0.742621,0.235885,0.668024,0.0459344,0.277078,0.313706,0.0924331,0.0936886,0.550307,0.433782,0.774426,0.364489,0.56584,0.326014,0.0373874,0.269275,0.209722,0.972472,0.0944653,0.506485,0.486049,0.514915,0.523141,0.71032,0.635245,0.10252,0.0484333,0.0563481,0.709237,0.391618,0.13687,0.451858,0.627503,0.804894,0.497792,0.904581,0.118601,0.590225,0.998269,0.668908,0.0240071,0.772696,0.0333965,0.589848,0.0987102,0.0707839,0.859123,0.308432,0.0432563,0.953588,0.814917,0.529306,0.468503,0.338058,0.239625,0.103748,0.440578,0.288058,0.160096,0.149815,0.679677,0.296967,0.601672,0.30718,0.101861,0.0994642,0.21176,0.220462,0.689689,0.21003,0.88937,0.713696,0.982726,0.922766,0.303544,0.0814359,0.99355,0.162667,0.389868,0.0368063,0.116254,0.204785,0.566112,0.584757,0.542843,0.805737,0.688505,0.98342,0.0937953,0.848602,0.133235,0.773472,0.145568,0.734907,0.0806514,0.247429,0.834371,0.292412,0.467891,0.52406,0.502442,0.357261,0.237757,0.485167,0.280027,0.541301,0.566603,0.273577,0.703967,0.956471,0.310383,0.820222,0.161256,0.876495,0.404979,0.704099,0.682231,0.0934844,0.687519,0.776027,0.942086,0.820753,0.549499,0.0876547,0.55566,0.
63015,0.335084,0.390031,0.922562,0.802975,0.914092,0.425003,0.160236,0.151848,0.910171,0.440263,0.693149,0.476774,0.713839,0.397116,0.433245,0.0242224,0.217338,0.594501,0.900717,0.622317,0.2986,0.582949,0.715801,0.986118,0.358975,0.657888,0.806872,0.908474,0.745542,0.362532,0.538624,0.0806265,0.752563,0.461186,0.883602,0.666655,0.886189,0.0438379,0.818503,0.79636,0.484101,0.511652,0.273134,0.19794,0.908768,0.706379,0.222163,0.126106,0.30088,0.12288,0.748423,0.599479,0.705828,0.464224,0.585597,0.0648034,0.122112,0.392469,0.973277,0.867654,0.755001,0.511901,0.94828,0.507564,0.973087,0.831882,0.174219,0.859276,0.87572,0.992722,0.655636,0.359821,0.504374,0.92877,0.557761,0.413142,0.635148,0.779924,0.539248,0.936028,0.902803,0.287671,0.535507,0.608631,0.751895,0.121104,0.673435,0.874006,0.513573,0.646712,0.74166,0.268574,0.158613,0.68994,0.776139,0.1317,0.521822,0.950358,0.990976,0.397542,0.94308,0.646612,0.757363,0.447454,0.575382,0.315124,0.860596,0.21053,0.0950478,0.399844,0.146557,0.997851,0.687515,0.682064,0.606482,0.43941,0.803168,0.279917,0.313416,0.316741,0.926629,0.0550758,0.585316,0.0852421,0.745016,0.361455,0.216942,0.266838,0.311813,0.207919,0.664381,0.254893,0.854531,0.421744,0.702347,0.429912,0.736868,0.562943,0.640442,0.831916,0.962787,0.786999,0.829767,0.650302,0.469063,0.436249,0.0897112,0.272231,0.716166,0.403127,0.588973,0.642795,0.458203,0.174288,0.728037,0.203219,0.535743,0.94498,0.470057,0.847555,0.152898,0.134438,0.102448,0.00742898,0.556182,0.804794,0.437341,0.29305,0.367737,0.0777835,0.124966,0.330524,0.864783,0.954733,0.980826,0.333846,0.390983,0.0705368,0.606078,0.107149,0.473664,0.19505,0.749945,0.931867,0.369338,0.477982,0.135086,0.905081,0.422962,0.605143,0.752636,0.57586,0.739581,0.855084,0.583289,0.295763,0.659879,0.0206305,0.588813,0.0276161,0.098414,0.71378,0.35814,0.963197,0.668513,0.338966,0.297043,0.0594962,0.409503,0.903121,0.166645,0.883166,0.0981707,0.91659,0.815033,0.467509,0.394572,0.950118,0.37259,0.817534,0.555261,0.125227,0.393394,0.294842,0.980311,0.976683,0.590606,0.64019,0.997314,0.179419,0.667806,0.0957279,0.893199,0.0259463,0.0589248,0.561712,0.364912,0.355968,0.621208,0.774415,0.259088,0.787854,0.657581,0.357259,0.704444,0.472614,0.824768,0.0990163,0.422732,0.197358,0.91655,0.977994,0.322585,0.309945,0.272836,0.302896,0.286628,0.863442,0.943086,0.283942,0.0428607,0.610892,0.37967,0.93606,0.636839,0.438595,0.497772,0.00175077,0.794563,0.11898,0.776165,0.053651,0.906834,0.433746,0.41091,0.611278,0.90636,0.235678,0.710294,0.329092,0.433037,0.626845,0.307086,0.755622,0.936789,0.579922,0.0585177,0.223418,0.443364,0.00160398,0.50736,0.486225,0.612496,0.88703,0.422284,0.249335,0.325625,0.920056,0.251086,0.120187,0.0390363,0.0272513,0.173838,0.94587,0.460998,0.584749,0.557148,0.367358,0.820427,0.267442,0.69645,0.253463,0.894286,0.00353636,0.00908475,0.831076,0.583459,0.0676025,0.0544932,0.0268229,0.0692065,0.561853,0.513048,0.681703,0.448883,0.935332,0.931038,0.774507,0.855388,0.182124,0.894695,0.894425,0.209375,0.068533,0.840295,0.670373,0.653282,0.397443,0.0377304,0.473708,0.664885,0.734181,0.727171,0.559171,0.737717,0.736256,0.390247,0.321176,0.803859,0.44474,0.347999,0.873065,0.00659275,0.861046,0.554768,0.455475,0.796379,0.485806,0.229983,0.651767,0.66793,0.124677,0.546192,0.877305,0.19321,0.386487,0.547678,0.846492,0.783929,0.585408,0.3202,0.448814,0.319589,0.0473716,0.00798482,0.0573059,0.783628,0.398231,0.378482,0.587486,0.842971,0.72648,0.460551,0.849564,0.587526,0.0153194,0.30504,0.383905,0.501125,0.535022,0.0356721,0.169055,0.6597,0.581864,0.
0463607,0.85291,0.96835,0.594039,0.699402,0.75228,0.179447,0.0196025,0.201094,0.499036,0.0669742,0.209078,0.556342,0.850602,0.60731,0.934824,0.438088,0.450281,0.661304,0.89864,0.299845,0.24883,0.913959,0.604885,0.632735,0.415085,0.139907,0.668407,0.58414,0.799607,0.250271,0.630501,0.652517,0.218622,0.22454,0.35192,0.970901,0.403987,0.371522,0.171995,0.903023,0.438496,0.381073,0.459365,0.289098,0.988383,0.394189,0.727186,0.438664,0.0554928,0.625826,0.738509,0.304323,0.539785,0.343394,0.937058,0.95487,0.483301,0.605466,0.53901,0.282908,0.855737,0.169511,0.935425,0.074359,0.39405,0.287345,0.0452604,0.798037,0.658867,0.217255,0.70106,0.0973632,0.598329,0.160425,0.386461,0.586712,0.554614,0.113648,0.0253759,0.610107,0.739474,0.763885,0.91443,0.279259,0.10728,0.851488,0.234129,0.590581,0.456954,0.773139,0.873489,0.312692,0.94265,0.808915,0.387051,0.3367,0.0962595,0.432311,0.134737,0.755127,0.649566,0.835797,0.85249,0.247895,0.996222,0.238951,0.834607,0.550837,0.352599,0.859982,0.160944,0.0920726,0.623868,0.0753736,0.371332,0.731147,0.926862,0.605461,0.321728,0.383816,0.3786,0.195218,0.696508,0.32125,0.00413211,0.0835587,0.65795,0.100392,0.51587,0.792687,0.855518,0.165436,0.628484,0.708008,0.413331,0.624707,0.946959,0.247937,0.175543,0.299558,0.10792,0.336487,0.39163,0.731788,0.41186,0.762962,0.462935,0.338722,0.368423,0.784663,0.722539,0.747023,0.979881,0.419047,0.0682727,0.984013,0.502606,0.726222,0.0844046,0.0184753,0.518909,0.939923,0.183911,0.147394,0.647931,0.597242,0.7721,0.59489,0.845179,0.947644,0.894447,0.953099,0.28413,0.286078,0.684887,0.695991,0.0490399,0.147822,0.0347131,0.417463,0.932485,0.757252,0.164486,0.912366,0.176299,0.232759,0.896379,0.678905,0.958981,0.980784,0.69738,0.47789,0.920707,0.881291,0.625284,0.568637,0.478533,0.397384,0.163527,0.323712,0.345028,0.0579745,0.276812,0.629158,0.344052,0.961699,0.325149,0.393092,0.109521,0.359862,0.810555,0.0420061,0.117114,0.975041,0.954372,0.293413,0.2078,0.850752,0.972317,0.166781,0.831535,0.669697,0.644671,0.752242,0.550988,0.269955,0.320879,0.0295213,0.667339,0.484406,0.353234,0.012367,0.542381,0.630046,0.641525,0.886433,0.591744,0.966674,0.279525,0.701265,0.326536,0.0900806,0.743271,0.44365,0.0651217,0.697643,0.737063,0.272921,0.548395,0.70938,0.439702,0.37993,0.379077,0.0843727,0.132172,0.930065,0.354328,0.453052,0.959587,0.0216668,0.937458,0.312821,0.0340338,0.479839,0.942866,0.675559,0.366272,0.534611,0.642233,0.645798,0.235876,0.968769,0.735878,0.979147,0.412418,0.801,0.67679,0.149481,0.0739212,0.225185,0.858861,0.513623,0.605115,0.237938,0.597996,0.737287,0.168004,0.952323,0.190339,0.12759,0.97399,0.127797,0.440411,0.00802385,0.607636,0.383277,0.683583,0.973908,0.917888,0.325816,0.619706,0.153763,0.294584,0.355584,0.13291,0.707003,0.156584,0.8097,0.856484,0.230505,0.0348848,0.715344,0.744128,0.64,0.953283,0.342123,0.377287,0.121286,0.294447,0.567626,0.248877,0.268437,0.695423,0.689288,0.27646,0.303059,0.0725648,0.960043,0.276967,0.990452,0.285859,0.896673,0.144216,0.580443,0.252257,0.277125,0.287446,0.40884,0.0868252,0.143929,0.639346,0.12171,0.859274,0.383473,0.76171,0.812557,0.725597,0.138997,0.933843,0.0200435,0.706624,0.18272,0.28848,0.402047,0.872007,0.564941,0.705106,0.944572,0.524984,0.982073,0.935025,0.810843,0.878746,0.0792402,0.391286,0.131002,0.356366,0.678732,0.539843,0.443191,0.822661,0.179188,0.564901,0.681935,0.562662,0.326611,0.494491,0.288259,0.465608,0.428334,0.308302,0.172232,0.611054,0.596782,0.574279,0.483061,0.161723,0.279385,0.427633,0.686707,0.261458,0.362658,0.497549,0.140204,0.441898,0.888835,0.27120
6,0.798263,0.567567,0.811048,0.241454,0.390227,0.990237,0.806355,0.0721621,0.552898,0.132966,0.566653,0.841157,0.598574,0.994987,0.149459,0.770806,0.606041,0.746241,0.345085,0.0891015,0.907964,0.62447,0.516734,0.59467,0.885928,0.879392,0.0922196,0.0261319,0.32129,0.981055,0.297338,0.119553,0.548621,0.108386,0.361007,0.938849,0.0986228,0.167362,0.0110108,0.651521,0.300327,0.577664,0.492678,0.898901,0.572651,0.642136,0.669707,0.178692,0.388377,0.0147921,0.267793,0.296341,0.639262,0.784528,0.891011,0.525191,0.66392,0.983231,0.551322,0.985209,0.964285,0.84866,0.104762,0.512907,0.957046,0.465769,0.451755,0.0556692,0.633131,0.462766,0.70719,0.933458,0.04043,0.199868,0.832359,0.613081,0.842004,0.502066,0.791773,0.230381,0.516858,0.0595659,0.526722,0.15612,0.844093,0.417733,0.681311,0.508013,0.400964,0.232633,0.493222,0.365249,0.0812936,0.597985,0.878156,0.0383401,0.0637537,0.329911,0.0940093,0.696884,0.792677,0.801199,0.630342,0.833107,0.00106708,0.462701,0.446189,0.843071,0.964767,0.237961,0.0734523,0.481625,0.297527,0.600174,0.637745,0.141621,0.0179076,0.319056,0.649634,0.418872,0.55169,0.142856,0.784121,0.632983,0.740841,0.662277,0.671323,0.804594,0.992188,0.765333,0.501479,0.784865,0.566532,0.131821,0.617973,0.567599,0.594522,0.0641615,0.41067,0.559289,0.302123,0.484123,0.0409144,0.59965,0.0842968,0.67866,0.74127,0.102204,0.997716,0.390904,0.521076,0.549406,0.53376,0.305197,0.182389,0.274601,0.967474,0.853713,0.0791949,0.959662,0.619045,0.580674,0.744527,0.185578,0.712495,0.3625,0.753177,0.307017,0.426662,0.163847,0.866306,0.728784,0.647969,0.907221,0.328434,0.732266,0.58588,0.0697044,0.834471,0.583597,0.460608,0.355547,0.133003,0.994368,0.660743,0.315392,0.268969,0.628217,0.169105,0.348164,0.587879,0.78815,0.928838,0.332406,0.973728,0.641332,0.694906,0.726905,0.948349,0.121567,0.890752,0.814655,0.850351,0.538721,0.721876,0.178785,0.270987,0.307756,0.24849,0.105458,0.891353,0.709098,0.461005,0.0243556,0.703467,0.121748,0.339748,0.972436,0.749965,0.508853,0.3206,0.337843,0.297003,0.249437,0.670249,0.270731,0.890769,0.365155,0.997636,0.839118,0.486722,0.888387,0.653773,0.337073,0.427108,0.375649,0.515859,0.698095,0.683405,0.764349,0.803553,0.574758,0.473447,0.264558,0.599114,0.176914,0.386306,0.938861,0.149349,0.13627,0.447714,0.469949,0.474114,0.744717,0.719386,0.144363,0.0154481,0.610155,0.509517,0.0130837,0.449274,0.996239,0.901471,0.103047,0.333313,0.328579,0.478697,0.849171,0.0266744,0.162102,0.61352,0.830228,0.73686,0.0869671,0.0947855,0.335974,0.263881,0.481091,0.274835,0.41323,0.617362,0.722549,0.883179,0.0914755,0.467266,0.602565,0.235838,0.482714,0.21272,0.745356,0.495798,0.661994,0.741595,0.397269,0.765042,0.0749073,0.725848,0.243738,0.924079,0.752522,0.40584,0.537599,0.58275,0.1427,0.624566,0.677536,0.478673,0.888447,0.158627,0.753508,0.301677,0.775989,0.476057,0.184855,0.867464,0.943323,0.78742,0.103302,0.426037,0.000140906,0.848658,0.921835,0.662135,0.590252,0.319104,0.427177,0.66516,0.0449518,0.670915,0.589238,0.797474,0.0767554,0.126837,0.380224,0.219455,0.751403,0.0577595,0.698129,0.63985,0.216386,0.451637,0.941526,0.992375,0.927694,0.126382,0.859839,0.871017,0.913802,0.963141,0.297055,0.913943,0.811799,0.21889,0.576078,0.402051,0.537994,0.0032552,0.0672111,0.582946,0.674171,0.656449,0.38042,0.750926,0.783287,0.760644,0.970381,0.53469,0.818403,0.66851,0.174539,0.0347897,0.120147,0.116065,0.0271645,0.0478413,0.242447,0.887003,0.918859,0.156249,0.850145,0.215914,0.0701919,0.661943,0.434804,0.64627,0.0639947,0.972798,0.649525,0.131206,0.555743,0.323696,0.787655,0.936163,0.0746218,0
.570942,0.696807,0.0450031,0.105632,0.51521,0.713513,0.280171,0.55,0.83366,0.396236,0.577165,0.881501,0.638683,0.464168,0.80036,0.794932,0.314312,0.0162736,0.865124,0.976256,0.451077,0.511394,0.0402505,0.423875,0.16092,0.171456,0.979618,0.484615,0.959112,0.915781,0.559237,0.530054,0.612588,0.60424,0.635685,0.127799,0.317753,0.915856,0.677799,0.151413,0.312093,0.254963,0.0329148,0.950776,0.719131,0.833275,0.745709,0.0334437,0.849549,0.610833,0.00969943,0.300626,0.122227,0.0499499,0.7245,0.283147,0.221406,0.704119,0.767762,0.180518,0.6199,0.326999,0.710571,0.232488,0.93124,0.346257,0.360287,0.248993,0.262113,0.0380855,0.400406,0.574206,0.293049,0.433321,0.524982,0.0121801,0.266596,0.270691,0.0456237,0.116144,0.881523,0.0553232,0.41677,0.00375043,0.105273,0.141271,0.286897,0.326679,0.845389,0.0546593,0.507197,0.465289,0.381659,0.217768,0.697777,0.312898,0.564025,0.0580639,0.561891,0.826138,0.0961495,0.962297,0.400344,0.389198,0.395618,0.925326,0.401378,0.662214,0.196016,0.447002,0.778359,0.0775393,0.502325,0.195129,0.0812898,0.607598,0.336399,0.368187,0.934277,0.181789,0.422846,0.441474,0.647078,0.804505,0.659243,0.344855,0.117403,0.223267,0.402919,0.679294,0.0494052,0.499068,0.641591,0.449749,0.888266,0.0372092,0.375074,0.289645,0.699423,0.57109,0.736647,0.477782,0.64863,0.238972,0.672911,0.729919,0.84657,0.00931048,0.0981063,0.780848,0.191099,0.520952,0.222322,0.838177,0.325457,0.881565,0.183032,0.44286,0.104832,0.58595,0.122154,0.154237,0.0850184,0.763745,0.603986,0.973285,0.800954,0.97906,0.26293,0.500377,0.550151,0.999577,0.978159,0.19878,0.238549,0.65107,0.9287,0.0851192,0.660381,0.026806,0.865967,0.85148,0.547758,0.0882895,0.689657,0.873216,0.969854,0.872688,0.316076,0.0746867,0.458639,0.43823,0.228924,0.543657,0.201974,0.83291,0.516942,0.00292837,0.811971,0.779872,0.503306,0.362121,0.779448,0.481465,0.560902,0.017997,0.132536,0.489601,0.103116,0.792916,0.516407,0.969083,0.644397,0.0641659,0.0573728,0.334053,0.937382,0.0272272,0.206742,0.253457,0.101914,0.665381,0.691687,0.330838,0.209038,0.893661,0.163748,0.72598,0.89659,0.975719,0.505851,0.399896,0.33784,0.2853,0.881361,0.898742,0.303297,0.0138963,0.388344,0.406413,0.806813,0.904751,0.375496,0.451209,0.968917,0.432869,0.785263,0.906299,0.460096,0.992005,0.159756,0.56201,0.657385,0.851443,0.892848,0.866423,0.745104,0.0565964,0.592403,0.641694,0.0323155,0.0982539,0.0415898,0.370156,0.383554,0.922951,0.268898,0.68685,0.936847,0.657242,0.0932628,0.74366,0.561993,0.468759,0.194869,0.530909,0.901628,0.980132,0.437208,0.361724,0.972136,0.596964,0.923734,0.629522,0.448407,0.816582,0.495945,0.193511,0.873178,0.0883471,0.835206,0.905494,0.186601,0.876795,0.27565,0.570155,0.799746,0.544548,0.257005,0.736593,0.201789,0.350267,0.480253,0.763782,0.819026,0.675122,0.294691,0.720654,0.655254,0.731899,0.0823776,0.62739,0.328863,0.00611138,0.256912,0.77727,0.822693,0.752856,0.970782,0.695872,0.841204,0.805987,0.601365,0.0278047,0.682783,0.877015,0.597959,0.482529,0.421563,0.854964,0.219122,0.623352,0.205231,0.699374,0.387134,0.0242573,0.374496,0.681825,0.744911,0.02975,0.413724,0.827289,0.65714,0.742588,0.8334,0.914052,0.519858,0.656093,0.666909,0.490639,0.351965,0.508112,0.296627,0.95333,0.535917,0.979409,0.830345,0.133876,0.461938,0.251908,0.98884,0.68106,0.87526,0.194071,0.380434,0.262394,0.218328,0.75493,0.944219,0.963239,0.78468,0.357944,0.790528,0.441821,0.100531,0.623928,0.355873,0.620389,0.280021,0.0227811,0.111029,0.631986,0.530893,0.407655,0.585317,0.06681,0.387064,0.415662,0.200686,0.849002,0.66757,0.189526,0.530062,0.54283,0.383597,0.910496
,0.805225,0.601925,0.665426,0.749444,0.565164,0.450107,0.107388,0.355692,0.891927,0.207919,0.97962,0.2478,0.828308,0.259642,0.270581,0.939337,0.891628,0.801474,0.346992,0.476945,0.868284,0.734056,0.892607,0.0689698,0.583058,0.560177,0.258495,0.11312,0.103007,0.642092,0.0236161,0.908232,0.244017,0.689042,0.657676,0.809181,0.139149,0.765063,0.164873,0.0310761,0.972982,0.144493,0.278876,0.80129,0.404135,0.549456,0.740627,0.295763,0.35093,0.0876192,0.772708,0.219214,0.821675,0.665314,0.288184,0.404734,0.225491,0.546679,0.517854,0.328499,0.188772,0.54147,0.23673,0.432788,0.230513,0.894406,0.24197,0.369662,0.659469,0.406843,0.400738,0.632451,0.551336,0.679614,0.433742,0.955471,0.22907,0.174369,0.251234,0.58,0.261988,0.0239415,0.799214,0.0836638,0.689256,0.0873985,0.488398,0.914747,0.634078,0.00625171,0.243245,0.82285,0.547722,0.479976,0.255638,0.778235,0.374382,0.497608,0.147896,0.0338514,0.90445,0.548634,0.666303,0.455786,0.228247,0.100045,0.411257,0.457317,0.274414,0.662491,0.0373178,0.536402,0.686433,0.836532,0.620066,0.375688,0.923931,0.108464,0.290435,0.558009,0.114715,0.533681,0.380858,0.662437,0.0136567,0.636496,0.440672,0.388039,0.134104,0.588568,0.42189,0.0385541,0.137202,0.0881931,0.49434,0.36545,0.188238,0.905598,0.822767,0.462652,0.568089,0.860085,0.999054,0.254522,0.696617,0.61912,0.63021,0.620548,0.727584,0.920646,0.178557,0.842299,0.454326,0.559415,0.504736,0.467983,0.195911,0.945408,0.856022,0.330015,0.533976,0.277912,0.368569,0.671179,0.366105,0.86291,0.0366281,0.554343,0.768508,0.859395,0.016995,0.336597,0.71948,0.0160491,0.591118,0.416097,0.635169,0.221329,0.0366449,0.362753,0.141974,0.215201,0.205052,0.596301,0.774616,0.709789,0.0642836,0.970528,0.655197,0.920305,0.300543,0.189173,0.198218,0.669112,0.860352,0.564323,0.532022,0.89698,0.118666,0.300529,0.756375,0.135661,0.637126,0.475855,0.15171,0.228244,0.891952,0.786879,0.449573,0.928597,0.149632,0.591547,0.143798,0.354684,0.187847,0.918415,0.0644728,0.252131,0.888942,0.71967,0.172437,0.189485,0.908843,0.370654,0.858597,0.769195,0.934977,0.390619,0.666175,0.0536429,0.691148,0.42255,0.189304,0.328274,0.898405,0.341014,0.556518,0.790357,0.127893,0.00609072,0.718954,0.277525,0.597638,0.862752,0.63221,0.785485,0.781167,0.696682,0.0376161,0.670109,0.416352,0.210053,0.859594,0.325195,0.580707,0.718192,0.0943898,0.515684,0.10881,0.760564,0.569327,0.799959,0.183114,0.758631,0.128232,0.0815187,0.0996443,0.68475,0.871875,0.227537,0.690841,0.590829,0.505063,0.288479,0.453581,0.137272,0.0739637,0.234748,0.833955,0.11158,0.904857,0.250307,0.321632,0.764452,0.575502,0.902339,0.482643,0.669891,0.418023,0.591454,0.430456,0.98735,0.391412,0.61357,0.74598,0.519645,0.695088,0.845625,0.204395,0.566964,0.0731619,0.895236,0.157793,0.578225,0.183715,0.611374,0.715497,0.257678,0.846122,0.549451,0.369258,0.750979,0.799758,0.690891,0.515431,0.375259,0.59323,0.998074,0.0451506,0.0112531,0.589528,0.475606,0.998603,0.980941,0.089176,0.744583,0.500585,0.784264,0.590208,0.70498,0.351228,0.663369,0.600217,0.509021,0.241594,0.783931,0.120395,0.957091,0.0416098,0.966517,0.506542,0.410868,0.717496,0.3063,0.101759,0.232927,0.681559,0.694989,0.231001,0.72671,0.706242,0.820529,0.202316,0.704845,0.80147,0.291492,0.449428,0.302055,0.0757563,0.0396352,0.00703587,0.426984,0.703005,0.607252,0.936005,0.944599,0.391184,0.0564002,0.90169,0.432793,0.0229168,0.408231,0.843662,0.740413,0.714531,0.94542,0.973339,0.39609,0.640409,0.20434,0.1228,0.346651,0.0248697,0.325116,0.0514957,0.82634,0.616608,0.500923,0.128395,0.692364,0.540559,0.135431,0.119348,0.243563,0.742683,0.05535
37,0.188162,0.133867,0.111754,0.0898515,0.566661,0.134671,0.498083,0.410322,0.875083,0.212614,0.355742,0.848422,0.608704,0.996152,0.0527627,0.731504,0.342803,0.0776324,0.0566196,0.394298,0.903972,0.673227,0.895222,0.0323673,0.365591,0.43578,0.167798,0.484939,0.679343,0.910482,0.540293,0.867505,0.0443489,0.652047,0.957357,0.61101,0.786718,0.45544,0.0213317,0.661801,0.668054,0.377074,0.510223,0.276758,0.373226,0.562986,0.00826224,0.716028,0.640618,0.0648819,0.110327,0.54459,0.738109,0.00554823,0.576958,0.1037,0.441328,0.744756,0.58864,0.120672,0.655238,0.128933,0.988177,0.699587,0.78098,0.945534,0.310596,0.567698,0.400974,0.331928,0.229498,0.0690278,0.709002,0.739722,0.345786,0.082228,0.302708,0.354048,0.798256,0.943326,0.41893,0.908583,0.487916,0.157039,0.914131,0.0648743,0.260739,0.35546,0.80963,0.849379,0.476131,0.464868,0.978312,0.464308,0.164455,0.759292,0.409842,0.475052,0.326989,0.810816,0.80698,0.556488,0.879844,0.515982,0.296209,0.22563,0.59821,0.598917,0.579678,0.396466,0.542243,0.998608,0.305049,0.0301595,0.155648,0.21918,0.0950338,0.416387,0.57464,0.904664,0.265766,0.0507711,0.369533,0.244078,0.515079,0.533988,0.00336999,0.924922,0.00903933,0.330359,0.735738,0.816019,0.886847,0.615582,0.332001,0.183057,0.841212,0.930211,0.781974,0.42089,0.326677,0.324217,0.419498,0.631726,0.354376,0.575146,0.850906,0.44941,0.991533,0.425546,0.354074,0.2573,0.476317,0.723607,0.501378,0.991396,0.257595,0.504748,0.916318,0.266634,0.835107,0.652056,0.0826529,0.721954,0.267637,0.414654,0.905011,0.108849,0.344864,0.686985,0.529739,0.671541,0.0112014,0.949237,0.303267,0.365578,0.524383,0.154173,0.814988,0.515916,0.579719,0.169062,0.773216,0.0560362,0.892669,0.274594,0.0474327,0.150264,0.779342,0.963751,0.416898,0.614449,0.615807,0.49955,0.336403,0.883444,0.914204,0.241414,0.992293,0.259069,0.928399,0.522032,0.93061,0.9396,0.471269,0.233877,0.305178,0.995652,0.38805,0.120166,0.511568,0.967769,0.289228,0.284784,0.0238053,0.181897,0.559378,0.071238,0.33216,0.33872,0.0349888,0.749058,0.953169,0.650795,0.248608,0.289572,0.534239,0.162812,0.530986,0.526532,0.421881,0.459385,0.0485638,0.352491,0.398985,0.519833,0.586367,0.704163,0.515485,0.974417,0.824328,0.0270529,0.942186,0.113556,0.311837,0.965992,0.295453,0.871216,0.0372296,0.627613,0.209936,0.0722184,0.376671,0.163105,0.723014,0.625279,0.452677,0.257253,0.788092,0.983663,0.783785,0.209973,0.443048,0.832349,0.562463,0.842033,0.352181,0.148831,0.546196,0.867666,0.123248,0.370524,0.894719,0.0654343,0.48408,0.206556,0.0314259,0.779533,0.0777717,0.0686555,0.407146,0.287707,0.140874,0.783817,0.450812,0.863888,0.409097,0.903489,0.12114,0.197188,0.887152,0.904925,0.407161,0.3302,0.737274,0.969625,0.172233,0.0894557,0.118455,0.718428,0.957122,0.241703,0.0889523,0.85184,0.307138,0.573032,0.0583965,0.338564,0.352566,0.136168,0.407219,0.759712,0.423876,0.548093,0.543529,0.874687,0.411981,0.952626,0.778176,0.533121,0.149814,0.665328,0.438047,0.556976,0.995528,0.175321,0.5266,0.16776,0.264777,0.645056,0.886188,0.221898,0.886759,0.975141,0.0737385,0.193897,0.548173,0.132135,0.532461,0.900739,0.268303,0.93968,0.660451,0.692179,0.487773,0.20398,0.566866,0.899754,0.156606,0.345042,0.432875,0.306421,0.0103704,0.870921,0.863396,0.005898,0.046242,0.389997,0.173658,0.311018,0.0350526,0.0598466,0.532917,0.921812,0.0349873,0.606655,0.115709,0.583161,0.73879,0.64817,0.483899,0.0070933,0.58785,0.14435,0.699272,0.0756229,0.34833,0.266138,0.975377,0.504936,0.61118,0.408251,0.811357,0.621551,0.279172,0.674753,0.627449,0.325414,0.0647498,0.801107,0.636433,0.0998024,0.860954,0.16935,0.0
216143,0.895941,0.776005,0.137323,0.479102,0.514795,0.785493,0.963001,0.521888,0.373343,0.107351,0.22116,0.448966,0.455682,0.487298,0.424343,0.960618,0.0984787,0.832594,0.771975,0.720029,0.111766,0.446728,0.347478,0.437181,0.511478,0.148585,0.0736137,0.61128,0.00953892,0.242963,0.632894,0.90548,0.018968,0.770218,0.384581,0.533763,0.555711,0.347582,0.0556511,0.929054,0.454934,0.276811,0.37802,0.910615,0.764109,0.802363,0.871233,0.862588,0.634956,0.643208,0.582618,0.746723,0.0899358,0.930096,0.183903,0.601414,0.0786813,0.257517,0.212694,0.0882202,0.50048,0.845588,0.9937,0.519448,0.615806,0.378281,0.0532112,0.171517,0.725864,0.108862,0.10057,0.180798,0.385674,0.47859,0.0914127,0.149783,0.280953,0.962646,0.0123713,0.915909,0.605854,0.594989,0.662632,0.69579,0.525085,0.846535,0.297203,0.603766,0.104052,0.509897,0.691986,0.604532,0.355485,0.685686,0.123981,0.97129,0.0639679,0.177192,0.142807,0.789832,0.286054,0.243377,0.970629,0.671728,0.721968,0.062042,0.821511,0.00292042,0.0246879,0.833882,0.91883,0.630542,0.428871,0.581461,0.326331,0.953956,0.427996,0.623534,0.557722,0.532048,0.133431,0.249708,0.136581,0.488916,0.935395,0.260561,0.460206,0.999363,0.437753,0.603013,0.789194,0.723807,0.84639,0.759824,0.395535,0.568358,0.821866,0.217046,0.571278,0.846554,0.050928,0.490108,0.477095,0.479799,0.0715692,0.803427,0.433755,0.499566,0.426961,0.991477,0.031614,0.560392,0.241185,0.168195,0.049308,0.17658,0.428756,0.509514,0.175943,0.866509,0.112527,0.965137,0.590317,0.958918,0.724961,0.985852,0.527276,0.546827,0.202898,0.0985539,0.393381,0.253826,0.588662,0.870476,0.733625,0.660231,0.673903,0.16738,0.159797,0.100864,0.158857,0.191411,0.661256,0.400042,0.359605,0.710564,0.576622,0.788362,0.220078,0.752565,0.654871,0.332605,0.717702,0.245188,0.291523,0.442663,0.23104,0.818799,0.98949,0.433938,0.917352,0.382871,0.687764,0.506014,0.253347,0.421388,0.166245,0.927249,0.588768,0.326042,0.0281132,0.747625,0.517453,0.689369,0.147667,0.877058,0.399933,0.724289,0.66542,0.620011,0.476853,0.320291,0.952617,0.194555,0.565479,0.24414,0.637219,0.796519,0.0629383,0.626709,0.230457,0.980291,0.00957944,0.91822,0.486305,0.262926,0.339609,0.65255,0.190176,0.928377,0.978592,0.218289,0.676001,0.496045,0.907658,0.823668,0.373103,0.307591,0.547957,0.0385222,0.927603,0.0248098,0.358813,0.880219,0.219365,0.924292,0.124359,0.856584,0.72081,0.187297,0.483293,0.951267,0.167588,0.492872,0.869487,0.653893,0.755798,0.209096,0.306443,0.945974,0.137473,0.285036,0.164263,0.813474,0.78108,0.0719208,0.637142,0.154183,0.379512,0.185099,0.192705,0.307115,0.209909,0.551518,0.187334,0.429274,0.47581,0.311693,0.285858,0.19662,0.498991,0.76915,0.147887,0.666579,0.262023,0.0173746,0.320472,0.0178208,0.226471,0.626916,0.963795,0.363943,0.911951,0.128057,0.177418,0.693031,0.199978,0.81456,0.847215,0.57949,0.999659,0.0399197,0.886605,0.209568,0.591438,0.0739393,0.638842,0.0672479,0.385633,0.9247,0.263868,0.884623,0.69385,0.411756,0.551202,0.955873,0.42913,0.871674,0.973693,0.655601,0.49859,0.937488,0.0195443,0.410541,0.0655454,0.196962,0.103573,0.265524,0.0115221,0.950787,0.845014,0.011181,0.990707,0.731619,0.220749,0.582145,0.805559,0.85959,0.649393,0.191191,0.78429,0.913261,0.0758147,0.47814,0.325017,0.627017,0.434013,0.754147,0.498692,0.407706,0.409748,0.997281,0.345194,0.429292,0.407823,0.410739,0.626254,0.511395,0.676263,0.637776,0.462182,0.521277,0.648957,0.452889,0.252896,0.869706,0.0350341,0.0584549,0.729296,0.684427,0.249646,0.513586,0.597688,0.325461,0.991726,0.922704,0.952478,0.425739,0.676851,0.451169,0.833445,0.086599,0.448451,0.178639,0.5
15891,0.856273,0.589378,0.142145,0.367669,0.265641,0.779921,0.829851,0.786919,0.428878,0.28274,0.039815,0.298584,0.317775,0.0982699,0.0278804,0.00220131,0.347916,0.541467,0.599889,0.673377,0.533193,0.522593,0.625855,0.958932,0.199445,0.0770241,0.792376,0.286044,0.525475,0.971015,0.801935,0.381748,0.560394,0.94408,0.749417,0.826035,0.724001,0.579268,0.612954,0.152879,0.862009,0.652769,0.451463,0.179783,0.751039,0.479344,0.181984,0.0989548,0.0208103,0.781873,0.772332,0.554003,0.304467,0.398186,0.512935,0.503912,0.47521,0.305311,0.789955,0.000685278,0.276327,0.59189,0.382434,0.83672,0.535969,0.131851,0.662755,0.25997,0.711119,0.275709,0.41285,0.573128,0.928478,0.864313,0.752911,0.679517,0.343657,0.934895,0.778471,0.364467,0.716769,0.550803,0.91847,0.0212356,0.948989,0.431405,0.525147,0.4242,0.736716,0.315102,0.424885,0.0130426,0.906992,0.807319,0.849763,0.442962,0.939169,0.512518,0.702932,0.650288,0.788227,0.115782,0.223416,0.716705,0.980094,0.976327,0.396222,0.323751,0.911222,0.174693,0.688218,0.627991,0.725496,0.606688,0.649226,0.674485,0.0380925,0.174373,0.0986844,0.774809,0.489476,0.523569,0.787851,0.396468,0.330888,0.637614,0.83943,0.270057,0.150132,0.542362,0.920345,0.938359,0.658143,0.143761,0.655064,0.638238,0.120088,0.0512856,0.961989,0.03131,0.225978,0.650207,0.659301,0.951474,0.256894,0.308527,0.625959,0.294987,0.4829,0.724643,0.0697954,0.972376,0.248213,0.857646,0.368844,0.5791,0.49526,0.208273,0.849157,0.645392,0.750635,0.769503,0.583751,0.408778,0.913264,0.238815,0.0470162,0.0333522,0.290101,0.00900493,0.0646622,0.516079,0.659212,0.723963,0.467554,0.916106,0.0324895,0.0935125,0.211093,0.51539,0.818156,0.280888,0.487765,0.0663686,0.138535,0.856609,0.645469,0.633795,0.0648821,0.494626,0.279187,0.815517,0.264129,0.862938,0.224296,0.177393,0.101754,0.271312,0.210745,0.391855,0.280317,0.275408,0.907934,0.939528,0.99937,0.375488,0.855634,0.0318597,0.469,0.0667267,0.547249,0.287156,0.347615,0.0350145,0.353525,0.486149,0.891623,0.998994,0.119944,0.956505,0.49362,0.399131,0.772023,0.757749,0.26207,0.996318,0.935143,0.363823,0.26763,0.145888,0.755678,0.547947,0.421295,0.663613,0.487475,0.420666,0.0391003,0.343109,0.452525,0.508101,0.409835,0.999775,0.795257,0.75745,0.0347891,0.148782,0.243599,0.926412,0.147776,0.363543,0.882918,0.641396,0.762675,0.654941,0.399145,0.0247443,0.651259,0.334288,0.388568,0.918889,0.480176,0.144246,0.466835,0.901471,0.807859,0.95431,0.322137,0.846959,0.297419,0.774662,0.355059,0.707254,0.774437,0.150316,0.464704,0.809226,0.299098,0.708304,0.735638,0.446874,0.0718472,0.618556,0.0882696,0.834522,0.273497,0.487415,0.859266,0.924756,0.821702,0.247834,0.843644,0.301878,0.39208,0.31048,0.203349,0.199938,0.26479,0.525486,0.0468972,0.562208,0.300148,0.401957,0.269462,0.0745847,0.552273,0.734167,0.883811,0.851371,0.44247,0.619449,0.298245,0.514318,0.238005,0.386515,0.34884,0.511502,0.873929,0.208106,0.436258,0.695632,0.455939,0.279902,0.99751,0.848019,0.590382,0.200859,0.0479575,0.855171,0.726345,0.0948547,0.417379,0.0264927,0.496811,0.686842,0.101077,0.0490843,0.421009,0.984888,0.900455,0.863479,0.604337,0.1987,0.377797,0.842342,0.585215,0.726636,0.353845,0.459144,0.934742,0.790103,0.154776,0.390682,0.0700047,0.152286,0.238701,0.660386,0.353145,0.286658,0.515558,0.0794902,0.381513,0.932937,0.105983,0.878324,0.619779,0.20706,0.927409,0.0407879,0.191948,0.827864,0.904267,0.796285,0.0265645,0.282064,0.638628,0.61178,0.00870059,0.992473,0.070924,0.943443,0.782575,0.2257,0.334124,0.85258,0.377987,0.572825,0.512966,0.731132,0.859483,0.0285238,0.810622,0.240996,0.961461,0.9166
05,0.11932,0.58124,0.123665,0.0467286,0.622028,0.315614,0.874593,0.526295,0.111899,0.901157,0.808359,0.750527,0.512937,0.81706,0.743,0.583861,0.760502,0.525575,0.809561,0.0946265,0.378155,0.187548,0.667451,0.891121,0.91868,0.526934,0.919645,0.729302,0.76793,0.881106,0.645907,0.88725,0.462346,0.769573,0.933979,0.0843737,0.0851866,0.808572,0.610669,0.197086,0.709729,0.419028,0.947613,0.222665,0.236087,0.690613,0.806526,0.99659,0.216188,0.616087,0.0912163,0.594342,0.803635,0.758668,0.485464,0.722314,0.285602,0.405108,0.451616,0.0535325,0.286214,0.0975237,0.940783,0.74856,0.867097,0.874762,0.832934,0.952283,0.683334,0.443603,0.149369,0.393062,0.86263,0.096982,0.615728,0.0987177,0.787595,0.422254,0.0953074,0.0037822,0.0383407,0.186524,0.598125,0.841975,0.945191,0.0835881,0.564289,0.230794,0.488697,0.0159058,0.284326,0.774911,0.11343,0.225109,0.523471,0.980526,0.0998709,0.356405,0.932809,0.783205,0.800007,0.0821784,0.176267,0.662638,0.17916,0.791995,0.761355,0.966755,0.214249,0.856663,0.970537,0.252589,0.0431864,0.568662,0.0945645,0.988378,0.65225,0.658854,0.219171,0.140946,0.67476,0.503497,0.915857,0.788189,0.728606,0.439328,0.768716,0.828477,0.795733,0.701525,0.611682,0.59574,0.783703,0.787949,0.258378,0.962864,0.579944,0.0197331,0.929618,0.794193,0.876396,0.900156,0.0467819,0.919582,0.468817,0.141346,0.90796,0.121067,0.8002,0.127131,0.262014,0.47496,0.630629,0.177871,0.26315,0.359235,0.617199,0.0318651,0.187712,0.412932,0.73339,0.799394,0.00867228,0.517093,0.587343,0.26705,0.479957,0.167287,0.286783,0.409575,0.961479,0.163179,0.309731,0.00826114,0.0827612,0.778548,0.149608,0.990721,0.899615,0.949808,0.117852,0.161628,0.424768,0.748481,0.339499,0.687918,0.107716,0.956698,0.719783,0.295428,0.36963,0.453173,0.0948215,0.378302,0.970266,0.682164,0.645353,0.450223,0.849451,0.932136,0.859797,0.81093,0.0953149,0.169528,0.819191,0.178076,0.948076,0.968799,0.168797,0.847691,0.918607,0.286649,0.00931933,0.343375,0.0351303,0.348819,0.0312932,0.142846,0.305517,0.751076,0.438274,0.675147,0.204249,0.533095,0.0534491,0.174515,0.215259,0.698802,0.624738,0.0647103,0.630938,0.484535,0.87564,0.726252,0.654063,0.694832,0.904328,0.602139,0.663631,0.0731256,0.44983,0.582238,0.359775,0.459149,0.925613,0.394905,0.807968,0.956906,0.537751,0.113484,0.707982,0.976025,0.788631,0.912231,0.50912,0.84208,0.0867464,0.724379,0.540882,0.711484,0.78909,0.171819,0.196019,0.66473,0.898072,0.850082,0.359562,0.8024,0.452221,0.0231926,0.875526,0.902051,0.60543,0.235301,0.3612,0.531043,0.630206,0.169168,0.487949,0.167958,0.282652,0.195931,0.143982,0.071283,0.108163,0.653102,0.913363,0.194909,0.377482,0.454245,0.906393,0.166572,0.626065,0.102412,0.831302,0.524136,0.952494,0.190864,0.326537,0.404715,0.214056,0.202062,0.306766,0.819487,0.437363,0.667966,0.35053,0.0675697,0.837133,0.838479,0.235527,0.119785,0.03441,0.37951,0.191068,0.142573,0.0326122,0.104432,0.337482,0.410094,0.558677,0.243875,0.576666,0.184741,0.346287,0.407968,0.708877,0.298781,0.598831,0.035414,0.703496,0.812888,0.237476,0.0102618,0.632374,0.67484,0.678228,0.982904,0.74241,0.515361,0.821383,0.977937,0.635146,0.855793,0.357447,0.826215,0.998365,0.390059,0.930646,0.335847,0.800153,0.489323,0.579722,0.376819,0.674064,0.926008,0.784786,0.382941,0.224789,0.383618,0.418355,0.928285,0.196506,0.655832,0.938546,0.82888,0.330672,0.616774,0.811784,0.0730815,0.132135,0.633167,0.0510184,0.767282,0.48896,0.408465,0.593496,0.487325,0.798524,0.524143,0.823172,0.598677,0.0134656,0.402894,0.975496,0.68753,0.328902,0.760283,0.0704711,0.553691,0.1439,0.488827,0.481975,0.340406,0.144659,0
.420522,0.169286,0.47533,0.0372959,0.981071,0.548412,0.169431,0.614238,0.59943,0.936713,0.103197,0.00789549,0.530209,0.590523,0.80642,0.0543521,0.413695,0.405097,0.0678177,0.816588,0.380593,0.755347,0.14549,0.140876,0.825818,0.699181,0.284776,0.314645,0.181156,0.625183,0.459304,0.601678,0.794469,0.934634,0.638974,0.775539,0.483046,0.808405,0.389777,0.082476,0.745118,0.492974,0.0903715,0.275327,0.0834968,0.896791,0.329679,0.497191,0.301888,0.397497,0.31378,0.682482,0.152844,0.45927,0.823357,0.978663,0.158451,0.108134,0.293308,0.339607,0.733316,0.752611,0.941285,0.527785,0.687245,0.580259,0.303324,0.170291,0.388664,0.693101,0.252767,0.133782,0.186076,0.343138,0.409109,0.269572,0.23993,0.738788,0.766764,0.541818,0.136285,0.0805435,0.224299,0.289129,0.539814,0.0476569,0.267792,0.698264,0.155791,0.561099,0.0378716,0.889107,0.31371,0.979157,0.416892,0.000955501,0.559415,0.720216,0.171246,0.948079,0.413318,0.424013,0.0818607,0.599393,0.767152,0.490969,0.868965,0.00708137,0.229757,0.635729,0.548899,0.366042,0.716273,0.773199,0.655171,0.256086,0.820856,0.922962,0.954351,0.976646,0.484062,0.992222,0.865753,0.797772,0.971379,0.282645,0.798727,0.530794,0.00286168,0.969974,0.478873,0.416179,0.393987,0.560734,0.0155724,0.161139,0.0517036,0.884538,0.16822,0.281461,0.520267,0.717119,0.647503,0.23654,0.490318,0.302673,0.492626,0.311174,0.225636,0.446977,0.28782,0.709697,0.439199,0.153574,0.507469,0.410578,0.436219,0.306197,0.941372,0.439081,0.27617,0.420246,0.85526,0.670158,0.98098,0.870832,0.831296,0.0326835,0.75537,0.999516,0.314144,0.275637,0.716636,0.961647,0.512177,0.206954,0.26432,0.00480281,0.518128,0.489956,0.451779,0.805949,0.199654,0.890979,0.959522,0.707123,0.301556,0.395741,0.0133197,0.242929,0.834822,0.28949,0.663175,0.690082,0.959648,0.644154,0.560914,0.790944,0.676838,0.316285,0.79046,0.990982,0.591922,0.507096,0.952629,0.104099,0.71405,0.21695,0.108902,0.232179,0.706906,0.560681,0.038127,0.90656,0.45166,0.997649,0.613683,0.753216,0.393391,0.627002,0.996145,0.228213,0.916493,0.659319,0.918295,0.87614,0.303474,0.479209,0.667084,0.980312,0.795494,0.457545,0.971294,0.387416,0.964641,0.923923,0.491515,0.678691,0.140873,0.600417,0.91087,0.847779,0.161098,0.948997,0.754339,0.612758,0.946646,0.368022,0.365974,0.340037,0.995024,0.362119,0.568249,0.911517,0.0214382,0.486544,0.787657,0.324912,0.965753,0.454741,0.305224,0.761247,0.912286,0.276518,0.148663,0.876927,0.200441,0.640178,0.555618,0.341315,0.240594,0.466488,0.189094,0.401692,0.415485,0.943433,0.0144501,0.362131,0.311454,0.380424,0.702167,0.306478,0.742543,0.270417,0.217995,0.763981,0.75696,0.00565207,0.0888932,0.722713,0.460393,0.394117,0.48396,0.372679,0.670635,0.632623,0.249606,0.871077,0.272801,0.805224,0.212391,0.513395,0.271712,0.401485,0.915088,0.687196,0.344917,0.929538,0.0493268,0.656372,0.309962,0.751494,0.96285,0.0525047,0.0219108,0.180845,0.816486,0.778871,0.186497,0.905379,0.501585,0.646891,0.299496,0.985545,0.0195698,0.970131,0.618168,0.269176,0.841208,0.890969,0.0743995,0.0535991,0.404364,0.346111,0.455084,0.319452,0.0333072,0.800001,0.248989,0.082634,0.456373,0.558951,0.834128,0.419223,0.611456,0.856039,0.600069,0.427941,0.63491,0.786566,0.33332,0.136495,0.433457,0.632817,0.12204,0.453027,0.602948,0.740208,0.722202,0.444156,0.631176,0.796602,0.497755,0.0355402,0.142713,0.952839,0.354992,0.17602,0.75284,0.603981,0.258654,0.209213,0.162932,0.0927824,0.628437,0.774388,0.948821,0.228506,0.202329,0.583731,0.0150719,0.535649,0.720226,0.448529,0.168466,0.842266,0.901556,0.771414,0.582474,0.623758,0.21557,0.21365,0.42036,0.713325,0.2491
9,0.563073,0.666164,0.604182,0.739093,0.419004,0.208163,0.997748,0.628217,0.371095,0.09053,0.256654,0.145482,0.0393513,0.485159,0.347811,0.623083,0.500231,0.883461,0.343309,0.94876,0.051927,0.185575,0.850316,0.823341,0.768048,0.474074,0.0389109,0.981698,0.894435,0.752236,0.230888,0.457508,0.418399,0.83507,0.196601,0.837403,0.0432331,0.194349,0.46562,0.414328,0.284879,0.722274,0.55981,0.32423,0.207433,0.907622,0.947313,0.707665,0.791083,0.290622,0.656425,0.84301,0.476197,0.506741,0.666351,0.244245,0.980815,0.705262,0.225944,0.87525,0.457497,0.456832,0.332757,0.875897,0.291903,0.529359,0.7133,0.335136,0.723707,0.17892,0.749463,0.00858635,0.901194,0.309274,0.332817,0.108627,0.216895,0.28013,0.816292,0.00797793,0.570752,0.472717,0.850988,0.0469485,0.979458,0.517338,0.291194,0.960273,0.2226,0.517138,0.835523,0.680097,0.97397,0.16828,0.555994,0.265873,0.697639,0.269293,0.601008,0.421346,0.448213,0.350472,0.429933,0.349407,0.659745,0.762749,0.458034,0.876641,0.0428787,0.274326,0.884619,0.61363,0.747043,0.735606,0.660579,0.726501,0.252944,0.951773,0.686774,0.475544,0.46891,0.522296,0.155641,0.442881,0.690577,0.711635,0.708753,0.388215,0.980928,0.309761,0.809562,0.429141,0.660233,0.239494,0.778548,0.319979,0.00224324,0.236583,0.196619,0.0451219,0.510909,0.0812382,0.658752,0.257952,0.816844,0.319331,0.984452,0.069789,0.271104,0.671226,0.545333,0.740014,0.193523,0.700975,0.182895,0.884099,0.41261,0.891648,0.272315,0.393538,0.201409,0.0818762,0.822679,0.861643,0.32137,0.601228,0.181621,0.323614,0.83781,0.378241,0.368735,0.348719,0.459479,0.0274876,0.606671,0.276323,0.346819,0.591123,0.346112,0.617922,0.262349,0.891446,0.357936,0.455872,0.59242,0.540831,0.339971,0.00502998,0.432479,0.612286,0.398568,0.633888,0.694162,0.221247,0.495531,0.0155327,0.822475,0.677152,0.339146,0.660286,0.0553924,0.707882,0.00900449,0.514871,0.735369,0.615675,0.791195,0.0821878,0.206798,0.137307,0.70011,0.469148,0.0287527,0.0580462,0.92502,0.621173,0.598877,0.264991,0.626203,0.0313559,0.877277,0.0247709,0.665244,0.57144,0.246018,0.160774,0.586972,0.0684935,0.837926,0.926119,0.728779,0.893319,0.634,0.737783,0.40819,0.36937,0.353459,0.199385,0.451557,0.560257,0.336692,0.151667,0.0294044,0.365444,0.209714,0.954424,0.986617,0.808591,0.219415,0.61282,0.839947,0.0966927,0.637591,0.505191,0.668132,0.883609,0.665965,0.255105,0.952103,0.503891,0.181223,0.680882,0.39721,0.815224,0.418665,0.8054,0.184593,0.772124,0.00478469,0.63615,0.332381,0.341476,0.787818,0.361785,0.706921,0.997531,0.31621,0.693538,0.806122,0.535625,0.306358,0.646069,0.632318,0.943949,0.151259,0.30045,0.827559,0.817224,0.555555,0.779662,0.321116,0.736778,0.460544,0.718326,0.552002,0.879209,0.523726,0.736595,0.651333,0.52851,0.372745,0.983714,0.869987,0.160563,0.3455,0.576907,0.158094,0.661709,0.270445,0.964216,0.197334,0.576804,0.610285,0.829652,0.520753,0.761544,0.130102,0.348312,0.578769,0.685657,0.127973,0.899884,0.422435,0.588517,0.61821,0.974436,0.467726,0.141936,0.711031,0.119059,0.670446,0.0837764,0.102774,0.540433,0.244339,0.448273,0.11734,0.402434,0.109982,0.387785,0.36665,0.307316,0.964589,0.976935,0.136968,0.485342,0.738479,0.26707,0.833654,0.317248,0.952726,0.961627,0.217132,0.375161,0.550144,0.835342,0.349598,0.0178702,0.977277,0.0606288,0.13693,0.647723,0.144405,0.239703,0.188156,0.388744,0.687976,0.305496,0.791178,0.797959,0.693281,0.157828,0.105275,0.65787,0.134763,0.242243,0.143212,0.873242,0.509312,0.976865,0.19049,0.462038,0.938492,0.407622,0.8372,0.488636,0.242964,0.186797,0.506507,0.220241,0.247426,0.643436,0.867964,0.391831,0.883139,0.0561202,0.7
80575,0.571116,0.361616,0.571753,0.369074,0.0548973,0.729581,0.474349,0.712767,0.864344,0.716592,0.855979,0.737587,0.225904,0.832845,0.928077,0.687942,0.771337,0.335699,0.525142,0.259974,0.578662,0.711939,0.76648,0.798903,0.959365,0.409916,0.666868,0.351196,0.293056,0.722988,0.131771,0.864172,0.0846039,0.703525,0.233246,0.139501,0.433106,0.707595,0.852269,0.29745,0.424187,0.708248,0.035037,0.650091,0.541093,0.963114,0.338034,0.31243,0.298812,0.863176,0.572404,0.877474,0.575115,0.338884,0.676378,0.534479,0.7488,0.343245,0.885675,0.0418559,0.0662334,0.0174463,0.906028,0.150837,0.720971,0.139274,0.290338,0.154077,0.846869,0.142607,0.451528,0.271056,0.850855,0.486565,0.921148,0.391948,0.449678,0.259181,0.704378,0.74849,0.122357,0.276782,0.625965,0.697471,0.615666,0.302342,0.231951,0.364466,0.645588,0.117626,0.406322,0.711821,0.135072,0.312349,0.862658,0.856043,0.451623,0.152997,0.0101204,0.298492,0.295604,0.461648,0.569548,0.146459,0.948212,0.490696,0.538407,0.397891,0.749877,0.242785,0.146381,0.872234,0.519567,0.772345,0.569705,0.135232,0.0746876,0.801656,0.499698,0.720275,0.919282,0.90602,0.432097,0.0543548,0.218369,0.294755,0.910398,0.669992,0.447752,0.920519,0.968484,0.743356,0.382167,0.538032,0.889814,0.330379,0.0287283,0.428221,0.728269,0.778606,0.671006,0.87465,0.65084,0.190573,0.646995,0.220545,0.325805,0.721683,0.0222014,0.825503,0.441958,0.941484,0.731523,0.874055,0.995839,0.949893,0.16881,0.906237,0.619885,0.616562,0.826755,0.588369,0.359917,0.208922,0.126401,0.249732,0.539301,0.155129,0.677953,0.26757,0.933735,0.348959,0.142221,0.584574,0.539532,0.789216,0.80512,0.865337,0.510899,0.827321,0.690841,0.952857,0.768805,0.422364,0.826912,0.764643,0.372257,0.995722,0.67088,0.992141,0.612284,0.497636,0.58051,0.972202,0.706558,0.706911,0.221933,0.245859,0.86204,0.899887,0.513429,0.795775,0.248846,0.65565,0.38035,0.788378,0.444866,0.185469,0.653715,0.955765,0.0127901,0.344555,0.908622,0.781595,0.766919,0.735535,0.546238,0.139176,0.731257,0.217119,0.131317,0.343541,0.714755,0.711827,0.315743,0.421313,0.418738,0.537676,0.667171,0.280778,0.437563,0.180601,0.0765529,0.686409,0.83625,0.456902,0.474786,0.281116,0.642372,0.128501,0.236881,0.655162,0.473056,0.145503,0.436757,0.239975,0.881038,0.982995,0.379151,0.612295,0.200114,0.510468,0.955836,0.914869,0.222295,0.271579,0.336181,0.641032,0.809255,0.0033525,0.92181,0.246818,0.183953,0.998363,0.933226,0.0202034,0.455266,0.408013,0.301319,0.0976371,0.536513,0.5382,0.752799,0.00956949,0.683703,0.189555,0.249545,0.564741,0.17255,0.628696,0.177035,0.372664,0.139165,0.132871,0.287533,0.361459,0.40445,0.623714,0.00249198,0.213705,0.627066,0.924302,0.460523,0.81102,0.922665,0.393749,0.831223,0.377931,0.801762,0.132542,0.475568,0.338275,0.670743,0.228367,0.347844,0.354446,0.417922,0.597389,0.919187,0.590473,0.226086,0.0962219,0.963137,0.36525,0.229093,0.25067,0.72671,0.633543,0.874384,0.729202,0.847247,0.50145,0.653504,0.30777,0.31247,0.576169,0.701519,0.143693,0.9541,0.50328,0.276235,0.429668,0.841555,0.946978,0.658034,0.189399,0.301424,0.0759564,0.786789,0.220611,0.666429,0.0128742,0.316833,0.629566,0.378124,0.545926,0.880236,0.104834,0.179468,0.75462,0.834035,0.0267153,0.25607,0.487539,0.334485,0.56854,0.0637081,0.0360038,0.712233,0.0178078,0.539284,0.988468,0.447475,0.380839,0.935446,0.10551,0.570238,0.23687,0.181466,0.357027,0.457481,0.847895,0.369901,0.774314,0.477461,0.748025,0.320239,0.357697,0.852859,0.499707,0.112316,0.686895,0.526423,0.368386,0.174434,0.860908,0.936926,0.238142,0.896912,0.649159,0.25595,0.436195,0.637627,0.703425,0.817034,0.57307
3,0.808935,0.387273,0.809943,0.990401,0.7443,0.267424,0.838296,0.114201,0.041738,0.315757,0.862226,0.361977,0.673454,0.715086,0.861684,0.78577,0.40198,0.388107,0.154157,0.576414,0.249015,0.0910828,0.814556,0.145926,0.740242,0.0705057,0.582122,0.377869,0.773931,0.399156,0.950942,0.582866,0.786429,0.760885,0.573267,0.530729,0.0283098,0.411563,0.64493,0.0700478,0.72732,0.507156,0.432025,0.400773,0.222242,0.293709,0.186544,0.624222,0.681816,0.3407,0.200636,0.930831,0.431783,0.0151921,0.076757,0.172025,0.0856979,0.658879,0.549894,0.859629,0.0580348,0.500835,0.442494,0.844464,0.261721,0.0157611,0.375192,0.290031,0.427324,0.0201222,0.360078,0.154643,0.527278,0.792103,0.555417,0.74952,0.0858126,0.741961,0.373742,0.767629,0.082661,0.574378,0.69846,0.514444,0.58957,0.775217,0.686469,0.675268,0.434095,0.236363,0.534897,0.49213,0.737198,0.977391,0.336594,0.998919,0.993153,0.711786,0.288949,0.420476,0.731908,0.649028,0.57512,0.259187,0.441131,0.130536,0.00870671,0.526944,0.872497,0.382449,0.294572,0.955158,0.956827,0.993032,0.469602,0.546397,0.768249,0.156072,0.221665,0.202344,0.392434,0.756562,0.694475,0.129633,0.733954,0.0310684,0.128552,0.727106,0.742855,0.417501,0.147583,0.474763,0.0665289,0.722702,0.73395,0.50766,0.853239,0.742657,0.0346036,0.725736,0.125105,0.329176,0.680894,0.0819322,0.322208,0.150497,0.62833,0.0904571,0.306568,0.849995,0.292801,0.699003,0.606557,0.987276,0.828635,0.340511,0.0183444,0.957187,0.0676177,0.761199,0.374688,0.2152,0.235962,0.441217,0.937903,0.969912,0.948877,0.791141,0.712568,0.98348,0.516877,0.837674,0.312656,0.197771,0.919606,0.634865,0.348268,0.547935,0.725322,0.654836,0.39793,0.0181231,0.353839,0.00448784,0.00539908,0.182474,0.344999,0.0237434,0.139661,0.412617,0.784942,0.514349,0.627817,0.0209046,0.955565,0.56572,0.990817,0.904442,0.356861,0.703385,0.887922,0.873739,0.541059,0.200579,0.07151,0.460665,0.835443,0.419778,0.00860021,0.560765,0.0746143,0.406531,0.578888,0.428453,0.411018,0.584287,0.610927,0.756018,0.608031,0.750588,0.168634,0.392973,0.264936,0.796451,0.413878,0.220502,0.362171,0.404694,0.124944,0.719032,0.108079,0.0128662,0.592771,0.649138,0.213445,0.664281,0.109803,0.0488883,0.0840589,0.118403,0.609653,0.158673,0.524933,0.188541,0.587126,0.935952,0.772829,0.198053,0.69197,0.380859,0.948641,0.860604,0.773832,0.213577,0.657055,0.18771,0.434079,0.0192264,0.592404,0.559023,0.738259,0.700483,0.571889,0.33103,0.349621,0.785334,0.99531,0.459423,0.834222,0.0793694,0.577826,0.443876,0.238043,0.10276,0.632417,0.825169,0.0387117,0.405245,0.0232221,0.730681,0.786105,0.971863,0.591285,0.559937,0.18544,0.24834,0.747646,0.619519,0.267567,0.34005,0.178542,0.00582552,0.0405333,0.750431,0.336855,0.390154,0.535765,0.332166,0.849578,0.369987,0.411535,0.427404,0.813863,0.649578,0.530164,0.44628,0.474746,0.568875,0.851525,0.497968,0.299556,0.63763,0.469832,0.890841,0.197566,0.655272,0.139182,0.945213,0.274791,0.406749,0.285263,0.453333,0.412574,0.325796,0.203764,0.749429,0.71595,0.73953,0.0815949,0.565528,0.109517,0.49313,0.992932,0.92338,0.142707,0.523095,0.36966,0.617454,0.0919706,0.221185,0.115422,0.391527,0.858815,0.585254,0.282369,0.0563812,0.240526,0.42155,0.00159386,0.515317,0.828299,0.286857,0.96865,0.240873,0.612653,0.172415,0.990303,0.328603,0.911944,0.0718974,0.894131,0.0214615,0.565027,0.887063,0.944841,0.707735,0.410159,0.314501,0.325188,0.502129,0.535686,0.440611,0.893656,0.394501,0.0258647,0.176025,0.450882,0.26639,0.597575,0.452476,0.781707,0.425874,0.739333,0.750358,0.666747,0.351986,0.922773,0.65705,0.680589,0.834717,0.728947,0.574721,0.856179,0.293975,0
.461784,0.80102,0.00170913,0.871942,0.115521,0.326898,0.374072,0.651208,0.767508,0.267728,0.0457089,0.793373,0.443752,0.496591,0.0597635,0.0413273,0.949068,0.841471,0.467201,0.688401,0.591829,0.133949,0.0403868,0.514601,0.790999,0.720976,0.349318,0.519946,0.295697,0.205497,0.81392,0.757481,0.00651685,0.815629,0.629423,0.122038,0.142527,0.00349497,0.773246,0.910035,0.271223,0.818955,0.703408,0.714975,0.315546,0.763172,0.756302,0.264614,0.604643,0.223504,0.953014,0.196471,0.357452,0.993401,0.711073,0.148451,0.714378,0.0603912,0.668396,0.0100746,0.265888,0.482317,0.767556,0.272405,0.297946,0.396979,0.394443,0.440473,0.400474,0.167689,0.350509,0.671697,0.986644,0.053917,0.386672,0.30219,0.817089,0.142974,0.566803,0.421732,0.366477,0.519818,0.618203,0.723929,0.513219,0.329276,0.87238,0.227597,0.389667,0.540777,0.237671,0.655555,0.0230934,0.00522711,0.92796,0.32104,0.402206,0.322403,0.761513,0.80268,0.490092,0.112021,0.474377,0.476736,0.165938,0.861048,0.778925,0.983027,0.00402209,0.345729,0.404759,0.370499,0.865547,0.0229622,0.0944288,0.378766,0.352238,0.966809,0.606363,0.741905,0.507585,0.844034,0.397461,0.530679,0.849261,0.325421,0.851718,0.251467,0.647824,0.613231,0.0541476,0.137916,0.725253,0.528524,0.614652,0.891191,0.389573,0.393577,0.874218,0.393595,0.739305,0.278977,0.764094,0.604852,0.301939,0.858523,0.983618,0.654178,0.825332,0.589981,0.396083,0.332917,0.434015,0.793544,0.863596,0.283276,0.118964,0.715314,0.534743,0.766788,0.328546,0.588891,0.904704,0.0537981,0.117415,0.519356,0.944989,0.506988,0.912933,0.819207,0.900583,0.652238,0.0981846,0.664677,0.25709,0.400124,0.5232,0.240708,0.0543017,0.348532,0.830689,0.450385,0.681449,0.264703,0.243928,0.545045,0.547979,0.362893,0.260359,0.0827228,0.129681,0.588905,0.671614,0.0343858,0.642703,0.789029,0.553742,0.587692,0.296017,0.466675,0.406899,0.1966,0.118913,0.505084,0.861277,0.376003,0.905208,0.384477,0.616712,0.95951,0.733008,0.4474,0.409894,0.414457,0.712104,0.653823,0.959502,0.260083,0.0167158,0.219861,0.342806,0.146397,0.808766,0.0144197,0.180783,0.451469,0.803449,0.734525,0.0391615,0.0994659,0.201199,0.446061,0.296066,0.320112,0.951145,0.157343,0.696116,0.856353,0.54182,0.312827,0.815863,0.274828,0.760228,0.225757,0.689285,0.472331,0.87958,0.648787,0.732414,0.896296,0.868649,0.0752203,0.042693,0.677415,0.08964,0.223476,0.128885,0.893089,0.958001,0.168046,0.992555,0.1592,0.614107,0.288621,0.479312,0.565252,0.445963,0.175428,0.421605,0.987783,0.488256,0.237468,0.262611,0.248483,0.463225,0.951896,0.720815,0.342805,0.600683,0.453229,0.239101,0.469332,0.528449,0.281794,0.146747,0.618089,0.50527,0.275631,0.511178,0.46327,0.443677,0.503733,0.62247,0.0577845,0.792354,0.101783,0.623036,0.238317,0.277211,0.0446414,0.2261,0.765467,0.282109,0.488711,0.0139502,0.745334,0.440606,0.734765,0.0881388,0.0412895,0.187994,0.32724,0.510621,0.716443,0.609033,0.657368,0.334533,0.114303,0.932999,0.845711,0.577574,0.376677,0.349444,0.200044,0.434462,0.141798,0.301827,0.057498,0.380115,0.579038,0.102139,0.606215,0.344505,0.384248,0.0949252,0.358455,0.129582,0.535532,0.0932201,0.217721,0.576821,0.281214,0.54496,0.0874424,0.997657,0.153994,0.74481,0.33219,0.268297,0.67781,0.177901,0.845871,0.054487,0.527345,0.0459148,0.488948,0.669143,0.347742,0.546446,0.0492575,0.92678,0.648586,0.655472,0.271285,0.0328341,0.750397,0.62974,0.162416,0.285929,0.72296,0.380137,0.86275,0.00417438,0.925098,0.950192,0.00183183,0.0790915,0.695003,0.334022,0.347389,0.372813,0.511923,0.193259,0.4273,0.0392688,0.239174,0.916248,0.708412,0.586916,0.462695,0.757669,0.513696,0.11128,0.413141,
0.784981,0.144115,0.163538,0.414721,0.306531,0.449467,0.137682,0.686668,0.312217,0.141856,0.611765,0.26241,0.143688,0.690857,0.957412,0.47771,0.0382454,0.330225,0.989633,0.231505,0.757525,0.028902,0.470679,0.673773,0.737314,0.0575953,0.136468,0.494983,0.571291,0.247748,0.908124,0.356272,0.391863,0.0716627,0.770994,0.698394,0.52113,0.908675,0.385061,0.833347,0.050531,0.996827,0.0957569,0.194219,0.687683,0.0531692,0.671929,0.725929,0.383394,0.661562,0.957434,0.140919,0.690464,0.428113,0.814693,0.427778,0.485708,0.95116,0.922761,0.0569994,0.198909,0.830885,0.413272,0.590772,0.902547,0.184265,0.289165,0.423678,0.0929404,0.674227,0.257025,0.143471,0.671053,0.352782,0.33769,0.358736,0.405951,0.00961872,0.0846651,0.789345,0.671181,0.0420987,0.930265,0.361644,0.470212,0.744957,0.789422,0.95592,0.696118,0.712183,0.012919,0.895026,0.543067,0.426191,0.485798,0.445615,0.610456,0.774963,0.869292,0.703396,0.44919,0.126317,0.846868,0.120243,0.479099,0.184558,0.478979,0.88505,0.194177,0.563644,0.674395,0.865357,0.605743,0.60466,0.227002,0.0759545,0.349617,0.0164238,0.0318741,0.0457346,0.728606,0.0447931,0.940761,0.271674,0.470984,0.426559,0.717288,0.0814399,0.201522,0.586581,0.784836,0.650712,0.712898,0.631704,0.770955,0.191997,0.816262,0.249934,0.0770464,0.010439,0.813579,0.751441,0.875796,0.419322,0.356101,0.102798,0.495276,0.705718,0.119222,0.52715,0.751453,0.847828,0.571943,0.692214,0.119502,0.042927,0.118773,0.83679,0.124367,0.320295,0.423371,0.909203,0.971008,0.136269,0.540908,0.741963,0.328266,0.35717,0.991897,0.405312,0.367609,0.805475,0.156754,0.243405,0.224797,0.512855,0.346203,0.720073,0.218573,0.465425,0.247223,0.970026,0.313253,0.819166,0.662239,0.432755,0.862093,0.781012,0.269545,0.98646,0.101307,0.692916,0.895663,0.072315,0.829185,0.436571,0.814278,0.157451,0.793741,0.806174,0.562763,0.16135,0.61165,0.719517,0.404755,0.836446,0.232371,0.750959,0.556519,0.450944,0.216384,0.803742,0.42097,0.529637,0.622908,0.0832091,0.962392,0.485001,0.864221,0.231938,0.471461,0.965529,0.924854,0.367124,0.0378438,0.75404,0.803695,0.852121,0.911491,0.597436,0.658296,0.474254,0.758785,0.269945,0.193771,0.16354,0.106392,0.426142,0.914499,0.662911,0.877086,0.130883,0.466653,0.298056,0.66052,0.0895611,0.381265,0.622912,0.574562,0.245486,0.85485,0.046023,0.211015,0.779705,0.413147,0.248859,0.533744,0.216842,0.10098,0.445235,0.814278,0.759276,0.919489,0.573063,0.0292212,0.113259,0.736603,0.135613,0.539401,0.651102,0.798524,0.416488,0.781985,0.265177,0.714544,0.442505,0.354738,0.0958087,0.0654173,0.9293,0.341295,0.920268,0.975323,0.55231,0.699972,0.38847,0.801169,0.233716,0.605312,0.902149,0.678951,0.41959,0.661425,0.59844,0.992653,0.690647,0.711699,0.729256,0.826259,0.2511,0.380358,0.624784,0.667588,0.162343,0.889961,0.382131,0.604848,0.244699,0.47794,0.670265,0.173999,0.819235,0.590533,0.149322,0.371545,0.290505,0.537793,0.172714,0.524221,0.143105,0.0748639,0.203172,0.562694,0.736289,0.801612,0.555347,0.426936,0.513311,0.284603,0.253195,0.764411,0.664961,0.877979,0.431999,0.827304,0.767939,0.81413,0.432152,0.0126381,0.29207,0.102417,0.186637,0.111305,0.69295,0.335959,0.482851,0.983455,0.873752,0.655565,0.507676,0.0168568,0.730429,0.710848,0.579551,0.466718,0.51246,0.134898,0.893654,0.0257703,0.419501,0.146849,0.790181,0.0844617,0.0248281,0.22218,0.911766,0.792767,0.0363104,0.343918,0.805406,0.328381,0.446335,0.992043,0.439686,0.139285,0.328002,0.922537,0.12274,0.201754,0.578102,0.630416,0.218611,0.308531,0.341264,0.798162,0.775249,0.853723,0.93306,0.668903,0.879494,0.352561,0.815752,0.669675,0.437023,0.84058,0.89185
5,0.348789,0.633348,0.928166,0.692706,0.438753,0.256546,0.139041,0.430796,0.696232,0.278326,0.758798,0.618769,0.401067,0.960552,0.196871,0.0314825,0.179163,0.505402,0.372746,0.977325,0.280651,0.22647,0.910386,0.949554,0.105964,0.262947,0.765306,0.775639,0.69997,0.605886,0.667494,0.0487582,0.239234,0.59566,0.741464,0.677987,0.852206,0.880506,0.108783,0.548439,0.158832,0.867582,0.167208,0.559899,0.828134,0.364079,0.591381,0.00729746,0.869481,0.964128,0.984623,0.150131,0.190597,0.895009,0.0996847,0.296561,0.157955,0.864991,0.0722,0.857925,0.470877,0.739694,0.906683,0.710111,0.335354,0.648148,0.388098,0.187561,0.528654,0.496882,0.735999,0.687486,0.364463,0.903207,0.247384,0.192597,0.267286,0.838765,0.199895,0.136767,0.802893,0.184518,0.286898,0.99349,0.0795265,0.386583,0.290051,0.237482,0.251573,0.362251,0.0954072,0.72245,0.101946,0.00209062,0.432561,0.4373,0.650238,0.820659,0.62486,0.178892,0.317541,0.36086,0.866378,0.682004,0.264067,0.113762,0.874602,0.531354,0.952527,0.0744965,0.66812,0.75542,0.259014,0.955018,0.74891,0.338541,0.341601,0.0389612,0.576023,0.593174,0.401213,0.67143,0.315625,0.503158,0.673521,0.748185,0.940458,0.323759,0.568845,0.565318,0.502651,0.886385,0.926178,0.369029,0.568389,0.190245,0.48279,0.442991,0.721599,0.435317,0.517488,0.389719,0.190737,0.776502,0.344738,0.939647,0.115043,0.686339,0.978608,0.691066,0.279513,0.37982,0.362496,0.595138,0.882979,0.0360163,0.343323,0.823437,0.359775,0.912168,0.388755,0.862427,0.798553,0.314934,0.231455,0.366943,0.505179,0.714246,0.809934,0.226778,0.149563,0.327421,0.616497,0.3403,0.103923,0.961235,0.279946,0.218966,0.647574,0.258554,0.910031,0.927087,0.638374,0.272527,0.522225,0.521353,0.308543,0.865548,0.34479,0.668319,0.777716,0.733545,0.530745,0.576269,0.048479,0.7622,0.943212,0.553658,0.476446,0.753146,0.780436,0.626009,0.0805668,0.396933,0.966308,0.18449,0.358168,0.246254,0.403455,0.00574229,0.504808,0.313487,0.932829,0.143182,0.586013,0.455054,0.664535,0.894556,0.320603,0.0093253,0.562875,0.0983187,0.742871,0.0936202,0.674588,0.79135,0.855821,0.6178,0.345008,0.332267,0.370946,0.125444,0.958276,0.451512,0.522377,0.924584,0.636002,0.880546,0.170839,0.0394576,0.886288,0.675647,0.352944,0.819117,0.818829,0.938958,0.274172,0.483365,0.833514,0.594774,0.49269,0.396389,0.693093,0.235561,0.490009,0.367681,0.0269101,0.34583,0.985481,0.371918,0.678097,0.356427,0.497361,0.636373,0.807939,0.0197386,0.560957,0.443941,0.900284,0.731795,0.483399,0.786572,0.407442,0.836343,0.605689,0.226271,0.7753,0.879861,0.709636,0.608814,0.474635,0.202326,0.0052036,0.167728,0.437886,0.495213,0.535409,0.464796,0.841043,0.52089,0.836714,0.51914,0.877317,0.334076,0.155512,0.685256,0.353814,0.716469,0.129197,0.254098,0.448264,0.612595,0.0406702,0.855707,0.448938,0.646359,0.0819779,0.224239,0.52622,0.791614,0.833053,0.000855452,0.99394,0.838257,0.168584,0.431826,0.33347,0.703993,0.896622,0.174513,0.224883,0.733336,0.693653,0.102199,0.0674118,0.849165,0.787455,0.421226,0.565634,0.916652,0.675324,0.0138985,0.529247,0.715994,0.869605,0.978185,0.362354,0.951583,0.202424,0.888574,0.743197,0.0354774,0.88943,0.737136,0.873734,0.0580131,0.168962,0.207204,0.762006,0.0655842,0.381717,0.986888,0.79892,0.0753692,0.0890878,0.866332,0.924534,0.876543,0.287558,0.490168,0.793195,0.962882,0.504067,0.322442,0.678877,0.373672,0.300627,0.0412303,0.325254,0.503051,0.929804,0.0684511,0.538529,0.819234,0.805587,0.412263,0.877247,0.974549,0.619467,0.639253,0.0401335,0.00118375,0.626141,0.839054,0.0765529,0.715229,0.705386,0.001087,0.591772,0.992944,0.491255,0.384966,0.955827,0.995322,0.70
7408,0.634703,0.368993,0.00803532,0.675933,0.694248,0.511087,0.605738,0.762699,0.0496155,0.424972,0.568286,0.461879,0.302218,0.542835,0.0813456,0.941471,0.582969,0.0825294,0.567612,0.422023,0.159082,0.282841,0.127409,0.160169,0.874613,0.120353,0.651424,0.259579,0.0761797,0.646746,0.966987,0.710883,0.0157391,0.975022,0.386816,0.709987,0.486109,0.992554,0.472685,0.535725,0.417526,0.0409712,0.997603,0.719744,0.583806,0.0789488,0.661215,0.166775,0.161478,0.228827,0.588798,0.32056,0.511668,0.716206,0.48073,0.38628,0.83656,0.132154,0.645859,0.912739,0.7789,0.612846,0.623622,0.794639,0.587869,0.0104382,0.504626,0.0739777,0.00299219,0.977311,0.609702,0.420518,0.0182824,0.607305,0.140262,0.602089,0.686254,0.801476,0.768864,0.847732,0.0303033,0.357662,0.168293,0.541971,0.073868,0.649022,0.928251,0.910428,0.781177,0.574111,0.823167,0.560077,0.186957,0.446789,0.354716,0.774826,0.457227,0.859342,0.848803,0.460219,0.836653,0.458506,0.880737,0.854935,0.0658109,0.0209985,0.457024,0.752065,0.822475,0.225888,0.599797,0.852778,0.58355,0.76809,0.394749,0.657418,0.417113,0.323001,0.567845,0.198289,0.897112,0.391012,0.758366,0.0840686,0.837801,0.113082,0.858894,0.295028,0.972424,0.707698,0.755247,0.809077,0.166203,0.635984,0.664012,0.232014,0.656983,0.121036,0.984079,0.479457,0.346925,0.583876,0.332236,0.930474,0.351966,0.726985,0.587892,0.769079,0.0499859,0.155738,0.967368,0.947097,0.54675,0.725734,0.031166,0.384551,0.838816,0.89006,0.679579,0.811239,0.597758,0.434826,0.620316,0.763961,0.0708103,0.284328,0.995975,0.727793,0.405364,0.980054,0.20725,0.752289,0.56393,0.539486,0.682763,0.915896,0.266471,0.270655,0.684975,0.316457,0.426393,0.652343,0.263555,0.973143,0.378077,0.294721,0.357694,0.216893,0.184781,0.037273,0.0281321,0.782539,0.472099,0.648448,0.546499,0.542909,0.932776,0.542474,0.270702,0.33814,0.522528,0.477953,0.0904286,0.086458,0.0174388,0.773192,0.00235441,0.28391,0.0438472,0.68733,0.600367,0.47024,0.339673,0.863922,0.443384,0.71775,0.158643,0.801078,0.934643,0.343424,0.838351,0.962775,0.125962,0.31045,0.611223,0.672462,0.853359,0.543999,0.214936,0.124062,0.882139,0.737464,0.602014,0.972567,0.823922,0.619453,0.745759,0.826276,0.903363,0.789606,0.513606,0.503731,0.259847,0.853279,0.367653,0.703231,0.57103,0.526295,0.504308,0.505673,0.869719,0.342659,0.468448,0.995682,0.653109,0.0796716,0.668144,0.506468,0.623671,0.88308,0.63053,0.505809,0.620544,0.232544,0.478377,0.444466,0.851998,0.224136,0.270742,0.755361,0.0137426,0.784348,0.259091,0.273589,0.637628,0.626744,0.97682,0.208658,0.15304,0.481128,0.714331,0.022759,0.823787,0.182779,0.0184408,0.476896,0.262451,0.686585,0.983364,0.886121,0.569664,0.613894,0.391931,0.190208,0.846439,0.870307,0.634674,0.698436,0.0944435,0.905417,0.453797,0.108186,0.689765,0.712888,0.381776,0.327393,0.339633,0.358596,0.53605,0.492672,0.839724,0.250381,0.515431,0.663511,0.43316,0.533872,0.140407,0.69561,0.220457,0.123771,0.581731,0.790121,0.737666,0.973662,0.980329,0.584104,0.843969,0.615004,0.28254,0.938413,0.52042,0.736337,0.0465991,0.210185,0.449226,0.428375,0.537578,0.788859,0.78697,0.0736287,0.281531,0.626694,0.32401,0.796962,0.290205,0.75717,0.330834,0.430612,0.45278,0.551291,0.554384,0.0345115,0.341412,0.292049,0.00817349,0.321741,0.876154,0.852143,0.936745,0.158694,0.790556,0.457165,0.895031,0.837155,0.667351,0.344257,0.26553,0.204929,0.133116,0.0524998,0.278557,0.414647,0.679194,0.602567,0.211609,0.969399,0.359737,0.542443,0.400011,0.812517,0.0937341,0.954395,0.847028,0.435146,0.246444,0.855202,0.756887,0.122598,0.707345,0.693632,0.281292,0.4979,0.150798,0.176323,0.
335055,0.818148,0.52058,0.600585,0.0230769,0.653696,0.653085,0.301634,0.0683426,0.332279,0.904202,0.279951,0.301678,0.263938,0.822395,0.701689,0.076455,0.916129,0.656084,0.923483,0.351275,0.902528,0.778685,0.108162,0.025126,0.48603,0.801795,0.306418,0.98393,0.952592,0.482741,0.318985,0.77074,0.0033212,0.91957,0.793817,0.657017,0.572655,0.0954514,0.72536,0.904934,0.999653,0.00531133,0.206612,0.263591,0.827706,0.908301,0.340046,0.743835,0.564385,0.263529,0.0951094,0.466913,0.0422144,0.203271,0.492039,0.528244,0.005066,0.798457,0.512174,0.957658,0.281198,0.83116,0.728398,0.284519,0.75073,0.522215,0.941536,0.323385,0.617667,0.666896,0.228319,0.61732,0.672207,0.434931,0.880911,0.499913,0.343231,0.220957,0.243748,0.907616,0.484487,0.338857,0.374529,0.526701,0.542129,0.866568,0.0549451,0.547195,0.665025,0.567119,0.504853,0.946223,0.398279,0.233251,0.230742,0.149009,0.755467,0.172278,0.472394,0.373134,0.839174,0.700713,0.990453,0.511381,0.135644,0.871364,0.0112946,0.478875,0.0923214,0.255042,0.386491,0.576808,0.5939,0.76102,0.103509,0.136029,0.627588,0.158454,0.683223,0.292613,0.725573,0.188076,0.238836,0.123852,0.421327,0.469577,0.272861,0.176794,0.641855,0.745255,0.549928,0.481029,0.445968,0.540381,0.99241,0.581611,0.411745,0.00370466,0.0604863,0.504067,0.258747,0.446977,0.0808745,0.852647,0.207997,0.184383,0.988676,0.835585,0.342838,0.671899,0.128198,0.0684108,0.859975,0.367034,0.192263,0.281302,0.836611,0.465124,0.458097,0.478466,0.210379,0.00802411,0.959495,0.656347,0.548405,0.951905,0.237958,0.96015,0.955609,0.298444,0.464217,0.214357,0.745421,0.545091,0.0670036,0.953418,0.729475,0.0556791,0.789004,0.0723122,0.727578,0.917202,0.140723,0.587553,0.284235,0.332986,0.868855,0.120846,0.79811,0.326952,0.599312,0.00848849,0.334976,0.558807,0.664835,0.883381,0.510711,0.902793,0.843531,0.466321,0.201237,0.307748,0.680678,0.946659,0.852839,0.747681,0.900077,0.582314,0.80336,0.689081,0.654626,0.530938,0.606283,0.795349,0.118491,0.890518,0.128335,0.987347,0.0113639,0.926445,0.314299,0.610676,0.934933,0.649275,0.169483,0.599768,0.532656,0.680194,0.502562] diff --git a/lcg_random.pyc b/lcg_random.pyc new file mode 100755 index 0000000..a8f354b Binary files /dev/null and b/lcg_random.pyc differ diff --git a/models/caltech_caffenet/caltech_solver.prototxt b/models/caltech_caffenet/caltech_solver.prototxt new file mode 100755 index 0000000..c7aa209 --- /dev/null +++ b/models/caltech_caffenet/caltech_solver.prototxt @@ -0,0 +1,16 @@ +# The train/test net protocol buffer definition +net: "models/caltech_caffenet/train_val_caltech.prototxt" +test_iter: 303 +test_interval: 500 +base_lr: 0.001 +regularization_type:"L2" +lr_policy: "step" +gamma: 0.1 +stepsize: 7000 #original is 3000 +display: 100 #original is 20 +max_iter: 10800 +momentum: 0.9 +weight_decay: 0.0005 +#snapshot: 5000 +snapshot_prefix: "models/caltech_caffenet/caltech_caffenet_train" +solver_mode: GPU diff --git a/models/caltech_caffenet/train_val_caltech.prototxt b/models/caltech_caffenet/train_val_caltech.prototxt new file mode 100755 index 0000000..4d98343 --- /dev/null +++ b/models/caltech_caffenet/train_val_caltech.prototxt @@ -0,0 +1,526 @@ +name: "CaffeNet" +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 227 + mean_file: "/home/data/caltech256/imagenet_mean.binaryproto" + } +# mean pixel / channel-wise mean instead of mean image +# transform_param { +# crop_size: 227 +# mean_value: 104 +# mean_value: 117 +# mean_value: 123 +# mirror: true +# } + 
data_param { + source: "/home/data/caltech256/caltech256_train_lmdb" + batch_size: 256 + backend: LMDB + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + mirror: false + crop_size: 227 + mean_file: "/home/data/caltech256/imagenet_mean.binaryproto" + } +# mean pixel / channel-wise mean instead of mean image +# transform_param { +# crop_size: 227 +# mean_value: 104 +# mean_value: 117 +# mean_value: 123 +# mirror: true +# } + data_param { + source: "/home/data/caltech256/caltech256_adsval_lmdb" + #batch_size: 15187 # the whole val data + batch_size: 50 + backend: LMDB + } +} +layer { + name: "conv1" + type: "CConvolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 11 + stride: 4 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } + cconvolution_param { + gamma: 0.00002 + power: 1 + c_rate: 1.95 + iter_stop: 100000 + weight_mask_filler { + type: "constant" + value: 1 + } + bias_mask_filler { + type: "constant" + value: 1 + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "conv1" + top: "conv1" +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "norm1" + type: "LRN" + bottom: "pool1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "conv2" + type: "CConvolution" + bottom: "norm1" + top: "conv2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 2 + kernel_size: 5 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 1 + } + } + cconvolution_param { + gamma: 0.00002 + power: 1 + c_rate: 1.95 + iter_stop: 100000 + weight_mask_filler { + type: "constant" + value: 1 + } + bias_mask_filler { + type: "constant" + value: 1 + } + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "conv2" + top: "conv2" +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "norm2" + type: "LRN" + bottom: "pool2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "conv3" + type: "CConvolution" + bottom: "norm2" + top: "conv3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } + cconvolution_param { + gamma: 0.00002 + power: 1 + c_rate: 1.95 + iter_stop: 100000 + weight_mask_filler { + type: "constant" + value: 1 + } + bias_mask_filler { + type: "constant" + value: 1 + } + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "conv3" + top: "conv3" +} +layer { + name: "conv4" + type: "CConvolution" + bottom: "conv3" + top: "conv4" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 1 + } + } + cconvolution_param { + gamma: 0.00002 + power: 1 + c_rate: 1.95 + iter_stop: 100000 + weight_mask_filler { + 
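+      # initialised to a constant 1 below, i.e. every connection starts out
+      # unpruned; the mask values are presumably updated by the CConvolution
+      # layer itself during training.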
type: "constant" + value: 1 + } + bias_mask_filler { + type: "constant" + value: 1 + } + } +} +layer { + name: "relu4" + type: "ReLU" + bottom: "conv4" + top: "conv4" +} +layer { + name: "conv5" + type: "CConvolution" + bottom: "conv4" + top: "conv5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 1 + } + } + cconvolution_param { + gamma: 0.00002 + power: 1 + c_rate: 1.95 + iter_stop: 100000 + weight_mask_filler { + type: "constant" + value: 1 + } + bias_mask_filler { + type: "constant" + value: 1 + } + } +} +layer { + name: "relu5" + type: "ReLU" + bottom: "conv5" + top: "conv5" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "fc6" + type: "CInnerProduct" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 4096 + weight_filler { + type: "gaussian" + std: 0.005 + } + bias_filler { + type: "constant" + value: 1 + } + } + cinner_product_param { + gamma: 0.0002 + power: 1 + c_rate: 2.8 + iter_stop: 100000 + weight_mask_filler { + type: "constant" + value: 1 + } + bias_mask_filler { + type: "constant" + value: 1 + } + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + name: "drop6" + type: "Dropout" + bottom: "fc6" + top: "fc6" + dropout_param { + dropout_ratio: 0.5 + #dropout_ratio: 0.0 + } +} +layer { + name: "fc7" + type: "CInnerProduct" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 4096 + weight_filler { + type: "gaussian" + std: 0.005 + } + bias_filler { + type: "constant" + value: 1 + } + } + cinner_product_param { + gamma: 0.0002 + power: 1 + c_rate: 2.8 + iter_stop: 100000 + weight_mask_filler { + type: "constant" + value: 1 + } + bias_mask_filler { + type: "constant" + value: 1 + } + } +} +layer { + name: "relu7" + type: "ReLU" + bottom: "fc7" + top: "fc7" +} +layer { + name: "drop7" + type: "Dropout" + bottom: "fc7" + top: "fc7" + dropout_param { + dropout_ratio: 0.5 + #dropout_ratio: 0.0 + } +} +layer { + #name: "new_fc8" #for imagenet pretrained model + name: "fc8*" # for trained from scratch + type: "CInnerProduct" + bottom: "fc7" + top: "fc8" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 257 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } + cinner_product_param { + gamma: 0.0002 + power: 1 + c_rate: 2.8 + iter_stop: 100000 + weight_mask_filler { + type: "constant" + value: 1 + } + bias_mask_filler { + type: "constant" + value: 1 + } + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "fc8" + bottom: "label" + top: "accuracy" + include { + phase: TEST + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "fc8" + bottom: "label" + top: "accuracy" + include { + phase: TRAIN + } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "fc8" + bottom: "label" + top: "loss" +} diff --git a/models/lenet300100/compressed_lenet300100.caffemodel b/models/lenet300100/compressed_lenet300100.caffemodel new file mode 100755 index 0000000..24c4b69 Binary files /dev/null 
and b/models/lenet300100/compressed_lenet300100.caffemodel differ diff --git a/models/lenet300100/lenet300100_iter_10000.caffemodel b/models/lenet300100/lenet300100_iter_10000.caffemodel new file mode 100755 index 0000000..c58d2a3 Binary files /dev/null and b/models/lenet300100/lenet300100_iter_10000.caffemodel differ diff --git a/models/lenet300100/lenet_solver.prototxt b/models/lenet300100/lenet_solver.prototxt new file mode 100755 index 0000000..a34f299 --- /dev/null +++ b/models/lenet300100/lenet_solver.prototxt @@ -0,0 +1,37 @@ +# The train/test net protocol buffer definition +net: "models/lenet300100/lenet_train_test.prototxt" +# test_iter specifies how many forward passes the test should carry out. +# In the case of MNIST, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +#------added by Guiying---- +#test_state: {stage:"original test"} +#test_state: {stage:"tune crates"} +#test_iter: 600 +#-----Guiying-------------- +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +# The base learning rate, momentum and the weight decay of the network. +base_lr: 0.01 +regularization_type:"L2" #added by Guiying Li +momentum: 0.9 +weight_decay: 0.0005 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 20500 +# snapshot intermediate results +#snapshot: 1000 +#snapshot: 12000 +#snapshot: 16500 +#snapshot: 17500 +#snapshot: 19500 +#snapshot: 20500 +#snapshot: 30000 +snapshot_prefix: "models/lenet300100/lenet300100" +# solver mode: CPU or GPU +solver_mode: GPU diff --git a/models/lenet300100/lenet_train_test.prototxt b/models/lenet300100/lenet_train_test.prototxt new file mode 100755 index 0000000..3bc36e5 --- /dev/null +++ b/models/lenet300100/lenet_train_test.prototxt @@ -0,0 +1,198 @@ +name: "LeNet" +layer { + name: "mnist" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "/home/data/mnist/mnist_train_lmdb" + batch_size: 64 + backend: LMDB + } +} +layer { + name: "mnist" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + #stage: "original test" + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "/home/data/mnist/mnist_test_lmdb" + batch_size: 100 + backend: LMDB + } +} +#layer { +# name: "mnist" +# type: "Data" +# top: "data" +# top: "label" +# include { +# phase: TEST +# stage: "tune crates" +# } +# transform_param { +# scale: 0.00390625 +# } +# data_param { +# source: "/home/data/mnist/mnist_train_lmdb_tmp" +# batch_size: 100 +# backend: LMDB +# } +#} +layer { + name: "ip1" + type: "CInnerProduct" + bottom: "data" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 300 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } + cinner_product_param { + gamma: 0.0002 + power: 1 + c_rate: 3.7 + iter_stop: 100000 + weight_mask_filler { + type: "constant" + value: 1 + } + bias_mask_filler { + type: "constant" + value: 1 + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "CInnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 100 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } + cinner_product_param { + gamma: 0.0002 + 
power: 1 + c_rate: 2.8 + iter_stop: 100000 + weight_mask_filler { + type: "constant" + value: 1 + } + bias_mask_filler { + type: "constant" + value: 1 + } + } +} +layer{ + name: "relu2" + type: "ReLU" + bottom: "ip2" + top: "ip2" +} +layer{ + name: "ip3" + type: "CInnerProduct" + bottom: "ip2" + top: "ip3" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } + cinner_product_param { + gamma: 0.0002 + power: 1 + c_rate: 2.8 + iter_stop: 100000 + weight_mask_filler { + type: "constant" + value: 1 + } + bias_mask_filler { + type: "constant" + value: 1 + } + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "ip3" + bottom: "label" + top: "accuracy" + include { + phase: TRAIN + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "ip3" + bottom: "label" + top: "accuracy" + include { + phase: TEST + } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "ip3" + bottom: "label" + top: "loss" +} diff --git a/models/lenet5/caffe_lenet5_original.caffemodel b/models/lenet5/caffe_lenet5_original.caffemodel new file mode 100755 index 0000000..9142ab0 Binary files /dev/null and b/models/lenet5/caffe_lenet5_original.caffemodel differ diff --git a/models/lenet5/compressed_lenet5.caffemodel b/models/lenet5/compressed_lenet5.caffemodel new file mode 100755 index 0000000..0c3999f Binary files /dev/null and b/models/lenet5/compressed_lenet5.caffemodel differ diff --git a/models/lenet5/lenet_solver.prototxt b/models/lenet5/lenet_solver.prototxt new file mode 100755 index 0000000..24fbbc0 --- /dev/null +++ b/models/lenet5/lenet_solver.prototxt @@ -0,0 +1,37 @@ +# The train/test net protocol buffer definition +net: "models/lenet5/lenet_train_test.prototxt" +# test_iter specifies how many forward passes the test should carry out. +# In the case of MNIST, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +#------added by Guiying---- +#test_state: {stage:"original test"} +#test_state: {stage:"tune crates"} +#test_iter: 600 +#-----Guiying-------------- +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +# The base learning rate, momentum and the weight decay of the network. 
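+# With the "inv" lr_policy set below, Caffe decays the learning rate as
+#   lr = base_lr * (1 + gamma * iter) ^ (-power)
+# so with base_lr 0.01, gamma 0.0001 and power 0.75 the rate falls smoothly
+# from 0.01 over the 20500 iterations of this run.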
+base_lr: 0.01 +regularization_type:"L2" #added by Guiying Li +momentum: 0.9 +weight_decay: 0.0005 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 20500 +# snapshot intermediate results +#snapshot: 1000 +#snapshot: 12000 +#snapshot: 16500 +#snapshot: 17500 +#snapshot: 19500 +#snapshot: 20500 +#snapshot: 30000 +snapshot_prefix: "models/lenet5/lenet5" +# solver mode: CPU or GPU +solver_mode: GPU diff --git a/models/lenet5/lenet_train_test.prototxt b/models/lenet5/lenet_train_test.prototxt new file mode 100755 index 0000000..472f9e5 --- /dev/null +++ b/models/lenet5/lenet_train_test.prototxt @@ -0,0 +1,253 @@ +name: "LeNet" +layer { + name: "mnist" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "/home/data/mnist/mnist_train_lmdb" + batch_size: 64 + backend: LMDB + } +} +layer { + name: "mnist" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + #stage: "original test" + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "/home/data/mnist/mnist_test_lmdb" + batch_size: 100 + backend: LMDB + } +} +#layer { +# name: "mnist" +# type: "Data" +# top: "data" +# top: "label" +# include { +# phase: TEST +# stage: "tune crates" +# } +# transform_param { +# scale: 0.00390625 +# } +# data_param { +# source: "/home/data/mnist/mnist_train_lmdb_tmp" +# batch_size: 100 +# backend: LMDB +# } +#} +layer { + name: "conv1" + type: "CConvolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } + cconvolution_param { + gamma: 0.00002 + power: 1 + c_rate: 1.95 + iter_stop: 100000 + weight_mask_filler { + type: "constant" + value: 1 + } + bias_mask_filler { + type: "constant" + value: 1 + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "CConvolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } + cconvolution_param { + gamma: 0.00002 + power: 1 + c_rate: 3.35 + iter_stop: 100000 + weight_mask_filler { + type: "constant" + value: 1 + } + bias_mask_filler { + type: "constant" + value: 1 + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "CInnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } + cinner_product_param { + gamma: 0.0002 + power: 1 + c_rate: 3.7 + iter_stop: 100000 + weight_mask_filler { + type: "constant" + value: 1 + } + bias_mask_filler { + type: "constant" + value: 1 + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "CInnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" 
+ } + bias_filler { + type: "constant" + } + } + cinner_product_param { + gamma: 0.0002 + power: 1 + c_rate: 2.8 + iter_stop: 100000 + weight_mask_filler { + type: "constant" + value: 1 + } + bias_mask_filler { + type: "constant" + value: 1 + } + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "ip2" + bottom: "label" + top: "accuracy" + include { + phase: TRAIN + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "ip2" + bottom: "label" + top: "accuracy" + include { + phase: TEST + } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "ip2" + bottom: "label" + top: "loss" +} diff --git a/ncs.py b/ncs.py new file mode 100755 index 0000000..9d6ab0c --- /dev/null +++ b/ncs.py @@ -0,0 +1,179 @@ +# ----------------------------- +# Written by Guiying Li +# Copyright@UBRI, 2016 +# ----------------------------- + +"""Python version of Negatively Correlated Search""" + +import numpy as np +import pdb + +class NCS: + 'This class contain the alogirhtm of NCS, and its API for invoking.' + + def __init__(self, parameters): + '''Init an instance of NCS.''' + self.init_value = parameters.init_value + self.stepsize = parameters.stepsize + self.bounds = parameters.bounds + self.ftarget = parameters.ftarget + self.popsize = parameters.popsize + self.Tmax = parameters.tmax + self.n = np.shape(parameters.init_value)[0] + self.xl = self.bounds[0]*np.ones([parameters.popsize, self.n]) + self.xu = self.bounds[1]*np.ones([parameters.popsize, self.n]) + self.best_k = parameters.best_k + self.k_min_f = np.zeros([self.best_k, 1]) + self.k_bestpop = np.zeros([self.best_k, self.n]) + #self.pop = np.random.rand(parameters.popsize, self.n)*0.1#(self.bounds[1] - self.bounds[0]) + #self.pop[0,:] = parameters.init_value + #the same init values + #self.pop = np.ones([parameters.popsize, self.n])*0.1 + if parameters.has_key('init_pop'): + self.pop = np.tile(parameters.init_pop, (parameters.popsize,1))[:parameters.popsize,:] + else: + self.pop = np.tile(self.init_value, (parameters.popsize,1)) + if parameters.reset_xl_to_pop: + self.xl = self.pop + + def set_initFitness(self, fitness, sigma=None): + arg_min = np.argmin(fitness) + self.min_f = fitness[arg_min] + self.bestpop = self.pop[arg_min, :] + if sigma==None: + #self.sigma = np.ones([self.popsize, self.n]) * ((np.array(self.bounds[1]) - np.array(self.bounds[0]))*1./self.popsize) + self.sigma = np.ones([self.popsize, self.n]) * self.stepsize + else: + self.sigma = np.tile(sigma, (self.popsize, 1)) + self.r = 0.99 + self.fit = np.array(fitness) + self.flag = np.zeros([self.popsize, 1]) + self.epoch = self.popsize + self.lambda_ = np.ones([self.popsize, 1]) + self.lambda_sigma = 0.1 + self.lambda_range = self.lambda_sigma + self.FES = self.popsize + self.Gen = 0 + # record best + self.k_min_f[0,0] = self.min_f + self.k_bestpop[0,:] = self.bestpop + + def set_lowerBound(self, lowerBound): + '''Set the lower bound for each individual, so that no extra search will happen''' + self.xl = lowerBound + + def stop(self): + '''Return the finishing state of algorithm''' + return self.FES > self.Tmax + + def result(self): + '''Return the results''' + return (self.bestpop, self.min_f, self.k_bestpop, self.k_min_f) + + def disp(self, count): + if self.Gen % count == 0: + print "%-----------------Best so far-----------------------%" + print "[{}]best fitness: {}".format(self.Gen/count, self.min_f) + print "k best records" + for i in range(self.best_k): + print "fitness of record[{}]:{}".format(i, self.k_min_f[i]) + print 
"%---------------------------------------------------%" + return False + else: + return False + + + def ask(self): + '''Return the next population''' + uSet = self.pop + self.sigma * np.random.randn(self.popsize, self.n) + #check the boundary + #pos = np.where((uSet < self.xl) + (uSet > self.xu)) + pos = np.where(uSet < self.xl) + uSet[pos] = self.xl[pos]+0.0001 + pos = np.where(uSet > self.xu) + uSet[pos] = self.xu[pos]-0.0001 + #while (pos[0].size > 0): + # uSet[pos] = (self.pop + self.sigma * np.random.randn(self.popsize, self.n))[pos] + # pos = np.where((uSet < self.xl) + (uSet > self.xu)) + #uSet[pos] = 2*self.xl[pos] - uSet[pos] + #bound_condition = (uSet[pos] > self.xu[pos]) + #uSet[pos] = bound_condition*self.xu[pos] + np.logical_not(bound_condition)*uSet[pos] + + #uSet[pos] = 2*self.xu[pos] - uSet[pos] + #bound_condition = (uSet[pos] < self.xl[pos]) + #uSet[pos] = bound_condition*self.xl[pos] + np.logical_not(bound_condition)*uSet[pos] + + listResult = [] + for i in range(self.popsize): + listResult.append(uSet[i,:]) + return listResult + + def tell(self, uSet, fitSet): + '''Tell the algorithm about the pair of population and fitness.''' + #record once evaluation + self.FES = self.FES + self.popsize + self.Gen = self.Gen + 1 + + uSet = np.array(uSet) + fitSet = np.array(fitSet) + + # normalize fitness values + arg_min = np.argmin(fitSet) + if fitSet[arg_min] < self.min_f: + self.min_f = fitSet[arg_min] + self.bestpop = uSet[arg_min] + #record the k best + record_tag = True + # records should be identical + for i_k in range(self.best_k): + if self.k_min_f[i_k] == self.min_f: + record_tag = False + if record_tag: + tmp_max_ind = np.argmax(self.k_min_f) + self.k_min_f[tmp_max_ind, 0] = self.min_f + self.k_bestpop[tmp_max_ind, :] = self.bestpop + + tempFit = self.fit - self.min_f + tempTrialFit = fitSet - self.min_f + normFit = tempFit / (tempFit + tempTrialFit) + normTrialFit = tempTrialFit / (tempFit + tempTrialFit) + + # calculate the BHattacharyya distance + pCorr = 1e300*np.ones([self.popsize, self.popsize]) + trialCorr = 1e300*np.ones([self.popsize, self.popsize]) + + for i in range(self.popsize): + for j in range(self.popsize): + if j != i: + # BD + m1 = self.pop[i,:] - self.pop[j,:] + c1 = (np.power(self.sigma[i,:],2) + np.power(self.sigma[j,:],2))/2. 
+ tempD = 0 + for k in range(self.n): + tempD = tempD + np.log(c1[k]) - 0.5*(np.log(np.power(self.sigma[i,k],2)) + np.log(np.power(self.sigma[j,k],2))) + pCorr[i,j] = (1./8) * m1.dot(np.diag(1./c1)).dot(np.transpose(m1)) + 0.5*tempD + # BD + m2 = uSet[i,:] - self.pop[j,:] + trialCorr[i,j] = (1./8) * m2.dot(np.diag(1./c1)).dot(np.transpose(m2)) + 0.5*tempD + pMinCorr = pCorr.min(1) + trialMinCorr = trialCorr.min(1) + + # normalize correlation values + normCorr = pMinCorr / (pMinCorr + trialMinCorr) + normTrialCorr = trialMinCorr / (pMinCorr + trialMinCorr) + self.lambda_ = 1 + self.lambda_sigma*np.random.randn(self.popsize) + self.lambda_sigma = self.lambda_range - self.lambda_range*self.Gen/(self.Tmax*1./self.popsize) + pos = np.where(((self.lambda_ * normTrialCorr) > normTrialFit)*(fitSet < 0)) + pos = pos[0] + self.pop[pos, :] = uSet[pos, :] + self.fit[pos] = fitSet[pos] + self.flag[pos] = self.flag[pos] + 1 + # i/5 successful rule + if self.Gen % self.epoch == 0: + for i in range(self.popsize): + if self.flag[i]*1./self.epoch > 0.2: + self.sigma[i,:] = self.sigma[i,:]/self.r + elif self.flag[i]*1./self.epoch < 0.2: + self.sigma[i,:] = self.sigma[i,:]*self.r + self.flag = np.zeros([self.popsize,1]) + diff --git a/ncs.pyc b/ncs.pyc new file mode 100755 index 0000000..b93b131 Binary files /dev/null and b/ncs.pyc differ diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt new file mode 100755 index 0000000..df0401d --- /dev/null +++ b/python/CMakeLists.txt @@ -0,0 +1,34 @@ +if(NOT HAVE_PYTHON) + message(STATUS "Python interface is disabled or not all required dependecies found. Building without it...") + return() +endif() + +include_directories(${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR} ${Boost_INCLUDE_DIRS}) +file(GLOB_RECURSE python_srcs ${PROJECT_SOURCE_DIR}/python/*.cpp) + +add_library(pycaffe SHARED ${python_srcs}) +target_link_libraries(pycaffe ${Caffe_LINK} ${PYTHON_LIBRARIES} ${Boost_LIBRARIES}) +set_target_properties(pycaffe PROPERTIES PREFIX "" OUTPUT_NAME "_caffe") +caffe_default_properties(pycaffe) + +if(UNIX OR APPLE) + set(__linkname "${PROJECT_SOURCE_DIR}/python/caffe/_caffe.so") + add_custom_command(TARGET pycaffe POST_BUILD + COMMAND ln -sf $ "${__linkname}" + COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_SOURCE_DIR}/python/caffe/proto + COMMAND touch ${PROJECT_SOURCE_DIR}/python/caffe/proto/__init__.py + COMMAND cp ${proto_gen_folder}/*.py ${PROJECT_SOURCE_DIR}/python/caffe/proto/ + COMMENT "Creating symlink ${__linkname} -> ${PROJECT_BINARY_DIR}/lib/_caffe${CAffe_POSTFIX}.so") +endif() + +# ---[ Install +file(GLOB files1 *.py requirements.txt) +install(FILES ${files1} DESTINATION python) + +file(GLOB files2 caffe/*.py) +install(FILES ${files2} DESTINATION python/caffe) +install(TARGETS pycaffe DESTINATION python/caffe) +install(DIRECTORY caffe/imagenet caffe/proto caffe/test DESTINATION python/caffe) + + + diff --git a/python/caffe/__init__.py b/python/caffe/__init__.py new file mode 100755 index 0000000..6cc44e7 --- /dev/null +++ b/python/caffe/__init__.py @@ -0,0 +1,7 @@ +from .pycaffe import Net, SGDSolver +from ._caffe import set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver, layer_type_list +from .proto.caffe_pb2 import TRAIN, TEST +from .classifier import Classifier +from .detector import Detector +from . 
import io +from .net_spec import layers, params, NetSpec, to_proto diff --git a/python/caffe/__init__.pyc b/python/caffe/__init__.pyc new file mode 100755 index 0000000..12cab80 Binary files /dev/null and b/python/caffe/__init__.pyc differ diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp new file mode 100755 index 0000000..020a5be --- /dev/null +++ b/python/caffe/_caffe.cpp @@ -0,0 +1,312 @@ +#include // NOLINT(build/include_alpha) + +// Produce deprecation warnings (needs to come before arrayobject.h inclusion). +#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + +#include +#include +#include +#include +#include + +// these need to be included after boost on OS X +#include // NOLINT(build/include_order) +#include // NOLINT(build/include_order) +#include // NOLINT + +#include "caffe/caffe.hpp" +#include "caffe/python_layer.hpp" + +// Temporary solution for numpy < 1.7 versions: old macro, no promises. +// You're strongly advised to upgrade to >= 1.7. +#ifndef NPY_ARRAY_C_CONTIGUOUS +#define NPY_ARRAY_C_CONTIGUOUS NPY_C_CONTIGUOUS +#define PyArray_SetBaseObject(arr, x) (PyArray_BASE(arr) = (x)) +#endif + +namespace bp = boost::python; + +namespace caffe { + +// For Python, for now, we'll just always use float as the type. +typedef float Dtype; +const int NPY_DTYPE = NPY_FLOAT32; + +// Selecting mode. +void set_mode_cpu() { Caffe::set_mode(Caffe::CPU); } +void set_mode_gpu() { Caffe::set_mode(Caffe::GPU); } + +// For convenience, check that input files can be opened, and raise an +// exception that boost will send to Python if not (caffe could still crash +// later if the input files are disturbed before they are actually used, but +// this saves frustration in most cases). +static void CheckFile(const string& filename) { + std::ifstream f(filename.c_str()); + if (!f.good()) { + f.close(); + throw std::runtime_error("Could not open file " + filename); + } + f.close(); +} + +void CheckContiguousArray(PyArrayObject* arr, string name, + int channels, int height, int width) { + if (!(PyArray_FLAGS(arr) & NPY_ARRAY_C_CONTIGUOUS)) { + throw std::runtime_error(name + " must be C contiguous"); + } + if (PyArray_NDIM(arr) != 4) { + throw std::runtime_error(name + " must be 4-d"); + } + if (PyArray_TYPE(arr) != NPY_FLOAT32) { + throw std::runtime_error(name + " must be float32"); + } + if (PyArray_DIMS(arr)[1] != channels) { + throw std::runtime_error(name + " has wrong number of channels"); + } + if (PyArray_DIMS(arr)[2] != height) { + throw std::runtime_error(name + " has wrong height"); + } + if (PyArray_DIMS(arr)[3] != width) { + throw std::runtime_error(name + " has wrong width"); + } +} + +// Net constructor for passing phase as int +shared_ptr > Net_Init( + string param_file, int phase) { + CheckFile(param_file); + + shared_ptr > net(new Net(param_file, + static_cast(phase))); + return net; +} + +// Net construct-and-load convenience constructor +shared_ptr > Net_Init_Load( + string param_file, string pretrained_param_file, int phase) { + CheckFile(param_file); + CheckFile(pretrained_param_file); + + shared_ptr > net(new Net(param_file, + static_cast(phase))); + net->CopyTrainedLayersFrom(pretrained_param_file); + return net; +} + +void Net_Save(const Net& net, string filename) { + NetParameter net_param; + net.ToProto(&net_param, false); + WriteProtoToBinaryFile(net_param, filename.c_str()); +} + +void Net_SetInputArrays(Net* net, bp::object data_obj, + bp::object labels_obj) { + // check that this network has an input MemoryDataLayer + shared_ptr > md_layer = + 
boost::dynamic_pointer_cast >(net->layers()[0]); + if (!md_layer) { + throw std::runtime_error("set_input_arrays may only be called if the" + " first layer is a MemoryDataLayer"); + } + + // check that we were passed appropriately-sized contiguous memory + PyArrayObject* data_arr = + reinterpret_cast(data_obj.ptr()); + PyArrayObject* labels_arr = + reinterpret_cast(labels_obj.ptr()); + CheckContiguousArray(data_arr, "data array", md_layer->channels(), + md_layer->height(), md_layer->width()); + CheckContiguousArray(labels_arr, "labels array", 1, 1, 1); + if (PyArray_DIMS(data_arr)[0] != PyArray_DIMS(labels_arr)[0]) { + throw std::runtime_error("data and labels must have the same first" + " dimension"); + } + if (PyArray_DIMS(data_arr)[0] % md_layer->batch_size() != 0) { + throw std::runtime_error("first dimensions of input arrays must be a" + " multiple of batch size"); + } + + md_layer->Reset(static_cast(PyArray_DATA(data_arr)), + static_cast(PyArray_DATA(labels_arr)), + PyArray_DIMS(data_arr)[0]); +} + +Solver* GetSolverFromFile(const string& filename) { + SolverParameter param; + ReadProtoFromTextFileOrDie(filename, ¶m); + return GetSolver(param); +} + +struct NdarrayConverterGenerator { + template struct apply; +}; + +template <> +struct NdarrayConverterGenerator::apply { + struct type { + PyObject* operator() (Dtype* data) const { + // Just store the data pointer, and add the shape information in postcall. + return PyArray_SimpleNewFromData(0, NULL, NPY_DTYPE, data); + } + const PyTypeObject* get_pytype() { + return &PyArray_Type; + } + }; +}; + +struct NdarrayCallPolicies : public bp::default_call_policies { + typedef NdarrayConverterGenerator result_converter; + PyObject* postcall(PyObject* pyargs, PyObject* result) { + bp::object pyblob = bp::extract(pyargs)()[0]; + shared_ptr > blob = + bp::extract > >(pyblob); + // Free the temporary pointer-holding array, and construct a new one with + // the shape information from the blob. + void* data = PyArray_DATA(reinterpret_cast(result)); + Py_DECREF(result); + const int num_axes = blob->num_axes(); + vector dims(blob->shape().begin(), blob->shape().end()); + PyObject *arr_obj = PyArray_SimpleNewFromData(num_axes, dims.data(), + NPY_FLOAT32, data); + // SetBaseObject steals a ref, so we need to INCREF. + Py_INCREF(pyblob.ptr()); + PyArray_SetBaseObject(reinterpret_cast(arr_obj), + pyblob.ptr()); + return arr_obj; + } +}; + +bp::object Blob_Reshape(bp::tuple args, bp::dict kwargs) { + if (bp::len(kwargs) > 0) { + throw std::runtime_error("Blob.reshape takes no kwargs"); + } + Blob* self = bp::extract*>(args[0]); + vector shape(bp::len(args) - 1); + for (int i = 1; i < bp::len(args); ++i) { + shape[i - 1] = bp::extract(args[i]); + } + self->Reshape(shape); + // We need to explicitly return None to use bp::raw_function. 
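+  // bp::raw_function hands Blob_Reshape the raw *args tuple, which is what
+  // lets blob.reshape(...) take a variable number of dimensions from Python,
+  // e.g. net.blobs['data'].reshape(1, 3, 227, 227); a raw function must still
+  // return an object, hence the explicit None below.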
+ return bp::object(); +} + +BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(SolveOverloads, Solve, 0, 1); + +BOOST_PYTHON_MODULE(_caffe) { + // below, we prepend an underscore to methods that will be replaced + // in Python + // Caffe utility functions + bp::def("set_mode_cpu", &set_mode_cpu); + bp::def("set_mode_gpu", &set_mode_gpu); + bp::def("set_device", &Caffe::SetDevice); + + bp::def("layer_type_list", &LayerRegistry::LayerTypeList); + + bp::class_, shared_ptr >, boost::noncopyable >("Net", + bp::no_init) + .def("__init__", bp::make_constructor(&Net_Init)) + .def("__init__", bp::make_constructor(&Net_Init_Load)) + .def("_forward", &Net::ForwardFromTo) + .def("_backward", &Net::BackwardFromTo) + .def("reshape", &Net::Reshape) + // The cast is to select a particular overload. + .def("copy_from", static_cast::*)(const string)>( + &Net::CopyTrainedLayersFrom)) + .def("share_with", &Net::ShareTrainedLayersWith) + .add_property("_blob_loss_weights", bp::make_function( + &Net::blob_loss_weights, bp::return_internal_reference<>())) + .add_property("_blobs", bp::make_function(&Net::blobs, + bp::return_internal_reference<>())) + .add_property("layers", bp::make_function(&Net::layers, + bp::return_internal_reference<>())) + .add_property("_blob_names", bp::make_function(&Net::blob_names, + bp::return_value_policy())) + .add_property("_layer_names", bp::make_function(&Net::layer_names, + bp::return_value_policy())) + .add_property("_inputs", bp::make_function(&Net::input_blob_indices, + bp::return_value_policy())) + .add_property("_outputs", + bp::make_function(&Net::output_blob_indices, + bp::return_value_policy())) + .def("_set_input_arrays", &Net_SetInputArrays, + bp::with_custodian_and_ward<1, 2, bp::with_custodian_and_ward<1, 3> >()) + .def("save", &Net_Save); + + bp::class_, shared_ptr >, boost::noncopyable>( + "Blob", bp::no_init) + .add_property("shape", + bp::make_function( + static_cast& (Blob::*)() const>( + &Blob::shape), + bp::return_value_policy())) + .add_property("num", &Blob::num) + .add_property("channels", &Blob::channels) + .add_property("height", &Blob::height) + .add_property("width", &Blob::width) + .add_property("count", static_cast::*)() const>( + &Blob::count)) + .def("reshape", bp::raw_function(&Blob_Reshape)) + .add_property("data", bp::make_function(&Blob::mutable_cpu_data, + NdarrayCallPolicies())) + .add_property("diff", bp::make_function(&Blob::mutable_cpu_diff, + NdarrayCallPolicies())); + + bp::class_, shared_ptr >, + boost::noncopyable>("Layer", bp::init()) + .add_property("blobs", bp::make_function(&Layer::blobs, + bp::return_internal_reference<>())) + .def("setup", &Layer::LayerSetUp) + .def("reshape", &Layer::Reshape) + .add_property("type", bp::make_function(&Layer::type)); + bp::register_ptr_to_python > >(); + + bp::class_("LayerParameter", bp::no_init); + + bp::class_, shared_ptr >, boost::noncopyable>( + "Solver", bp::no_init) + .add_property("net", &Solver::net) + .add_property("test_nets", bp::make_function(&Solver::test_nets, + bp::return_internal_reference<>())) + .add_property("iter", &Solver::iter) + .def("solve", static_cast::*)(const char*)>( + &Solver::Solve), SolveOverloads()) + .def("step", &Solver::Step) + .def("restore", &Solver::Restore); + + bp::class_, bp::bases >, + shared_ptr >, boost::noncopyable>( + "SGDSolver", bp::init()); + bp::class_, bp::bases >, + shared_ptr >, boost::noncopyable>( + "NesterovSolver", bp::init()); + bp::class_, bp::bases >, + shared_ptr >, boost::noncopyable>( + "AdaGradSolver", bp::init()); + + bp::def("get_solver", 
&GetSolverFromFile, + bp::return_value_policy()); + + // vector wrappers for all the vector types we use + bp::class_ > > >("BlobVec") + .def(bp::vector_indexing_suite > >, true>()); + bp::class_*> >("RawBlobVec") + .def(bp::vector_indexing_suite*>, true>()); + bp::class_ > > >("LayerVec") + .def(bp::vector_indexing_suite > >, true>()); + bp::class_ >("StringVec") + .def(bp::vector_indexing_suite >()); + bp::class_ >("IntVec") + .def(bp::vector_indexing_suite >()); + bp::class_ >("DtypeVec") + .def(bp::vector_indexing_suite >()); + bp::class_ > > >("NetVec") + .def(bp::vector_indexing_suite > >, true>()); + bp::class_ >("BoolVec") + .def(bp::vector_indexing_suite >()); + + // boost python expects a void (missing) return value, while import_array + // returns NULL for python3. import_array1() forces a void return value. + import_array1(); +} + +} // namespace caffe diff --git a/python/caffe/classifier.py b/python/caffe/classifier.py new file mode 100755 index 0000000..537193d --- /dev/null +++ b/python/caffe/classifier.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python +""" +Classifier is an image classifier specialization of Net. +""" + +import numpy as np + +import caffe + + +class Classifier(caffe.Net): + """ + Classifier extends Net for image class prediction + by scaling, center cropping, or oversampling. + + Parameters + ---------- + image_dims : dimensions to scale input for cropping/sampling. + Default is to scale to net input size for whole-image crop. + mean, input_scale, raw_scale, channel_swap: params for + preprocessing options. + """ + def __init__(self, model_file, pretrained_file, image_dims=None, + mean=None, input_scale=None, raw_scale=None, + channel_swap=None): + caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST) + + # configure pre-processing + in_ = self.inputs[0] + self.transformer = caffe.io.Transformer( + {in_: self.blobs[in_].data.shape}) + self.transformer.set_transpose(in_, (2, 0, 1)) + if mean is not None: + self.transformer.set_mean(in_, mean) + if input_scale is not None: + self.transformer.set_input_scale(in_, input_scale) + if raw_scale is not None: + self.transformer.set_raw_scale(in_, raw_scale) + if channel_swap is not None: + self.transformer.set_channel_swap(in_, channel_swap) + + self.crop_dims = np.array(self.blobs[in_].data.shape[2:]) + if not image_dims: + image_dims = self.crop_dims + self.image_dims = image_dims + + def predict(self, inputs, oversample=True): + """ + Predict classification probabilities of inputs. + + Parameters + ---------- + inputs : iterable of (H x W x K) input ndarrays. + oversample : boolean + average predictions across center, corners, and mirrors + when True (default). Center-only prediction when False. + + Returns + ------- + predictions: (N x C) ndarray of class probabilities for N images and C + classes. + """ + # Scale to standardize input dimensions. + input_ = np.zeros((len(inputs), + self.image_dims[0], + self.image_dims[1], + inputs[0].shape[2]), + dtype=np.float32) + for ix, in_ in enumerate(inputs): + input_[ix] = caffe.io.resize_image(in_, self.image_dims) + + if oversample: + # Generate center, corner, and mirrored crops. + input_ = caffe.io.oversample(input_, self.crop_dims) + else: + # Take center crop. 
+ center = np.array(self.image_dims) / 2.0 + crop = np.tile(center, (1, 2))[0] + np.concatenate([ + -self.crop_dims / 2.0, + self.crop_dims / 2.0 + ]) + input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :] + + # Classify + caffe_in = np.zeros(np.array(input_.shape)[[0, 3, 1, 2]], + dtype=np.float32) + for ix, in_ in enumerate(input_): + caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_) + out = self.forward_all(**{self.inputs[0]: caffe_in}) + predictions = out[self.outputs[0]] + + # For oversampling, average predictions across crops. + if oversample: + predictions = predictions.reshape((len(predictions) / 10, 10, -1)) + predictions = predictions.mean(1) + + return predictions diff --git a/python/caffe/classifier.pyc b/python/caffe/classifier.pyc new file mode 100755 index 0000000..26b3123 Binary files /dev/null and b/python/caffe/classifier.pyc differ diff --git a/python/caffe/detector.py b/python/caffe/detector.py new file mode 100755 index 0000000..75cd3b1 --- /dev/null +++ b/python/caffe/detector.py @@ -0,0 +1,216 @@ +#!/usr/bin/env python +""" +Do windowed detection by classifying a number of images/crops at once, +optionally using the selective search window proposal method. + +This implementation follows ideas in + Ross Girshick, Jeff Donahue, Trevor Darrell, Jitendra Malik. + Rich feature hierarchies for accurate object detection and semantic + segmentation. + http://arxiv.org/abs/1311.2524 + +The selective_search_ijcv_with_python code required for the selective search +proposal mode is available at + https://github.com/sergeyk/selective_search_ijcv_with_python +""" +import numpy as np +import os + +import caffe + + +class Detector(caffe.Net): + """ + Detector extends Net for windowed detection by a list of crops or + selective search proposals. + + Parameters + ---------- + mean, input_scale, raw_scale, channel_swap : params for preprocessing + options. + context_pad : amount of surrounding context to take s.t. a `context_pad` + sized border of pixels in the network input image is context, as in + R-CNN feature extraction. + """ + def __init__(self, model_file, pretrained_file, mean=None, + input_scale=None, raw_scale=None, channel_swap=None, + context_pad=None): + caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST) + + # configure pre-processing + in_ = self.inputs[0] + self.transformer = caffe.io.Transformer( + {in_: self.blobs[in_].data.shape}) + self.transformer.set_transpose(in_, (2, 0, 1)) + if mean is not None: + self.transformer.set_mean(in_, mean) + if input_scale is not None: + self.transformer.set_input_scale(in_, input_scale) + if raw_scale is not None: + self.transformer.set_raw_scale(in_, raw_scale) + if channel_swap is not None: + self.transformer.set_channel_swap(in_, channel_swap) + + self.configure_crop(context_pad) + + def detect_windows(self, images_windows): + """ + Do windowed detection over given images and windows. Windows are + extracted then warped to the input dimensions of the net. + + Parameters + ---------- + images_windows: (image filename, window list) iterable. + context_crop: size of context border to crop in pixels. + + Returns + ------- + detections: list of {filename: image filename, window: crop coordinates, + predictions: prediction vector} dicts. + """ + # Extract windows. 
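+        # images_windows yields (image filename, window list) pairs, where each
+        # window is a (ymin, xmin, ymax, xmax) box in image coordinates; this is
+        # the same convention crop() below expects. All windows are cropped
+        # (with optional context padding) first, then classified in one batched
+        # forward pass.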
+ window_inputs = [] + for image_fname, windows in images_windows: + image = caffe.io.load_image(image_fname).astype(np.float32) + for window in windows: + window_inputs.append(self.crop(image, window)) + + # Run through the net (warping windows to input dimensions). + in_ = self.inputs[0] + caffe_in = np.zeros((len(window_inputs), window_inputs[0].shape[2]) + + self.blobs[in_].data.shape[2:], + dtype=np.float32) + for ix, window_in in enumerate(window_inputs): + caffe_in[ix] = self.transformer.preprocess(in_, window_in) + out = self.forward_all(**{in_: caffe_in}) + predictions = out[self.outputs[0]].squeeze(axis=(2, 3)) + + # Package predictions with images and windows. + detections = [] + ix = 0 + for image_fname, windows in images_windows: + for window in windows: + detections.append({ + 'window': window, + 'prediction': predictions[ix], + 'filename': image_fname + }) + ix += 1 + return detections + + def detect_selective_search(self, image_fnames): + """ + Do windowed detection over Selective Search proposals by extracting + the crop and warping to the input dimensions of the net. + + Parameters + ---------- + image_fnames: list + + Returns + ------- + detections: list of {filename: image filename, window: crop coordinates, + predictions: prediction vector} dicts. + """ + import selective_search_ijcv_with_python as selective_search + # Make absolute paths so MATLAB can find the files. + image_fnames = [os.path.abspath(f) for f in image_fnames] + windows_list = selective_search.get_windows( + image_fnames, + cmd='selective_search_rcnn' + ) + # Run windowed detection on the selective search list. + return self.detect_windows(zip(image_fnames, windows_list)) + + def crop(self, im, window): + """ + Crop a window from the image for detection. Include surrounding context + according to the `context_pad` configuration. + + Parameters + ---------- + im: H x W x K image ndarray to crop. + window: bounding box coordinates as ymin, xmin, ymax, xmax. + + Returns + ------- + crop: cropped window. + """ + # Crop window from the image. + crop = im[window[0]:window[2], window[1]:window[3]] + + if self.context_pad: + box = window.copy() + crop_size = self.blobs[self.inputs[0]].width # assumes square + scale = crop_size / (1. * crop_size - self.context_pad * 2) + # Crop a box + surrounding context. + half_h = (box[2] - box[0] + 1) / 2. + half_w = (box[3] - box[1] + 1) / 2. + center = (box[0] + half_h, box[1] + half_w) + scaled_dims = scale * np.array((-half_h, -half_w, half_h, half_w)) + box = np.round(np.tile(center, 2) + scaled_dims) + full_h = box[2] - box[0] + 1 + full_w = box[3] - box[1] + 1 + scale_h = crop_size / full_h + scale_w = crop_size / full_w + pad_y = round(max(0, -box[0]) * scale_h) # amount out-of-bounds + pad_x = round(max(0, -box[1]) * scale_w) + + # Clip box to image dimensions. 
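+            # scale_h / scale_w map clipped-box pixels to network-input pixels,
+            # while pad_y / pad_x (computed above) offset the pasted crop so
+            # that any part of the padded box lying outside the image is left
+            # as mean padding instead.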
+ im_h, im_w = im.shape[:2] + box = np.clip(box, 0., [im_h, im_w, im_h, im_w]) + clip_h = box[2] - box[0] + 1 + clip_w = box[3] - box[1] + 1 + assert(clip_h > 0 and clip_w > 0) + crop_h = round(clip_h * scale_h) + crop_w = round(clip_w * scale_w) + if pad_y + crop_h > crop_size: + crop_h = crop_size - pad_y + if pad_x + crop_w > crop_size: + crop_w = crop_size - pad_x + + # collect with context padding and place in input + # with mean padding + context_crop = im[box[0]:box[2], box[1]:box[3]] + context_crop = caffe.io.resize_image(context_crop, (crop_h, crop_w)) + crop = np.ones(self.crop_dims, dtype=np.float32) * self.crop_mean + crop[pad_y:(pad_y + crop_h), pad_x:(pad_x + crop_w)] = context_crop + + return crop + + def configure_crop(self, context_pad): + """ + Configure crop dimensions and amount of context for cropping. + If context is included, make the special input mean for context padding. + + Parameters + ---------- + context_pad : amount of context for cropping. + """ + # crop dimensions + in_ = self.inputs[0] + tpose = self.transformer.transpose[in_] + inv_tpose = [tpose[t] for t in tpose] + self.crop_dims = np.array(self.blobs[in_].data.shape[1:])[inv_tpose] + #.transpose(inv_tpose) + # context padding + self.context_pad = context_pad + if self.context_pad: + in_ = self.inputs[0] + transpose = self.transformer.transpose.get(in_) + channel_order = self.transformer.channel_swap.get(in_) + raw_scale = self.transformer.raw_scale.get(in_) + # Padding context crops needs the mean in unprocessed input space. + mean = self.transformer.mean.get(in_) + if mean is not None: + inv_transpose = [transpose[t] for t in transpose] + crop_mean = mean.copy().transpose(inv_transpose) + if channel_order is not None: + channel_order_inverse = [channel_order.index(i) + for i in range(crop_mean.shape[2])] + crop_mean = crop_mean[:, :, channel_order_inverse] + if raw_scale is not None: + crop_mean /= raw_scale + self.crop_mean = crop_mean + else: + self.crop_mean = np.zeros(self.crop_dims, dtype=np.float32) diff --git a/python/caffe/detector.pyc b/python/caffe/detector.pyc new file mode 100755 index 0000000..eed6c8f Binary files /dev/null and b/python/caffe/detector.pyc differ diff --git a/python/caffe/draw.py b/python/caffe/draw.py new file mode 100755 index 0000000..324929d --- /dev/null +++ b/python/caffe/draw.py @@ -0,0 +1,213 @@ +""" +Caffe network visualization: draw the NetParameter protobuffer. + + +.. note:: + + This requires pydot>=1.0.2, which is not included in requirements.txt since + it requires graphviz and other prerequisites outside the scope of the + Caffe. +""" + +from caffe.proto import caffe_pb2 +import pydot + +# Internal layer and blob styles. +LAYER_STYLE_DEFAULT = {'shape': 'record', + 'fillcolor': '#6495ED', + 'style': 'filled'} +NEURON_LAYER_STYLE = {'shape': 'record', + 'fillcolor': '#90EE90', + 'style': 'filled'} +BLOB_STYLE = {'shape': 'octagon', + 'fillcolor': '#E0E0E0', + 'style': 'filled'} + + +def get_pooling_types_dict(): + """Get dictionary mapping pooling type number to type name + """ + desc = caffe_pb2.PoolingParameter.PoolMethod.DESCRIPTOR + d = {} + for k, v in desc.values_by_name.items(): + d[v.number] = k + return d + + +def get_edge_label(layer): + """Define edge label based on layer type. 
+ """ + + if layer.type == 'Data': + edge_label = 'Batch ' + str(layer.data_param.batch_size) + elif layer.type == 'Convolution': + edge_label = str(layer.convolution_param.num_output) + elif layer.type == 'InnerProduct': + edge_label = str(layer.inner_product_param.num_output) + else: + edge_label = '""' + + return edge_label + + +def get_layer_label(layer, rankdir): + """Define node label based on layer type. + + Parameters + ---------- + layer : ? + rankdir : {'LR', 'TB', 'BT'} + Direction of graph layout. + + Returns + ------- + string : + A label for the current layer + """ + + if rankdir in ('TB', 'BT'): + # If graph orientation is vertical, horizontal space is free and + # vertical space is not; separate words with spaces + separator = ' ' + else: + # If graph orientation is horizontal, vertical space is free and + # horizontal space is not; separate words with newlines + separator = '\\n' + + if layer.type == 'Convolution': + # Outer double quotes needed or else colon characters don't parse + # properly + node_label = '"%s%s(%s)%skernel size: %d%sstride: %d%spad: %d"' %\ + (layer.name, + separator, + layer.type, + separator, + layer.convolution_param.kernel_size, + separator, + layer.convolution_param.stride, + separator, + layer.convolution_param.pad) + elif layer.type == 'Pooling': + pooling_types_dict = get_pooling_types_dict() + node_label = '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d"' %\ + (layer.name, + separator, + pooling_types_dict[layer.pooling_param.pool], + layer.type, + separator, + layer.pooling_param.kernel_size, + separator, + layer.pooling_param.stride, + separator, + layer.pooling_param.pad) + else: + node_label = '"%s%s(%s)"' % (layer.name, separator, layer.type) + return node_label + + +def choose_color_by_layertype(layertype): + """Define colors for nodes based on the layer type. + """ + color = '#6495ED' # Default + if layertype == 'Convolution': + color = '#FF5050' + elif layertype == 'Pooling': + color = '#FF9900' + elif layertype == 'InnerProduct': + color = '#CC33FF' + return color + + +def get_pydot_graph(caffe_net, rankdir, label_edges=True): + """Create a data structure which represents the `caffe_net`. + + Parameters + ---------- + caffe_net : object + rankdir : {'LR', 'TB', 'BT'} + Direction of graph layout. + label_edges : boolean, optional + Label the edges (default is True). + + Returns + ------- + pydot graph object + """ + pydot_graph = pydot.Dot(caffe_net.name, + graph_type='digraph', + rankdir=rankdir) + pydot_nodes = {} + pydot_edges = [] + for layer in caffe_net.layer: + node_label = get_layer_label(layer, rankdir) + node_name = "%s_%s" % (layer.name, layer.type) + if (len(layer.bottom) == 1 and len(layer.top) == 1 and + layer.bottom[0] == layer.top[0]): + # We have an in-place neuron layer. 
+ pydot_nodes[node_name] = pydot.Node(node_label, + **NEURON_LAYER_STYLE) + else: + layer_style = LAYER_STYLE_DEFAULT + layer_style['fillcolor'] = choose_color_by_layertype(layer.type) + pydot_nodes[node_name] = pydot.Node(node_label, **layer_style) + for bottom_blob in layer.bottom: + pydot_nodes[bottom_blob + '_blob'] = pydot.Node('%s' % bottom_blob, + **BLOB_STYLE) + edge_label = '""' + pydot_edges.append({'src': bottom_blob + '_blob', + 'dst': node_name, + 'label': edge_label}) + for top_blob in layer.top: + pydot_nodes[top_blob + '_blob'] = pydot.Node('%s' % (top_blob)) + if label_edges: + edge_label = get_edge_label(layer) + else: + edge_label = '""' + pydot_edges.append({'src': node_name, + 'dst': top_blob + '_blob', + 'label': edge_label}) + # Now, add the nodes and edges to the graph. + for node in pydot_nodes.values(): + pydot_graph.add_node(node) + for edge in pydot_edges: + pydot_graph.add_edge( + pydot.Edge(pydot_nodes[edge['src']], + pydot_nodes[edge['dst']], + label=edge['label'])) + return pydot_graph + + +def draw_net(caffe_net, rankdir, ext='png'): + """Draws a caffe net and returns the image string encoded using the given + extension. + + Parameters + ---------- + caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer. + ext : string, optional + The image extension (the default is 'png'). + + Returns + ------- + string : + Postscript representation of the graph. + """ + return get_pydot_graph(caffe_net, rankdir).create(format=ext) + + +def draw_net_to_file(caffe_net, filename, rankdir='LR'): + """Draws a caffe net, and saves it to file using the format given as the + file extension. Use '.raw' to output raw text that you can manually feed + to graphviz to draw graphs. + + Parameters + ---------- + caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer. + filename : string + The path to a file where the networks visualization will be stored. + rankdir : {'LR', 'TB', 'BT'} + Direction of graph layout. + """ + ext = filename[filename.rfind('.')+1:] + with open(filename, 'wb') as fid: + fid.write(draw_net(caffe_net, rankdir, ext)) diff --git a/python/caffe/imagenet/ilsvrc_2012_mean.npy b/python/caffe/imagenet/ilsvrc_2012_mean.npy new file mode 100755 index 0000000..666082c Binary files /dev/null and b/python/caffe/imagenet/ilsvrc_2012_mean.npy differ diff --git a/python/caffe/io.py b/python/caffe/io.py new file mode 100755 index 0000000..fc96266 --- /dev/null +++ b/python/caffe/io.py @@ -0,0 +1,379 @@ +import numpy as np +import skimage.io +from scipy.ndimage import zoom +from skimage.transform import resize + +try: + # Python3 will most likely not be able to load protobuf + from caffe.proto import caffe_pb2 +except: + import sys + if sys.version_info >= (3, 0): + print("Failed to include caffe_pb2, things might go wrong!") + else: + raise + + +## proto / datum / ndarray conversion +def blobproto_to_array(blob, return_diff=False): + """ + Convert a blob proto to an array. In default, we will just return the data, + unless return_diff is True, in which case we will return the diff. + """ + if return_diff: + return np.array(blob.diff).reshape( + blob.num, blob.channels, blob.height, blob.width) + else: + return np.array(blob.data).reshape( + blob.num, blob.channels, blob.height, blob.width) + + +def array_to_blobproto(arr, diff=None): + """Converts a 4-dimensional array to blob proto. If diff is given, also + convert the diff. You need to make sure that arr and diff have the same + shape, and this function does not do sanity check. 
+ """ + if arr.ndim != 4: + raise ValueError('Incorrect array shape.') + blob = caffe_pb2.BlobProto() + blob.num, blob.channels, blob.height, blob.width = arr.shape + blob.data.extend(arr.astype(float).flat) + if diff is not None: + blob.diff.extend(diff.astype(float).flat) + return blob + + +def arraylist_to_blobprotovecor_str(arraylist): + """Converts a list of arrays to a serialized blobprotovec, which could be + then passed to a network for processing. + """ + vec = caffe_pb2.BlobProtoVector() + vec.blobs.extend([array_to_blobproto(arr) for arr in arraylist]) + return vec.SerializeToString() + + +def blobprotovector_str_to_arraylist(str): + """Converts a serialized blobprotovec to a list of arrays. + """ + vec = caffe_pb2.BlobProtoVector() + vec.ParseFromString(str) + return [blobproto_to_array(blob) for blob in vec.blobs] + + +def array_to_datum(arr, label=0): + """Converts a 3-dimensional array to datum. If the array has dtype uint8, + the output data will be encoded as a string. Otherwise, the output data + will be stored in float format. + """ + if arr.ndim != 3: + raise ValueError('Incorrect array shape.') + datum = caffe_pb2.Datum() + datum.channels, datum.height, datum.width = arr.shape + if arr.dtype == np.uint8: + datum.data = arr.tostring() + else: + datum.float_data.extend(arr.flat) + datum.label = label + return datum + + +def datum_to_array(datum): + """Converts a datum to an array. Note that the label is not returned, + as one can easily get it by calling datum.label. + """ + if len(datum.data): + return np.fromstring(datum.data, dtype=np.uint8).reshape( + datum.channels, datum.height, datum.width) + else: + return np.array(datum.float_data).astype(float).reshape( + datum.channels, datum.height, datum.width) + + +## Pre-processing + +class Transformer: + """ + Transform input for feeding into a Net. + + Note: this is mostly for illustrative purposes and it is likely better + to define your own input preprocessing routine for your needs. + + Parameters + ---------- + net : a Net for which the input should be prepared + """ + def __init__(self, inputs): + self.inputs = inputs + self.transpose = {} + self.channel_swap = {} + self.raw_scale = {} + self.mean = {} + self.input_scale = {} + + def __check_input(self, in_): + if in_ not in self.inputs: + raise Exception('{} is not one of the net inputs: {}'.format( + in_, self.inputs)) + + def preprocess(self, in_, data): + """ + Format input for Caffe: + - convert to single + - resize to input dimensions (preserving number of channels) + - transpose dimensions to K x H x W + - reorder channels (for instance color to BGR) + - scale raw input (e.g. 
from [0, 1] to [0, 255] for ImageNet models) + - subtract mean + - scale feature + + Parameters + ---------- + in_ : name of input blob to preprocess for + data : (H' x W' x K) ndarray + + Returns + ------- + caffe_in : (K x H x W) ndarray for input to a Net + """ + self.__check_input(in_) + caffe_in = data.astype(np.float32, copy=False) + transpose = self.transpose.get(in_) + channel_swap = self.channel_swap.get(in_) + raw_scale = self.raw_scale.get(in_) + mean = self.mean.get(in_) + input_scale = self.input_scale.get(in_) + in_dims = self.inputs[in_][2:] + if caffe_in.shape[:2] != in_dims: + caffe_in = resize_image(caffe_in, in_dims) + if transpose is not None: + caffe_in = caffe_in.transpose(transpose) + if channel_swap is not None: + caffe_in = caffe_in[channel_swap, :, :] + if raw_scale is not None: + caffe_in *= raw_scale + if mean is not None: + caffe_in -= mean + if input_scale is not None: + caffe_in *= input_scale + return caffe_in + + def deprocess(self, in_, data): + """ + Invert Caffe formatting; see preprocess(). + """ + self.__check_input(in_) + decaf_in = data.copy().squeeze() + transpose = self.transpose.get(in_) + channel_swap = self.channel_swap.get(in_) + raw_scale = self.raw_scale.get(in_) + mean = self.mean.get(in_) + input_scale = self.input_scale.get(in_) + if input_scale is not None: + decaf_in /= input_scale + if mean is not None: + decaf_in += mean + if raw_scale is not None: + decaf_in /= raw_scale + if channel_swap is not None: + decaf_in = decaf_in[channel_swap, :, :] + if transpose is not None: + decaf_in = decaf_in.transpose([transpose[t] for t in transpose]) + return decaf_in + + def set_transpose(self, in_, order): + """ + Set the input channel order for e.g. RGB to BGR conversion + as needed for the reference ImageNet model. + + Parameters + ---------- + in_ : which input to assign this channel order + order : the order to transpose the dimensions + """ + self.__check_input(in_) + if len(order) != len(self.inputs[in_]) - 1: + raise Exception('Transpose order needs to have the same number of ' + 'dimensions as the input.') + self.transpose[in_] = order + + def set_channel_swap(self, in_, order): + """ + Set the input channel order for e.g. RGB to BGR conversion + as needed for the reference ImageNet model. + N.B. this assumes the channels are the first dimension AFTER transpose. + + Parameters + ---------- + in_ : which input to assign this channel order + order : the order to take the channels. + (2,1,0) maps RGB to BGR for example. + """ + self.__check_input(in_) + if len(order) != self.inputs[in_][1]: + raise Exception('Channel swap needs to have the same number of ' + 'dimensions as the input channels.') + self.channel_swap[in_] = order + + def set_raw_scale(self, in_, scale): + """ + Set the scale of raw features s.t. the input blob = input * scale. + While Python represents images in [0, 1], certain Caffe models + like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale + of these models must be 255. + + Parameters + ---------- + in_ : which input to assign this scale factor + scale : scale coefficient + """ + self.__check_input(in_) + self.raw_scale[in_] = scale + + def set_mean(self, in_, mean): + """ + Set the mean to subtract for centering the data. + + Parameters + ---------- + in_ : which input to assign this mean. 
+ mean : mean ndarray (input dimensional or broadcastable) + """ + self.__check_input(in_) + ms = mean.shape + if mean.ndim == 1: + # broadcast channels + if ms[0] != self.inputs[in_][1]: + raise ValueError('Mean channels incompatible with input.') + mean = mean[:, np.newaxis, np.newaxis] + else: + # elementwise mean + if len(ms) == 2: + ms = (1,) + ms + if len(ms) != 3: + raise ValueError('Mean shape invalid') + if ms != self.inputs[in_][1:]: + raise ValueError('Mean shape incompatible with input shape.') + self.mean[in_] = mean + + def set_input_scale(self, in_, scale): + """ + Set the scale of preprocessed inputs s.t. the blob = blob * scale. + N.B. input_scale is done AFTER mean subtraction and other preprocessing + while raw_scale is done BEFORE. + + Parameters + ---------- + in_ : which input to assign this scale factor + scale : scale coefficient + """ + self.__check_input(in_) + self.input_scale[in_] = scale + + +## Image IO + +def load_image(filename, color=True): + """ + Load an image converting from grayscale or alpha as needed. + + Parameters + ---------- + filename : string + color : boolean + flag for color format. True (default) loads as RGB while False + loads as intensity (if image is already grayscale). + + Returns + ------- + image : an image with type np.float32 in range [0, 1] + of size (H x W x 3) in RGB or + of size (H x W x 1) in grayscale. + """ + img = skimage.img_as_float(skimage.io.imread(filename)).astype(np.float32) + if img.ndim == 2: + img = img[:, :, np.newaxis] + if color: + img = np.tile(img, (1, 1, 3)) + elif img.shape[2] == 4: + img = img[:, :, :3] + return img + + +def resize_image(im, new_dims, interp_order=1): + """ + Resize an image array with interpolation. + + Parameters + ---------- + im : (H x W x K) ndarray + new_dims : (height, width) tuple of new dimensions. + interp_order : interpolation order, default is linear. + + Returns + ------- + im : resized ndarray with shape (new_dims[0], new_dims[1], K) + """ + if im.shape[-1] == 1 or im.shape[-1] == 3: + im_min, im_max = im.min(), im.max() + if im_max > im_min: + # skimage is fast but only understands {1,3} channel images + # in [0, 1]. + im_std = (im - im_min) / (im_max - im_min) + resized_std = resize(im_std, new_dims, order=interp_order) + resized_im = resized_std * (im_max - im_min) + im_min + else: + # the image is a constant -- avoid divide by 0 + ret = np.empty((new_dims[0], new_dims[1], im.shape[-1]), + dtype=np.float32) + ret.fill(im_min) + return ret + else: + # ndimage interpolates anything but more slowly. + scale = tuple(np.array(new_dims) / np.array(im.shape[:2])) + resized_im = zoom(im, scale + (1,), order=interp_order) + return resized_im.astype(np.float32) + + +def oversample(images, crop_dims): + """ + Crop images into the four corners, center, and their mirrored versions. + + Parameters + ---------- + image : iterable of (H x W x K) ndarrays + crop_dims : (height, width) tuple for the crops. + + Returns + ------- + crops : (10*N x H x W x K) ndarray of crops for number of inputs N. + """ + # Dimensions and center. 
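+    # (Illustrative note: for a 256x256 image with crop_dims of (227, 227),
+    # the corner crops computed below start at offsets (0, 0), (0, 29),
+    # (29, 0) and (29, 29), the fifth crop is centered, and all five are
+    # mirrored horizontally, giving the 10 crops per input described above.)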
+ im_shape = np.array(images[0].shape) + crop_dims = np.array(crop_dims) + im_center = im_shape[:2] / 2.0 + + # Make crop coordinates + h_indices = (0, im_shape[0] - crop_dims[0]) + w_indices = (0, im_shape[1] - crop_dims[1]) + crops_ix = np.empty((5, 4), dtype=int) + curr = 0 + for i in h_indices: + for j in w_indices: + crops_ix[curr] = (i, j, i + crop_dims[0], j + crop_dims[1]) + curr += 1 + crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate([ + -crop_dims / 2.0, + crop_dims / 2.0 + ]) + crops_ix = np.tile(crops_ix, (2, 1)) + + # Extract crops + crops = np.empty((10 * len(images), crop_dims[0], crop_dims[1], + im_shape[-1]), dtype=np.float32) + ix = 0 + for im in images: + for crop in crops_ix: + crops[ix] = im[crop[0]:crop[2], crop[1]:crop[3], :] + ix += 1 + crops[ix-5:ix] = crops[ix-5:ix, :, ::-1, :] # flip for mirrors + return crops diff --git a/python/caffe/io.pyc b/python/caffe/io.pyc new file mode 100755 index 0000000..87eb10f Binary files /dev/null and b/python/caffe/io.pyc differ diff --git a/python/caffe/net_spec.py b/python/caffe/net_spec.py new file mode 100755 index 0000000..77a0e00 --- /dev/null +++ b/python/caffe/net_spec.py @@ -0,0 +1,214 @@ +"""Python net specification. + +This module provides a way to write nets directly in Python, using a natural, +functional style. See examples/pycaffe/caffenet.py for an example. + +Currently this works as a thin wrapper around the Python protobuf interface, +with layers and parameters automatically generated for the "layers" and +"params" pseudo-modules, which are actually objects using __getattr__ magic +to generate protobuf messages. + +Note that when using to_proto or Top.to_proto, names of intermediate blobs will +be automatically generated. To explicitly specify blob names, use the NetSpec +class -- assign to its attributes directly to name layers, and call +NetSpec.to_proto to serialize all assigned layers. + +This interface is expected to continue to evolve as Caffe gains new capabilities +for specifying nets. In particular, the automatically generated layer names +are not guaranteed to be forward-compatible. +""" + +from collections import OrderedDict, Counter + +from .proto import caffe_pb2 +from google import protobuf +import six + + +def param_name_dict(): + """Find out the correspondence between layer names and parameter names.""" + + layer = caffe_pb2.LayerParameter() + # get all parameter names (typically underscore case) and corresponding + # type names (typically camel case), which contain the layer names + # (note that not all parameters correspond to layers, but we'll ignore that) + param_names = [s for s in dir(layer) if s.endswith('_param')] + param_type_names = [type(getattr(layer, s)).__name__ for s in param_names] + # strip the final '_param' or 'Parameter' + param_names = [s[:-len('_param')] for s in param_names] + param_type_names = [s[:-len('Parameter')] for s in param_type_names] + return dict(zip(param_type_names, param_names)) + + +def to_proto(*tops): + """Generate a NetParameter that contains all layers needed to compute + all arguments.""" + + layers = OrderedDict() + autonames = Counter() + for top in tops: + top.fn._to_proto(layers, {}, autonames) + net = caffe_pb2.NetParameter() + net.layer.extend(layers.values()) + return net + + +def assign_proto(proto, name, val): + """Assign a Python object to a protobuf message, based on the Python + type (in recursive fashion). 
Lists become repeated fields/messages, dicts + become messages, and other types are assigned directly.""" + + if isinstance(val, list): + if isinstance(val[0], dict): + for item in val: + proto_item = getattr(proto, name).add() + for k, v in six.iteritems(item): + assign_proto(proto_item, k, v) + else: + getattr(proto, name).extend(val) + elif isinstance(val, dict): + for k, v in six.iteritems(val): + assign_proto(getattr(proto, name), k, v) + else: + setattr(proto, name, val) + + +class Top(object): + """A Top specifies a single output blob (which could be one of several + produced by a layer.)""" + + def __init__(self, fn, n): + self.fn = fn + self.n = n + + def to_proto(self): + """Generate a NetParameter that contains all layers needed to compute + this top.""" + + return to_proto(self) + + def _to_proto(self, layers, names, autonames): + return self.fn._to_proto(layers, names, autonames) + + +class Function(object): + """A Function specifies a layer, its parameters, and its inputs (which + are Tops from other layers).""" + + def __init__(self, type_name, inputs, params): + self.type_name = type_name + self.inputs = inputs + self.params = params + self.ntop = self.params.get('ntop', 1) + # use del to make sure kwargs are not double-processed as layer params + if 'ntop' in self.params: + del self.params['ntop'] + self.in_place = self.params.get('in_place', False) + if 'in_place' in self.params: + del self.params['in_place'] + self.tops = tuple(Top(self, n) for n in range(self.ntop)) + + def _get_name(self, names, autonames): + if self not in names and self.ntop > 0: + names[self] = self._get_top_name(self.tops[0], names, autonames) + elif self not in names: + autonames[self.type_name] += 1 + names[self] = self.type_name + str(autonames[self.type_name]) + return names[self] + + def _get_top_name(self, top, names, autonames): + if top not in names: + autonames[top.fn.type_name] += 1 + names[top] = top.fn.type_name + str(autonames[top.fn.type_name]) + return names[top] + + def _to_proto(self, layers, names, autonames): + if self in layers: + return + bottom_names = [] + for inp in self.inputs: + inp._to_proto(layers, names, autonames) + bottom_names.append(layers[inp.fn].top[inp.n]) + layer = caffe_pb2.LayerParameter() + layer.type = self.type_name + layer.bottom.extend(bottom_names) + + if self.in_place: + layer.top.extend(layer.bottom) + else: + for top in self.tops: + layer.top.append(self._get_top_name(top, names, autonames)) + layer.name = self._get_name(names, autonames) + + for k, v in six.iteritems(self.params): + # special case to handle generic *params + if k.endswith('param'): + assign_proto(layer, k, v) + else: + try: + assign_proto(getattr(layer, + _param_names[self.type_name] + '_param'), k, v) + except (AttributeError, KeyError): + assign_proto(layer, k, v) + + layers[self] = layer + + +class NetSpec(object): + """A NetSpec contains a set of Tops (assigned directly as attributes). 
+ Calling NetSpec.to_proto generates a NetParameter containing all of the + layers needed to produce all of the assigned Tops, using the assigned + names.""" + + def __init__(self): + super(NetSpec, self).__setattr__('tops', OrderedDict()) + + def __setattr__(self, name, value): + self.tops[name] = value + + def __getattr__(self, name): + return self.tops[name] + + def to_proto(self): + names = {v: k for k, v in six.iteritems(self.tops)} + autonames = Counter() + layers = OrderedDict() + for name, top in six.iteritems(self.tops): + top._to_proto(layers, names, autonames) + net = caffe_pb2.NetParameter() + net.layer.extend(layers.values()) + return net + + +class Layers(object): + """A Layers object is a pseudo-module which generates functions that specify + layers; e.g., Layers().Convolution(bottom, kernel_size=3) will produce a Top + specifying a 3x3 convolution applied to bottom.""" + + def __getattr__(self, name): + def layer_fn(*args, **kwargs): + fn = Function(name, args, kwargs) + if fn.ntop == 0: + return fn + elif fn.ntop == 1: + return fn.tops[0] + else: + return fn.tops + return layer_fn + + +class Parameters(object): + """A Parameters object is a pseudo-module which generates constants used + in layer parameters; e.g., Parameters().Pooling.MAX is the value used + to specify max pooling.""" + + def __getattr__(self, name): + class Param: + def __getattr__(self, param_name): + return getattr(getattr(caffe_pb2, name + 'Parameter'), param_name) + return Param() + + +_param_names = param_name_dict() +layers = Layers() +params = Parameters() diff --git a/python/caffe/net_spec.pyc b/python/caffe/net_spec.pyc new file mode 100755 index 0000000..fec970e Binary files /dev/null and b/python/caffe/net_spec.pyc differ diff --git a/python/caffe/pycaffe.py b/python/caffe/pycaffe.py new file mode 100755 index 0000000..4f980a9 --- /dev/null +++ b/python/caffe/pycaffe.py @@ -0,0 +1,291 @@ +""" +Wrap the internal caffe C++ module (_caffe.so) with a clean, Pythonic +interface. +""" + +from collections import OrderedDict +try: + from itertools import izip_longest +except: + from itertools import zip_longest as izip_longest +import numpy as np + +from ._caffe import Net, SGDSolver +import caffe.io + +# We directly update methods from Net here (rather than using composition or +# inheritance) so that nets created by caffe (e.g., by SGDSolver) will +# automatically have the improved interface. + + +@property +def _Net_blobs(self): + """ + An OrderedDict (bottom to top, i.e., input to output) of network + blobs indexed by name + """ + return OrderedDict(zip(self._blob_names, self._blobs)) + + +@property +def _Net_blob_loss_weights(self): + """ + An OrderedDict (bottom to top, i.e., input to output) of network + blob loss weights indexed by name + """ + return OrderedDict(zip(self._blob_names, self._blob_loss_weights)) + + +@property +def _Net_params(self): + """ + An OrderedDict (bottom to top, i.e., input to output) of network + parameters indexed by name; each is a list of multiple blobs (e.g., + weights and biases) + """ + return OrderedDict([(name, lr.blobs) + for name, lr in zip(self._layer_names, self.layers) + if len(lr.blobs) > 0]) + + +@property +def _Net_inputs(self): + return [list(self.blobs.keys())[i] for i in self._inputs] + + +@property +def _Net_outputs(self): + return [list(self.blobs.keys())[i] for i in self._outputs] + + +def _Net_forward(self, blobs=None, start=None, end=None, **kwargs): + """ + Forward pass: prepare inputs and run the net forward. 
+ + Parameters + ---------- + blobs : list of blobs to return in addition to output blobs. + kwargs : Keys are input blob names and values are blob ndarrays. + For formatting inputs for Caffe, see Net.preprocess(). + If None, input is taken from data layers. + start : optional name of layer at which to begin the forward pass + end : optional name of layer at which to finish the forward pass + (inclusive) + + Returns + ------- + outs : {blob name: blob ndarray} dict. + """ + if blobs is None: + blobs = [] + + if start is not None: + start_ind = list(self._layer_names).index(start) + else: + start_ind = 0 + + if end is not None: + end_ind = list(self._layer_names).index(end) + outputs = set([end] + blobs) + else: + end_ind = len(self.layers) - 1 + outputs = set(self.outputs + blobs) + + if kwargs: + if set(kwargs.keys()) != set(self.inputs): + raise Exception('Input blob arguments do not match net inputs.') + # Set input according to defined shapes and make arrays single and + # C-contiguous as Caffe expects. + for in_, blob in kwargs.iteritems(): + if blob.shape[0] != self.blobs[in_].num: + raise Exception('Input is not batch sized') + self.blobs[in_].data[...] = blob + + self._forward(start_ind, end_ind) + + # Unpack blobs to extract + return {out: self.blobs[out].data for out in outputs} + + +def _Net_backward(self, diffs=None, start=None, end=None, **kwargs): + """ + Backward pass: prepare diffs and run the net backward. + + Parameters + ---------- + diffs : list of diffs to return in addition to bottom diffs. + kwargs : Keys are output blob names and values are diff ndarrays. + If None, top diffs are taken from forward loss. + start : optional name of layer at which to begin the backward pass + end : optional name of layer at which to finish the backward pass + (inclusive) + + Returns + ------- + outs: {blob name: diff ndarray} dict. + """ + if diffs is None: + diffs = [] + + if start is not None: + start_ind = list(self._layer_names).index(start) + else: + start_ind = len(self.layers) - 1 + + if end is not None: + end_ind = list(self._layer_names).index(end) + outputs = set([end] + diffs) + else: + end_ind = 0 + outputs = set(self.inputs + diffs) + + if kwargs: + if set(kwargs.keys()) != set(self.outputs): + raise Exception('Top diff arguments do not match net outputs.') + # Set top diffs according to defined shapes and make arrays single and + # C-contiguous as Caffe expects. + for top, diff in kwargs.iteritems(): + if diff.ndim != 4: + raise Exception('{} diff is not 4-d'.format(top)) + if diff.shape[0] != self.blobs[top].num: + raise Exception('Diff is not batch sized') + self.blobs[top].diff[...] = diff + + self._backward(start_ind, end_ind) + + # Unpack diffs to extract + return {out: self.blobs[out].diff for out in outputs} + + +def _Net_forward_all(self, blobs=None, **kwargs): + """ + Run net forward in batches. + + Parameters + ---------- + blobs : list of blobs to extract as in forward() + kwargs : Keys are input blob names and values are blob ndarrays. + Refer to forward(). + + Returns + ------- + all_outs : {blob name: list of blobs} dict. + """ + # Collect outputs from batches + all_outs = {out: [] for out in set(self.outputs + (blobs or []))} + for batch in self._batch(kwargs): + outs = self.forward(blobs=blobs, **batch) + for out, out_blob in outs.iteritems(): + all_outs[out].extend(out_blob.copy()) + # Package in ndarray. + for out in all_outs: + all_outs[out] = np.asarray(all_outs[out]) + # Discard padding. 
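+    # (The last batch yielded by _Net_batch is zero-padded up to the net's
+    # batch size, so any outputs beyond the number of real inputs are
+    # dropped here.)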
+ pad = len(all_outs.itervalues().next()) - len(kwargs.itervalues().next()) + if pad: + for out in all_outs: + all_outs[out] = all_outs[out][:-pad] + return all_outs + + +def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs): + """ + Run net forward + backward in batches. + + Parameters + ---------- + blobs: list of blobs to extract as in forward() + diffs: list of diffs to extract as in backward() + kwargs: Keys are input (for forward) and output (for backward) blob names + and values are ndarrays. Refer to forward() and backward(). + Prefilled variants are called for lack of input or output blobs. + + Returns + ------- + all_blobs: {blob name: blob ndarray} dict. + all_diffs: {blob name: diff ndarray} dict. + """ + # Batch blobs and diffs. + all_outs = {out: [] for out in set(self.outputs + (blobs or []))} + all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))} + forward_batches = self._batch({in_: kwargs[in_] + for in_ in self.inputs if in_ in kwargs}) + backward_batches = self._batch({out: kwargs[out] + for out in self.outputs if out in kwargs}) + # Collect outputs from batches (and heed lack of forward/backward batches). + for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}): + batch_blobs = self.forward(blobs=blobs, **fb) + batch_diffs = self.backward(diffs=diffs, **bb) + for out, out_blobs in batch_blobs.iteritems(): + all_outs[out].extend(out_blobs) + for diff, out_diffs in batch_diffs.iteritems(): + all_diffs[diff].extend(out_diffs) + # Package in ndarray. + for out, diff in zip(all_outs, all_diffs): + all_outs[out] = np.asarray(all_outs[out]) + all_diffs[diff] = np.asarray(all_diffs[diff]) + # Discard padding at the end and package in ndarray. + pad = len(all_outs.itervalues().next()) - len(kwargs.itervalues().next()) + if pad: + for out, diff in zip(all_outs, all_diffs): + all_outs[out] = all_outs[out][:-pad] + all_diffs[diff] = all_diffs[diff][:-pad] + return all_outs, all_diffs + + +def _Net_set_input_arrays(self, data, labels): + """ + Set input arrays of the in-memory MemoryDataLayer. + (Note: this is only for networks declared with the memory data layer.) + """ + if labels.ndim == 1: + labels = np.ascontiguousarray(labels[:, np.newaxis, np.newaxis, + np.newaxis]) + return self._set_input_arrays(data, labels) + + +def _Net_batch(self, blobs): + """ + Batch blob lists according to net's batch size. + + Parameters + ---------- + blobs: Keys blob names and values are lists of blobs (of any length). + Naturally, all the lists should have the same length. + + Yields + ------ + batch: {blob name: list of blobs} dict for a single batch. + """ + num = len(blobs.itervalues().next()) + batch_size = self.blobs.itervalues().next().num + remainder = num % batch_size + num_batches = num / batch_size + + # Yield full batches. + for b in range(num_batches): + i = b * batch_size + yield {name: blobs[name][i:i + batch_size] for name in blobs} + + # Yield last padded batch, if any. + if remainder > 0: + padded_batch = {} + for name in blobs: + padding = np.zeros((batch_size - remainder,) + + blobs[name].shape[1:]) + padded_batch[name] = np.concatenate([blobs[name][-remainder:], + padding]) + yield padded_batch + +# Attach methods to Net. 
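+# For illustration only (the file paths below are placeholders, not part of
+# this repository):
+#   net = caffe.Net('deploy.prototxt', 'weights.caffemodel', caffe.TEST)
+#   out = net.forward()            # _Net_forward, attached below
+#   probs = out[net.outputs[0]]    # output blob ndarray, keyed by name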
+Net.blobs = _Net_blobs +Net.blob_loss_weights = _Net_blob_loss_weights +Net.params = _Net_params +Net.forward = _Net_forward +Net.backward = _Net_backward +Net.forward_all = _Net_forward_all +Net.forward_backward_all = _Net_forward_backward_all +Net.set_input_arrays = _Net_set_input_arrays +Net._batch = _Net_batch +Net.inputs = _Net_inputs +Net.outputs = _Net_outputs diff --git a/python/caffe/pycaffe.pyc b/python/caffe/pycaffe.pyc new file mode 100755 index 0000000..304ad52 Binary files /dev/null and b/python/caffe/pycaffe.pyc differ diff --git a/python/caffe/test/test_layer_type_list.py b/python/caffe/test/test_layer_type_list.py new file mode 100755 index 0000000..7edc80d --- /dev/null +++ b/python/caffe/test/test_layer_type_list.py @@ -0,0 +1,10 @@ +import unittest + +import caffe + +class TestLayerTypeList(unittest.TestCase): + + def test_standard_types(self): + for type_name in ['Data', 'Convolution', 'InnerProduct']: + self.assertIn(type_name, caffe.layer_type_list(), + '%s not in layer_type_list()' % type_name) diff --git a/python/caffe/test/test_net.py b/python/caffe/test/test_net.py new file mode 100755 index 0000000..aad828a --- /dev/null +++ b/python/caffe/test/test_net.py @@ -0,0 +1,81 @@ +import unittest +import tempfile +import os +import numpy as np +import six + +import caffe + + +def simple_net_file(num_output): + """Make a simple net prototxt, based on test_net.cpp, returning the name + of the (temporary) file.""" + + f = tempfile.NamedTemporaryFile(mode='w+', delete=False) + f.write("""name: 'testnet' force_backward: true + layer { type: 'DummyData' name: 'data' top: 'data' top: 'label' + dummy_data_param { num: 5 channels: 2 height: 3 width: 4 + num: 5 channels: 1 height: 1 width: 1 + data_filler { type: 'gaussian' std: 1 } + data_filler { type: 'constant' } } } + layer { type: 'Convolution' name: 'conv' bottom: 'data' top: 'conv' + convolution_param { num_output: 11 kernel_size: 2 pad: 3 + weight_filler { type: 'gaussian' std: 1 } + bias_filler { type: 'constant' value: 2 } } + param { decay_mult: 1 } param { decay_mult: 0 } + } + layer { type: 'InnerProduct' name: 'ip' bottom: 'conv' top: 'ip' + inner_product_param { num_output: """ + str(num_output) + """ + weight_filler { type: 'gaussian' std: 2.5 } + bias_filler { type: 'constant' value: -3 } } } + layer { type: 'SoftmaxWithLoss' name: 'loss' bottom: 'ip' bottom: 'label' + top: 'loss' }""") + f.close() + return f.name + + +class TestNet(unittest.TestCase): + def setUp(self): + self.num_output = 13 + net_file = simple_net_file(self.num_output) + self.net = caffe.Net(net_file, caffe.TRAIN) + # fill in valid labels + self.net.blobs['label'].data[...] 
= \ + np.random.randint(self.num_output, + size=self.net.blobs['label'].data.shape) + os.remove(net_file) + + def test_memory(self): + """Check that holding onto blob data beyond the life of a Net is OK""" + + params = sum(map(list, six.itervalues(self.net.params)), []) + blobs = self.net.blobs.values() + del self.net + + # now sum everything (forcing all memory to be read) + total = 0 + for p in params: + total += p.data.sum() + p.diff.sum() + for bl in blobs: + total += bl.data.sum() + bl.diff.sum() + + def test_forward_backward(self): + self.net.forward() + self.net.backward() + + def test_inputs_outputs(self): + self.assertEqual(self.net.inputs, []) + self.assertEqual(self.net.outputs, ['loss']) + + def test_save_and_read(self): + f = tempfile.NamedTemporaryFile(mode='w+', delete=False) + f.close() + self.net.save(f.name) + net_file = simple_net_file(self.num_output) + net2 = caffe.Net(net_file, f.name, caffe.TRAIN) + os.remove(net_file) + os.remove(f.name) + for name in self.net.params: + for i in range(len(self.net.params[name])): + self.assertEqual(abs(self.net.params[name][i].data + - net2.params[name][i].data).sum(), 0) diff --git a/python/caffe/test/test_net_spec.py b/python/caffe/test/test_net_spec.py new file mode 100755 index 0000000..b4595e6 --- /dev/null +++ b/python/caffe/test/test_net_spec.py @@ -0,0 +1,82 @@ +import unittest +import tempfile +import caffe +from caffe import layers as L +from caffe import params as P + +def lenet(batch_size): + n = caffe.NetSpec() + n.data, n.label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]), + dict(dim=[batch_size, 1, 1, 1])], + transform_param=dict(scale=1./255), ntop=2) + n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20, + weight_filler=dict(type='xavier')) + n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX) + n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, + weight_filler=dict(type='xavier')) + n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX) + n.ip1 = L.InnerProduct(n.pool2, num_output=500, + weight_filler=dict(type='xavier')) + n.relu1 = L.ReLU(n.ip1, in_place=True) + n.ip2 = L.InnerProduct(n.relu1, num_output=10, + weight_filler=dict(type='xavier')) + n.loss = L.SoftmaxWithLoss(n.ip2, n.label) + return n.to_proto() + +def anon_lenet(batch_size): + data, label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]), + dict(dim=[batch_size, 1, 1, 1])], + transform_param=dict(scale=1./255), ntop=2) + conv1 = L.Convolution(data, kernel_size=5, num_output=20, + weight_filler=dict(type='xavier')) + pool1 = L.Pooling(conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX) + conv2 = L.Convolution(pool1, kernel_size=5, num_output=50, + weight_filler=dict(type='xavier')) + pool2 = L.Pooling(conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX) + ip1 = L.InnerProduct(pool2, num_output=500, + weight_filler=dict(type='xavier')) + relu1 = L.ReLU(ip1, in_place=True) + ip2 = L.InnerProduct(relu1, num_output=10, + weight_filler=dict(type='xavier')) + loss = L.SoftmaxWithLoss(ip2, label) + return loss.to_proto() + +def silent_net(): + n = caffe.NetSpec() + n.data, n.data2 = L.DummyData(shape=[dict(dim=[3]), dict(dim=[4, 2])], + ntop=2) + n.silence_data = L.Silence(n.data, ntop=0) + n.silence_data2 = L.Silence(n.data2, ntop=0) + return n.to_proto() + +class TestNetSpec(unittest.TestCase): + def load_net(self, net_proto): + f = tempfile.NamedTemporaryFile(mode='w+', delete=False) + f.write(str(net_proto)) + f.close() + return caffe.Net(f.name, caffe.TEST) + + def 
test_lenet(self): + """Construct and build the Caffe version of LeNet.""" + + net_proto = lenet(50) + # check that relu is in-place + self.assertEqual(net_proto.layer[6].bottom, + net_proto.layer[6].top) + net = self.load_net(net_proto) + # check that all layers are present + self.assertEqual(len(net.layers), 9) + + # now the check the version with automatically-generated layer names + net_proto = anon_lenet(50) + self.assertEqual(net_proto.layer[6].bottom, + net_proto.layer[6].top) + net = self.load_net(net_proto) + self.assertEqual(len(net.layers), 9) + + def test_zero_tops(self): + """Test net construction for top-less layers.""" + + net_proto = silent_net() + net = self.load_net(net_proto) + self.assertEqual(len(net.forward()), 0) diff --git a/python/caffe/test/test_python_layer.py b/python/caffe/test/test_python_layer.py new file mode 100755 index 0000000..a1e11bc --- /dev/null +++ b/python/caffe/test/test_python_layer.py @@ -0,0 +1,86 @@ +import unittest +import tempfile +import os +import six + +import caffe + + +class SimpleLayer(caffe.Layer): + """A layer that just multiplies by ten""" + + def setup(self, bottom, top): + pass + + def reshape(self, bottom, top): + top[0].reshape(*bottom[0].data.shape) + + def forward(self, bottom, top): + top[0].data[...] = 10 * bottom[0].data + + def backward(self, top, propagate_down, bottom): + bottom[0].diff[...] = 10 * top[0].diff + + +class ExceptionLayer(caffe.Layer): + """A layer for checking exceptions from Python""" + + def setup(self, bottom, top): + raise RuntimeError + + +def python_net_file(): + with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f: + f.write("""name: 'pythonnet' force_backward: true + input: 'data' input_shape { dim: 10 dim: 9 dim: 8 } + layer { type: 'Python' name: 'one' bottom: 'data' top: 'one' + python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } + layer { type: 'Python' name: 'two' bottom: 'one' top: 'two' + python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } + layer { type: 'Python' name: 'three' bottom: 'two' top: 'three' + python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }""") + return f.name + + +def exception_net_file(): + with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f: + f.write("""name: 'pythonnet' force_backward: true + input: 'data' input_shape { dim: 10 dim: 9 dim: 8 } + layer { type: 'Python' name: 'layer' bottom: 'data' top: 'top' + python_param { module: 'test_python_layer' layer: 'ExceptionLayer' } } + """) + return f.name + + +class TestPythonLayer(unittest.TestCase): + def setUp(self): + net_file = python_net_file() + self.net = caffe.Net(net_file, caffe.TRAIN) + os.remove(net_file) + + def test_forward(self): + x = 8 + self.net.blobs['data'].data[...] = x + self.net.forward() + for y in self.net.blobs['three'].data.flat: + self.assertEqual(y, 10**3 * x) + + def test_backward(self): + x = 7 + self.net.blobs['three'].diff[...] 
= x + self.net.backward() + for y in self.net.blobs['data'].diff.flat: + self.assertEqual(y, 10**3 * x) + + def test_reshape(self): + s = 4 + self.net.blobs['data'].reshape(s, s, s, s) + self.net.forward() + for blob in six.itervalues(self.net.blobs): + for d in blob.data.shape: + self.assertEqual(s, d) + + def test_exception(self): + net_file = exception_net_file() + self.assertRaises(RuntimeError, caffe.Net, net_file, caffe.TEST) + os.remove(net_file) diff --git a/python/caffe/test/test_python_layer_with_param_str.py b/python/caffe/test/test_python_layer_with_param_str.py new file mode 100755 index 0000000..3d0f107 --- /dev/null +++ b/python/caffe/test/test_python_layer_with_param_str.py @@ -0,0 +1,59 @@ +import unittest +import tempfile +import os +import six + +import caffe + + +class SimpleParamLayer(caffe.Layer): + """A layer that just multiplies by the numeric value of its param string""" + + def setup(self, bottom, top): + try: + self.value = float(self.param_str) + except ValueError: + raise ValueError("Parameter string must be a legible float") + + def reshape(self, bottom, top): + top[0].reshape(*bottom[0].data.shape) + + def forward(self, bottom, top): + top[0].data[...] = self.value * bottom[0].data + + def backward(self, top, propagate_down, bottom): + bottom[0].diff[...] = self.value * top[0].diff + + +def python_param_net_file(): + with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f: + f.write("""name: 'pythonnet' force_backward: true + input: 'data' input_shape { dim: 10 dim: 9 dim: 8 } + layer { type: 'Python' name: 'mul10' bottom: 'data' top: 'mul10' + python_param { module: 'test_python_layer_with_param_str' + layer: 'SimpleParamLayer' param_str: '10' } } + layer { type: 'Python' name: 'mul2' bottom: 'mul10' top: 'mul2' + python_param { module: 'test_python_layer_with_param_str' + layer: 'SimpleParamLayer' param_str: '2' } }""") + return f.name + + +class TestLayerWithParam(unittest.TestCase): + def setUp(self): + net_file = python_param_net_file() + self.net = caffe.Net(net_file, caffe.TRAIN) + os.remove(net_file) + + def test_forward(self): + x = 8 + self.net.blobs['data'].data[...] = x + self.net.forward() + for y in self.net.blobs['mul2'].data.flat: + self.assertEqual(y, 2 * 10 * x) + + def test_backward(self): + x = 7 + self.net.blobs['mul2'].diff[...] = x + self.net.backward() + for y in self.net.blobs['data'].diff.flat: + self.assertEqual(y, 2 * 10 * x) diff --git a/python/caffe/test/test_solver.py b/python/caffe/test/test_solver.py new file mode 100755 index 0000000..9cfc10d --- /dev/null +++ b/python/caffe/test/test_solver.py @@ -0,0 +1,53 @@ +import unittest +import tempfile +import os +import numpy as np +import six + +import caffe +from test_net import simple_net_file + + +class TestSolver(unittest.TestCase): + def setUp(self): + self.num_output = 13 + net_f = simple_net_file(self.num_output) + f = tempfile.NamedTemporaryFile(mode='w+', delete=False) + f.write("""net: '""" + net_f + """' + test_iter: 10 test_interval: 10 base_lr: 0.01 momentum: 0.9 + weight_decay: 0.0005 lr_policy: 'inv' gamma: 0.0001 power: 0.75 + display: 100 max_iter: 100 snapshot_after_train: false""") + f.close() + self.solver = caffe.SGDSolver(f.name) + # also make sure get_solver runs + caffe.get_solver(f.name) + caffe.set_mode_cpu() + # fill in valid labels + self.solver.net.blobs['label'].data[...] = \ + np.random.randint(self.num_output, + size=self.solver.net.blobs['label'].data.shape) + self.solver.test_nets[0].blobs['label'].data[...] 
= \ + np.random.randint(self.num_output, + size=self.solver.test_nets[0].blobs['label'].data.shape) + os.remove(f.name) + os.remove(net_f) + + def test_solve(self): + self.assertEqual(self.solver.iter, 0) + self.solver.solve() + self.assertEqual(self.solver.iter, 100) + + def test_net_memory(self): + """Check that nets survive after the solver is destroyed.""" + + nets = [self.solver.net] + list(self.solver.test_nets) + self.assertEqual(len(nets), 2) + del self.solver + + total = 0 + for net in nets: + for ps in six.itervalues(net.params): + for p in ps: + total += p.data.sum() + p.diff.sum() + for bl in six.itervalues(net.blobs): + total += bl.data.sum() + bl.diff.sum() diff --git a/python/classify.py b/python/classify.py new file mode 100755 index 0000000..4544c51 --- /dev/null +++ b/python/classify.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python +""" +classify.py is an out-of-the-box image classifer callable from the command line. + +By default it configures and runs the Caffe reference ImageNet model. +""" +import numpy as np +import os +import sys +import argparse +import glob +import time + +import caffe + + +def main(argv): + pycaffe_dir = os.path.dirname(__file__) + + parser = argparse.ArgumentParser() + # Required arguments: input and output files. + parser.add_argument( + "input_file", + help="Input image, directory, or npy." + ) + parser.add_argument( + "output_file", + help="Output npy filename." + ) + # Optional arguments. + parser.add_argument( + "--model_def", + default=os.path.join(pycaffe_dir, + "../models/bvlc_reference_caffenet/deploy.prototxt"), + help="Model definition file." + ) + parser.add_argument( + "--pretrained_model", + default=os.path.join(pycaffe_dir, + "../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"), + help="Trained model weights file." + ) + parser.add_argument( + "--gpu", + action='store_true', + help="Switch for gpu computation." + ) + parser.add_argument( + "--center_only", + action='store_true', + help="Switch for prediction from center crop alone instead of " + + "averaging predictions across crops (default)." + ) + parser.add_argument( + "--images_dim", + default='256,256', + help="Canonical 'height,width' dimensions of input images." + ) + parser.add_argument( + "--mean_file", + default=os.path.join(pycaffe_dir, + 'caffe/imagenet/ilsvrc_2012_mean.npy'), + help="Data set image mean of [Channels x Height x Width] dimensions " + + "(numpy array). Set to '' for no mean subtraction." + ) + parser.add_argument( + "--input_scale", + type=float, + help="Multiply input features by this scale to finish preprocessing." + ) + parser.add_argument( + "--raw_scale", + type=float, + default=255.0, + help="Multiply raw input by this scale before preprocessing." + ) + parser.add_argument( + "--channel_swap", + default='2,1,0', + help="Order to permute input channels. The default converts " + + "RGB -> BGR since BGR is the Caffe default by way of OpenCV." + ) + parser.add_argument( + "--ext", + default='jpg', + help="Image file extension to take as input when a directory " + + "is given as the input file." + ) + args = parser.parse_args() + + image_dims = [int(s) for s in args.images_dim.split(',')] + + mean, channel_swap = None, None + if args.mean_file: + mean = np.load(args.mean_file) + if args.channel_swap: + channel_swap = [int(s) for s in args.channel_swap.split(',')] + + if args.gpu: + caffe.set_mode_gpu() + print("GPU mode") + else: + caffe.set_mode_cpu() + print("CPU mode") + + # Make classifier. 
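+    # (caffe.Classifier wraps Net with the preprocessing implied by the
+    # mean/scale/channel-swap arguments above; image_dims gives the resize
+    # target applied before oversampling or center-cropping.)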
+ classifier = caffe.Classifier(args.model_def, args.pretrained_model, + image_dims=image_dims, mean=mean, + input_scale=args.input_scale, raw_scale=args.raw_scale, + channel_swap=channel_swap) + + # Load numpy array (.npy), directory glob (*.jpg), or image file. + args.input_file = os.path.expanduser(args.input_file) + if args.input_file.endswith('npy'): + print("Loading file: %s" % args.input_file) + inputs = np.load(args.input_file) + elif os.path.isdir(args.input_file): + print("Loading folder: %s" % args.input_file) + inputs =[caffe.io.load_image(im_f) + for im_f in glob.glob(args.input_file + '/*.' + args.ext)] + else: + print("Loading file: %s" % args.input_file) + inputs = [caffe.io.load_image(args.input_file)] + + print("Classifying %d inputs." % len(inputs)) + + # Classify. + start = time.time() + predictions = classifier.predict(inputs, not args.center_only) + print("Done in %.2f s." % (time.time() - start)) + + # Save + print("Saving results into %s" % args.output_file) + np.save(args.output_file, predictions) + + +if __name__ == '__main__': + main(sys.argv) diff --git a/python/detect.py b/python/detect.py new file mode 100755 index 0000000..691098f --- /dev/null +++ b/python/detect.py @@ -0,0 +1,173 @@ +#!/usr/bin/env python +""" +detector.py is an out-of-the-box windowed detector +callable from the command line. + +By default it configures and runs the Caffe reference ImageNet model. +Note that this model was trained for image classification and not detection, +and finetuning for detection can be expected to improve results. + +The selective_search_ijcv_with_python code required for the selective search +proposal mode is available at + https://github.com/sergeyk/selective_search_ijcv_with_python + +TODO: +- batch up image filenames as well: don't want to load all of them into memory +- come up with a batching scheme that preserved order / keeps a unique ID +""" +import numpy as np +import pandas as pd +import os +import argparse +import time + +import caffe + +CROP_MODES = ['list', 'selective_search'] +COORD_COLS = ['ymin', 'xmin', 'ymax', 'xmax'] + + +def main(argv): + pycaffe_dir = os.path.dirname(__file__) + + parser = argparse.ArgumentParser() + # Required arguments: input and output. + parser.add_argument( + "input_file", + help="Input txt/csv filename. If .txt, must be list of filenames.\ + If .csv, must be comma-separated file with header\ + 'filename, xmin, ymin, xmax, ymax'" + ) + parser.add_argument( + "output_file", + help="Output h5/csv filename. Format depends on extension." + ) + # Optional arguments. + parser.add_argument( + "--model_def", + default=os.path.join(pycaffe_dir, + "../models/bvlc_reference_caffenet/deploy.prototxt.prototxt"), + help="Model definition file." + ) + parser.add_argument( + "--pretrained_model", + default=os.path.join(pycaffe_dir, + "../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"), + help="Trained model weights file." + ) + parser.add_argument( + "--crop_mode", + default="selective_search", + choices=CROP_MODES, + help="How to generate windows for detection." + ) + parser.add_argument( + "--gpu", + action='store_true', + help="Switch for gpu computation." + ) + parser.add_argument( + "--mean_file", + default=os.path.join(pycaffe_dir, + 'caffe/imagenet/ilsvrc_2012_mean.npy'), + help="Data set image mean of H x W x K dimensions (numpy array). " + + "Set to '' for no mean subtraction." + ) + parser.add_argument( + "--input_scale", + type=float, + help="Multiply input features by this scale to finish preprocessing." 
+ ) + parser.add_argument( + "--raw_scale", + type=float, + default=255.0, + help="Multiply raw input by this scale before preprocessing." + ) + parser.add_argument( + "--channel_swap", + default='2,1,0', + help="Order to permute input channels. The default converts " + + "RGB -> BGR since BGR is the Caffe default by way of OpenCV." + + ) + parser.add_argument( + "--context_pad", + type=int, + default='16', + help="Amount of surrounding context to collect in input window." + ) + args = parser.parse_args() + + mean, channel_swap = None, None + if args.mean_file: + mean = np.load(args.mean_file) + if mean.shape[1:] != (1, 1): + mean = mean.mean(1).mean(1) + if args.channel_swap: + channel_swap = [int(s) for s in args.channel_swap.split(',')] + + if args.gpu: + caffe.set_mode_gpu() + print("GPU mode") + else: + caffe.set_mode_cpu() + print("CPU mode") + + # Make detector. + detector = caffe.Detector(args.model_def, args.pretrained_model, mean=mean, + input_scale=args.input_scale, raw_scale=args.raw_scale, + channel_swap=channel_swap, + context_pad=args.context_pad) + + # Load input. + t = time.time() + print("Loading input...") + if args.input_file.lower().endswith('txt'): + with open(args.input_file) as f: + inputs = [_.strip() for _ in f.readlines()] + elif args.input_file.lower().endswith('csv'): + inputs = pd.read_csv(args.input_file, sep=',', dtype={'filename': str}) + inputs.set_index('filename', inplace=True) + else: + raise Exception("Unknown input file type: not in txt or csv.") + + # Detect. + if args.crop_mode == 'list': + # Unpack sequence of (image filename, windows). + images_windows = [ + (ix, inputs.iloc[np.where(inputs.index == ix)][COORD_COLS].values) + for ix in inputs.index.unique() + ] + detections = detector.detect_windows(images_windows) + else: + detections = detector.detect_selective_search(inputs) + print("Processed {} windows in {:.3f} s.".format(len(detections), + time.time() - t)) + + # Collect into dataframe with labeled fields. + df = pd.DataFrame(detections) + df.set_index('filename', inplace=True) + df[COORD_COLS] = pd.DataFrame( + data=np.vstack(df['window']), index=df.index, columns=COORD_COLS) + del(df['window']) + + # Save results. + t = time.time() + if args.output_file.lower().endswith('csv'): + # csv + # Enumerate the class probabilities. + class_cols = ['class{}'.format(x) for x in range(NUM_OUTPUT)] + df[class_cols] = pd.DataFrame( + data=np.vstack(df['feat']), index=df.index, columns=class_cols) + df.to_csv(args.output_file, cols=COORD_COLS + class_cols) + else: + # h5 + df.to_hdf(args.output_file, 'df', mode='w') + print("Saved to {} in {:.3f} s.".format(args.output_file, + time.time() - t)) + + +if __name__ == "__main__": + import sys + main(sys.argv) diff --git a/python/draw_net.py b/python/draw_net.py new file mode 100755 index 0000000..ec76a74 --- /dev/null +++ b/python/draw_net.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +""" +Draw a graph of the net architecture. 
+""" +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from google.protobuf import text_format + +import caffe +import caffe.draw +from caffe.proto import caffe_pb2 + + +def parse_args(): + """Parse input arguments + """ + + parser = ArgumentParser(description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter) + + parser.add_argument('input_net_proto_file', + help='Input network prototxt file') + parser.add_argument('output_image_file', + help='Output image file') + parser.add_argument('--rankdir', + help=('One of TB (top-bottom, i.e., vertical), ' + 'RL (right-left, i.e., horizontal), or another ' + 'valid dot option; see ' + 'http://www.graphviz.org/doc/info/' + 'attrs.html#k:rankdir'), + default='LR') + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + net = caffe_pb2.NetParameter() + text_format.Merge(open(args.input_net_proto_file).read(), net) + print('Drawing net to %s' % args.output_image_file) + caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir) + + +if __name__ == '__main__': + main() diff --git a/python/requirements.txt b/python/requirements.txt new file mode 100755 index 0000000..e7d89e6 --- /dev/null +++ b/python/requirements.txt @@ -0,0 +1,17 @@ +Cython>=0.19.2 +numpy>=1.7.1 +scipy>=0.13.2 +scikit-image>=0.9.3 +matplotlib>=1.3.1 +ipython>=3.0.0 +h5py>=2.2.0 +leveldb>=0.191 +networkx>=1.8.1 +nose>=1.3.0 +pandas>=0.12.0 +python-dateutil>=1.4,<2 +protobuf>=2.5.0 +python-gflags>=2.0 +pyyaml>=3.10 +Pillow>=2.3.0 +six>=1.1.0 \ No newline at end of file diff --git a/sparsity_caltech.py b/sparsity_caltech.py new file mode 100755 index 0000000..7425287 --- /dev/null +++ b/sparsity_caltech.py @@ -0,0 +1,54 @@ +import sys +sys.path.insert(0, './python/') +import caffe +import numpy as np +import pdb +#weights='/home/gitProject/Dynamic-Network-Surgery/models/lenet5/10_lenet_iter_28000.caffemodel' +weights='./models/caltech_caffenet/compressed_alexnet_caltech.caffemodel' +#weights='/home/gitProject/Dynamic-Network-Surgery/models/lenet5/caffe_lenet5_original.caffemodel' +#weights='/home/gitProject/Dynamic-Network-Surgery/models/lenet5/caffe_lenet5_sparse.caffemodel' +proto='./models/caltech_caffenet/train_val_caltech.prototxt' +net=caffe.Net(proto, weights, caffe.TEST) +total=0 +aa=0 +w_m=2 +b_m=3 + +a1=len(np.where(net.params['conv1'][b_m].data != 0)[0]) +a2=len(np.where(net.params['conv1'][w_m].data != 0)[0]) +a3=len(np.where(net.params['conv2'][w_m].data != 0)[0]) +a4=len(np.where(net.params['conv2'][b_m].data != 0)[0]) +a5=len(np.where(net.params['conv3'][w_m].data != 0)[0]) +a6=len(np.where(net.params['conv3'][b_m].data != 0)[0]) +a7=len(np.where(net.params['conv4'][w_m].data != 0)[0]) +a8=len(np.where(net.params['conv4'][b_m].data != 0)[0]) +a9=len(np.where(net.params['conv5'][w_m].data != 0)[0]) +a10=len(np.where(net.params['conv5'][b_m].data != 0)[0]) +a11=len(np.where(net.params['fc6'][b_m].data != 0)[0]) +a12=len(np.where(net.params['fc6'][w_m].data != 0)[0]) +a13=len(np.where(net.params['fc7'][w_m].data != 0)[0]) +a14=len(np.where(net.params['fc7'][b_m].data != 0)[0]) +a15=len(np.where(net.params['fc8*'][w_m].data != 0)[0]) +a16=len(np.where(net.params['fc8*'][b_m].data != 0)[0]) + +b1=net.params['conv1'][0].data.size+net.params['conv1'][1].data.size +b2=net.params['conv2'][0].data.size+net.params['conv2'][1].data.size +b3=net.params['conv3'][0].data.size+net.params['conv3'][1].data.size +b4=net.params['conv4'][0].data.size+net.params['conv4'][1].data.size 
+b5=net.params['conv5'][0].data.size+net.params['conv5'][1].data.size +b6=net.params['fc6'][0].data.size+net.params['fc6'][1].data.size +b7=net.params['fc7'][0].data.size+net.params['fc7'][1].data.size +b8=net.params['fc8*'][0].data.size+net.params['fc8*'][1].data.size + +aa = a1+a2+a3+a4+a5+a6+a7+a8+a9+a10+a11+a12+a13+a14+a15+a16 +total = b1+b2+b3+b4+b5+b6+b7+b8 + +print 'Compression rate :{}% ({}x)'.format(100.- aa*100./total,total*1./aa) +print 'conv1:{}%'.format((a1+a2)*100./b1) +print 'conv2:{}%'.format((a3+a4)*100./b2) +print 'conv3:{}%'.format((a5+a6)*100./b3) +print 'conv4:{}%'.format((a7+a8)*100./b4) +print 'conv5:{}%'.format((a9+a10)*100./b5) +print 'fc6:{}%'.format((a11+a12)*100./b6) +print 'fc7:{}%'.format((a13+a14)*100./b7) +print 'fc8*:{}%'.format((a15+a16)*100./b8) diff --git a/sparsity_lenet300100.py b/sparsity_lenet300100.py new file mode 100755 index 0000000..734cf6e --- /dev/null +++ b/sparsity_lenet300100.py @@ -0,0 +1,37 @@ +import sys +sys.path.insert(0, './python/') +import caffe +import numpy as np +import pdb +#weights='./models/lenet300100/caffe_lenet300100_original.caffemodel' +weights='./models/lenet300100/compressed_lenet300100.caffemodel' +#weights='/home/gitProject/Dynamic-Network-Surgery/models/lenet300100/caffe_lenet300100_sparse.caffemodel' +proto='./models/lenet300100/lenet_train_test.prototxt' +net=caffe.Net(proto, weights, caffe.TEST) +total=0 +aa=0 +# for each layer, a mask is applied to the original weights and bias. +# here, for net.params['ip1'], net.params['ip1'][0] is the weights, net.params['ip1'][1] is the bias, +# net.params['ip1'][2] is the mask for the weights, net.params['ip1'][3] is the mask for the bias. +# if one of the element value in the mask is 0, the corresponding element in network is pruned. 
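+# For example, the surviving fraction of one layer's weights can be read from
+# its mask directly (illustrative sketch following the same convention):
+#   mask = net.params['ip1'][2].data               # weight mask of layer 'ip1'
+#   kept = float(np.count_nonzero(mask)) / mask.size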
+w_m=2 +b_m=3 + +a1=len(np.where(net.params['ip1'][b_m].data != 0)[0]) +a2=len(np.where(net.params['ip1'][w_m].data != 0)[0]) +a3=len(np.where(net.params['ip2'][w_m].data != 0)[0]) +a4=len(np.where(net.params['ip2'][b_m].data != 0)[0]) +a5=len(np.where(net.params['ip3'][b_m].data != 0)[0]) +a6=len(np.where(net.params['ip3'][w_m].data != 0)[0]) + +b1=net.params['ip1'][0].data.size+net.params['ip1'][1].data.size +b2=net.params['ip2'][0].data.size+net.params['ip2'][1].data.size +b3=net.params['ip3'][0].data.size+net.params['ip3'][1].data.size + +aa = a1+a2+a3+a4+a5+a6 +total = b1+b2+b3 + +print 'Compression rate :{}% ({}x)'.format(1- aa*1./total,total*1./aa) +print 'ip1:{}%'.format((a1+a2)*100./b1) +print 'ip2:{}%'.format((a3+a4)*100./b2) +print 'ip3:{}%'.format((a5+a6)*100./b3) diff --git a/sparsity_lenet5.py b/sparsity_lenet5.py new file mode 100755 index 0000000..14db544 --- /dev/null +++ b/sparsity_lenet5.py @@ -0,0 +1,35 @@ +import sys +sys.path.insert(0, './python/') +import caffe +import numpy as np +import pdb +weights='./models/lenet5/compressed_lenet5.caffemodel' +proto='./models/lenet5/lenet_train_test.prototxt' +net=caffe.Net(proto, weights, caffe.TEST) +total=0 +aa=0 +w_m=2 +b_m=3 + +a1=len(np.where(net.params['conv1'][b_m].data != 0)[0]) +a2=len(np.where(net.params['conv1'][w_m].data != 0)[0]) +a3=len(np.where(net.params['conv2'][w_m].data != 0)[0]) +a4=len(np.where(net.params['conv2'][b_m].data != 0)[0]) +a5=len(np.where(net.params['ip1'][b_m].data != 0)[0]) +a6=len(np.where(net.params['ip1'][w_m].data != 0)[0]) +a7=len(np.where(net.params['ip2'][w_m].data != 0)[0]) +a8=len(np.where(net.params['ip2'][b_m].data != 0)[0]) + +b1=net.params['conv1'][0].data.size+net.params['conv1'][1].data.size +b2=net.params['conv2'][0].data.size+net.params['conv2'][1].data.size +b3=net.params['ip1'][0].data.size+net.params['ip1'][1].data.size +b4=net.params['ip2'][0].data.size+net.params['ip2'][1].data.size + +aa = a1+a2+a3+a4+a5+a6+a7+a8 +total = b1+b2+b3+b4 + +print 'Compression rate :{}% ({}x)'.format(1- aa*1./total,total*1./aa) +print 'conv1:{}%'.format((a1+a2)*100./b1) +print 'conv2:{}%'.format((a3+a4)*100./b2) +print 'ip1:{}%'.format((a5+a6)*100./b3) +print 'ip2:{}%'.format((a7+a8)*100./b4) diff --git a/src/caffe/CMakeLists.txt b/src/caffe/CMakeLists.txt new file mode 100755 index 0000000..40e6c11 --- /dev/null +++ b/src/caffe/CMakeLists.txt @@ -0,0 +1,36 @@ +# generate protobuf sources +file(GLOB proto_files proto/*.proto) +caffe_protobuf_generate_cpp_py(${proto_gen_folder} proto_srcs proto_hdrs proto_python ${proto_files}) + +# include python files either to force generation +add_library(proto STATIC ${proto_hdrs} ${proto_srcs} ${proto_python}) +set(Caffe_LINKER_LIBS proto ${Caffe_LINKER_LIBS}) # note, crucial to prepend! 
+caffe_default_properties(proto) + +# --[ Caffe library + +# creates 'test_srcs', 'srcs', 'test_cuda', 'cuda' lists +caffe_pickup_caffe_sources(${PROJECT_SOURCE_DIR}) + +if(HAVE_CUDA) + caffe_cuda_compile(cuda_objs ${cuda}) + list(APPEND srcs ${cuda_objs} ${cuda}) +endif() + +add_library(caffe ${srcs}) +target_link_libraries(caffe proto ${Caffe_LINKER_LIBS}) +caffe_default_properties(caffe) + +# ---[ Tests + add_subdirectory(test) + +# ---[ Install +install(DIRECTORY ${Caffe_INCLUDE_DIR}/caffe DESTINATION include) +install(FILES ${proto_hdrs} DESTINATION include/caffe/proto) +install(TARGETS caffe proto EXPORT CaffeTargets DESTINATION lib) + +file(WRITE ${PROJECT_BINARY_DIR}/__init__.py) +list(APPEND proto_python ${PROJECT_BINARY_DIR}/__init__.py) +install(PROGRAMS ${proto_python} DESTINATION python/caffe/proto) + + diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp new file mode 100755 index 0000000..8450aa1 --- /dev/null +++ b/src/caffe/blob.cpp @@ -0,0 +1,530 @@ +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void Blob::Reshape(const int num, const int channels, const int height, + const int width) { + vector shape(4); + shape[0] = num; + shape[1] = channels; + shape[2] = height; + shape[3] = width; + Reshape(shape); +} + +template +void Blob::Reshape(const vector& shape) { + CHECK_LE(shape.size(), kMaxBlobAxes); + count_ = 1; + shape_.resize(shape.size()); + for (int i = 0; i < shape.size(); ++i) { + CHECK_GE(shape[i], 0); + CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX"; + count_ *= shape[i]; + shape_[i] = shape[i]; + } + if (count_ > capacity_) { + capacity_ = count_; + data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype))); + diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype))); + } +} + +template +void Blob::Reshape(const BlobShape& shape) { + CHECK_LE(shape.dim_size(), kMaxBlobAxes); + vector shape_vec(shape.dim_size()); + for (int i = 0; i < shape.dim_size(); ++i) { + shape_vec[i] = shape.dim(i); + } + Reshape(shape_vec); +} + +template +void Blob::ReshapeLike(const Blob& other) { + Reshape(other.shape()); +} + +template +Blob::Blob(const int num, const int channels, const int height, + const int width) + // capacity_ must be initialized before calling Reshape + : capacity_(0) { + Reshape(num, channels, height, width); +} + +template +Blob::Blob(const vector& shape) + // capacity_ must be initialized before calling Reshape + : capacity_(0) { + Reshape(shape); +} + +template +const Dtype* Blob::cpu_data() const { + CHECK(data_); + return (const Dtype*)data_->cpu_data(); +} + +template +void Blob::set_cpu_data(Dtype* data) { + CHECK(data); + data_->set_cpu_data(data); +} + +template +const Dtype* Blob::gpu_data() const { + CHECK(data_); + return (const Dtype*)data_->gpu_data(); +} + +template +const Dtype* Blob::cpu_diff() const { + CHECK(diff_); + return (const Dtype*)diff_->cpu_data(); +} + +template +const Dtype* Blob::gpu_diff() const { + CHECK(diff_); + return (const Dtype*)diff_->gpu_data(); +} + +template +Dtype* Blob::mutable_cpu_data() { + CHECK(data_); + return static_cast(data_->mutable_cpu_data()); +} + +template +Dtype* Blob::mutable_gpu_data() { + CHECK(data_); + return static_cast(data_->mutable_gpu_data()); +} + +template +Dtype* Blob::mutable_cpu_diff() { + CHECK(diff_); + return static_cast(diff_->mutable_cpu_data()); +} + +template +Dtype* Blob::mutable_gpu_diff() { + CHECK(diff_); + return 
static_cast(diff_->mutable_gpu_data()); +} + +template +void Blob::ShareData(const Blob& other) { + CHECK_EQ(count_, other.count()); + data_ = other.data(); +} + +template +void Blob::ShareDiff(const Blob& other) { + CHECK_EQ(count_, other.count()); + diff_ = other.diff(); +} + +// The "update" method is used for parameter blobs in a Net, which are stored +// as Blob or Blob -- hence we do not define it for +// Blob or Blob. +template <> void Blob::Update() { NOT_IMPLEMENTED; } +template <> void Blob::Update() { NOT_IMPLEMENTED; } + +template +void Blob::Update() { + // We will perform update based on where the data is located. + switch (data_->head()) { + case SyncedMemory::HEAD_AT_CPU: + // perform computation on CPU + caffe_axpy(count_, Dtype(-1), + static_cast(diff_->cpu_data()), + static_cast(data_->mutable_cpu_data())); + break; + case SyncedMemory::HEAD_AT_GPU: + case SyncedMemory::SYNCED: +#ifndef CPU_ONLY + // perform computation on GPU + caffe_gpu_axpy(count_, Dtype(-1), + static_cast(diff_->gpu_data()), + static_cast(data_->mutable_gpu_data())); +#else + NO_GPU; +#endif + break; + default: + LOG(FATAL) << "Syncedmem not initialized."; + } +} + +template <> unsigned int Blob::asum_data() const { + NOT_IMPLEMENTED; + return 0; +} + +template <> int Blob::asum_data() const { + NOT_IMPLEMENTED; + return 0; +} + +template +Dtype Blob::asum_data() const { + if (!data_) { return 0; } + switch (data_->head()) { + case SyncedMemory::HEAD_AT_CPU: + return caffe_cpu_asum(count_, cpu_data()); + case SyncedMemory::HEAD_AT_GPU: + case SyncedMemory::SYNCED: +#ifndef CPU_ONLY + { + Dtype asum; + caffe_gpu_asum(count_, gpu_data(), &asum); + return asum; + } +#else + NO_GPU; +#endif + case SyncedMemory::UNINITIALIZED: + return 0; + default: + LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head(); + } + return 0; +} + +template <> unsigned int Blob::asum_diff() const { + NOT_IMPLEMENTED; + return 0; +} + +template <> int Blob::asum_diff() const { + NOT_IMPLEMENTED; + return 0; +} + +template +Dtype Blob::asum_diff() const { + if (!diff_) { return 0; } + switch (diff_->head()) { + case SyncedMemory::HEAD_AT_CPU: + return caffe_cpu_asum(count_, cpu_diff()); + case SyncedMemory::HEAD_AT_GPU: + case SyncedMemory::SYNCED: +#ifndef CPU_ONLY + { + Dtype asum; + caffe_gpu_asum(count_, gpu_diff(), &asum); + return asum; + } +#else + NO_GPU; +#endif + case SyncedMemory::UNINITIALIZED: + return 0; + default: + LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head(); + } + return 0; +} + +template <> unsigned int Blob::sumsq_data() const { + NOT_IMPLEMENTED; + return 0; +} + +template <> int Blob::sumsq_data() const { + NOT_IMPLEMENTED; + return 0; +} + +template +Dtype Blob::sumsq_data() const { + Dtype sumsq; + const Dtype* data; + if (!data_) { return 0; } + switch (data_->head()) { + case SyncedMemory::HEAD_AT_CPU: + data = cpu_data(); + sumsq = caffe_cpu_dot(count_, data, data); + break; + case SyncedMemory::HEAD_AT_GPU: + case SyncedMemory::SYNCED: +#ifndef CPU_ONLY + data = gpu_data(); + caffe_gpu_dot(count_, data, data, &sumsq); +#else + NO_GPU; +#endif + break; + case SyncedMemory::UNINITIALIZED: + return 0; + default: + LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head(); + } + return sumsq; +} + +template <> unsigned int Blob::sumsq_diff() const { + NOT_IMPLEMENTED; + return 0; +} + +template <> int Blob::sumsq_diff() const { + NOT_IMPLEMENTED; + return 0; +} + +template +Dtype Blob::sumsq_diff() const { + Dtype sumsq; + const Dtype* diff; + if (!diff_) { 
return 0; } + switch (diff_->head()) { + case SyncedMemory::HEAD_AT_CPU: + diff = cpu_diff(); + sumsq = caffe_cpu_dot(count_, diff, diff); + break; + case SyncedMemory::HEAD_AT_GPU: + case SyncedMemory::SYNCED: +#ifndef CPU_ONLY + diff = gpu_diff(); + caffe_gpu_dot(count_, diff, diff, &sumsq); + break; +#else + NO_GPU; +#endif + case SyncedMemory::UNINITIALIZED: + return 0; + default: + LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head(); + } + return sumsq; +} + +template <> void Blob::scale_data(unsigned int scale_factor) { + NOT_IMPLEMENTED; +} + +template <> void Blob::scale_data(int scale_factor) { + NOT_IMPLEMENTED; +} + +template +void Blob::scale_data(Dtype scale_factor) { + Dtype* data; + if (!data_) { return; } + switch (data_->head()) { + case SyncedMemory::HEAD_AT_CPU: + data = mutable_cpu_data(); + caffe_scal(count_, scale_factor, data); + return; + case SyncedMemory::HEAD_AT_GPU: + case SyncedMemory::SYNCED: +#ifndef CPU_ONLY + data = mutable_gpu_data(); + caffe_gpu_scal(count_, scale_factor, data); + return; +#else + NO_GPU; +#endif + case SyncedMemory::UNINITIALIZED: + return; + default: + LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head(); + } +} + +template <> void Blob::scale_diff(unsigned int scale_factor) { + NOT_IMPLEMENTED; +} + +template <> void Blob::scale_diff(int scale_factor) { + NOT_IMPLEMENTED; +} + +template +void Blob::scale_diff(Dtype scale_factor) { + Dtype* diff; + if (!diff_) { return; } + switch (diff_->head()) { + case SyncedMemory::HEAD_AT_CPU: + diff = mutable_cpu_diff(); + caffe_scal(count_, scale_factor, diff); + return; + case SyncedMemory::HEAD_AT_GPU: + case SyncedMemory::SYNCED: +#ifndef CPU_ONLY + diff = mutable_gpu_diff(); + caffe_gpu_scal(count_, scale_factor, diff); + return; +#else + NO_GPU; +#endif + case SyncedMemory::UNINITIALIZED: + return; + default: + LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head(); + } +} + +template +bool Blob::ShapeEquals(const BlobProto& other) { + if (other.has_num() || other.has_channels() || + other.has_height() || other.has_width()) { + // Using deprecated 4D Blob dimensions -- + // shape is (num, channels, height, width). + // Note: we do not use the normal Blob::num(), Blob::channels(), etc. + // methods as these index from the beginning of the blob shape, where legacy + // parameter blobs were indexed from the end of the blob shape (e.g., bias + // Blob shape (1 x 1 x 1 x N), IP layer weight Blob shape (1 x 1 x M x N)). 
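+  // [Editor's note] Illustration of the legacy indexing described above: a
+  // legacy IP bias blob saved as (1 x 1 x 1 x N) and a modern 1-D bias blob
+  // of shape (N) both satisfy this check, because LegacyShape() pads the
+  // missing leading axes with 1, so LegacyShape(-1) == N is what is compared
+  // against other.width().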
+ return shape_.size() <= 4 && + LegacyShape(-4) == other.num() && + LegacyShape(-3) == other.channels() && + LegacyShape(-2) == other.height() && + LegacyShape(-1) == other.width(); + } + vector other_shape(other.shape().dim_size()); + for (int i = 0; i < other.shape().dim_size(); ++i) { + other_shape[i] = other.shape().dim(i); + } + return shape_ == other_shape; +} + +template +void Blob::CopyFrom(const Blob& source, bool copy_diff, bool reshape) { + if (source.count() != count_ || source.shape() != shape_) { + if (reshape) { + ReshapeLike(source); + } else { + LOG(FATAL) << "Trying to copy blobs of different sizes."; + } + } + switch (Caffe::mode()) { + case Caffe::GPU: + if (copy_diff) { + caffe_copy(count_, source.gpu_diff(), + static_cast(diff_->mutable_gpu_data())); + } else { + caffe_copy(count_, source.gpu_data(), + static_cast(data_->mutable_gpu_data())); + } + break; + case Caffe::CPU: + if (copy_diff) { + caffe_copy(count_, source.cpu_diff(), + static_cast(diff_->mutable_cpu_data())); + } else { + caffe_copy(count_, source.cpu_data(), + static_cast(data_->mutable_cpu_data())); + } + break; + default: + LOG(FATAL) << "Unknown caffe mode."; + } +} + +template +void Blob::FromProto(const BlobProto& proto, bool reshape) { + if (reshape) { + vector shape; + if (proto.has_num() || proto.has_channels() || + proto.has_height() || proto.has_width()) { + // Using deprecated 4D Blob dimensions -- + // shape is (num, channels, height, width). + shape.resize(4); + shape[0] = proto.num(); + shape[1] = proto.channels(); + shape[2] = proto.height(); + shape[3] = proto.width(); + } else { + shape.resize(proto.shape().dim_size()); + for (int i = 0; i < proto.shape().dim_size(); ++i) { + shape[i] = proto.shape().dim(i); + } + } + Reshape(shape); + } else { + CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)"; + } + // copy data + Dtype* data_vec = mutable_cpu_data(); + if (proto.double_data_size() > 0) { + CHECK_EQ(count_, proto.double_data_size()); + for (int i = 0; i < count_; ++i) { + data_vec[i] = proto.double_data(i); + } + } else { + CHECK_EQ(count_, proto.data_size()); + for (int i = 0; i < count_; ++i) { + data_vec[i] = proto.data(i); + } + } + if (proto.double_diff_size() > 0) { + CHECK_EQ(count_, proto.double_diff_size()); + Dtype* diff_vec = mutable_cpu_diff(); + for (int i = 0; i < count_; ++i) { + diff_vec[i] = proto.double_diff(i); + } + } else if (proto.diff_size() > 0) { + CHECK_EQ(count_, proto.diff_size()); + Dtype* diff_vec = mutable_cpu_diff(); + for (int i = 0; i < count_; ++i) { + diff_vec[i] = proto.diff(i); + } + } +} + +template <> +void Blob::ToProto(BlobProto* proto, bool write_diff) const { + proto->clear_shape(); + for (int i = 0; i < shape_.size(); ++i) { + proto->mutable_shape()->add_dim(shape_[i]); + } + proto->clear_double_data(); + proto->clear_double_diff(); + const double* data_vec = cpu_data(); + for (int i = 0; i < count_; ++i) { + proto->add_double_data(data_vec[i]); + } + if (write_diff) { + const double* diff_vec = cpu_diff(); + for (int i = 0; i < count_; ++i) { + proto->add_double_diff(diff_vec[i]); + } + } +} + +template <> +void Blob::ToProto(BlobProto* proto, bool write_diff) const { + proto->clear_shape(); + for (int i = 0; i < shape_.size(); ++i) { + proto->mutable_shape()->add_dim(shape_[i]); + } + proto->clear_data(); + proto->clear_diff(); + const float* data_vec = cpu_data(); + for (int i = 0; i < count_; ++i) { + proto->add_data(data_vec[i]); + } + if (write_diff) { + const float* diff_vec = cpu_diff(); + for (int i = 0; i < 
count_; ++i) { + proto->add_diff(diff_vec[i]); + } + } +} + +INSTANTIATE_CLASS(Blob); +template class Blob; +template class Blob; + +} // namespace caffe + diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp new file mode 100755 index 0000000..7077f37 --- /dev/null +++ b/src/caffe/common.cpp @@ -0,0 +1,281 @@ +#include +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/util/rng.hpp" + +namespace caffe { + +// Make sure each thread can have different values. +static boost::thread_specific_ptr thread_instance_; + +Caffe& Caffe::Get() { + if (!thread_instance_.get()) { + thread_instance_.reset(new Caffe()); + } + return *(thread_instance_.get()); +} + +// random seeding +int64_t cluster_seedgen(void) { + int64_t s, seed, pid; + FILE* f = fopen("/dev/urandom", "rb"); + if (f && fread(&seed, 1, sizeof(seed), f) == sizeof(seed)) { + fclose(f); + return seed; + } + + LOG(INFO) << "System entropy source not available, " + "using fallback algorithm to generate seed instead."; + if (f) + fclose(f); + + pid = getpid(); + s = time(NULL); + seed = abs(((s * 181) * ((pid - 83) * 359)) % 104729); + return seed; +} + + +void GlobalInit(int* pargc, char*** pargv) { + // Google flags. + ::gflags::ParseCommandLineFlags(pargc, pargv, true); + // Google logging. + ::google::InitGoogleLogging(*(pargv)[0]); + // Provide a backtrace on segfault. + ::google::InstallFailureSignalHandler(); +} + +#ifdef CPU_ONLY // CPU-only Caffe. + +Caffe::Caffe() + : random_generator_(), mode_(Caffe::CPU), + solver_count_(1), root_solver_(true) { } + +Caffe::~Caffe() { } + +void Caffe::set_random_seed(const unsigned int seed) { + // RNG seed + Get().random_generator_.reset(new RNG(seed)); +} + +void Caffe::SetDevice(const int device_id) { + NO_GPU; +} + +void Caffe::DeviceQuery() { + NO_GPU; +} + + +class Caffe::RNG::Generator { + public: + Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {} + explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {} + caffe::rng_t* rng() { return rng_.get(); } + private: + shared_ptr rng_; +}; + +Caffe::RNG::RNG() : generator_(new Generator()) { } + +Caffe::RNG::RNG(unsigned int seed) : generator_(new Generator(seed)) { } + +Caffe::RNG& Caffe::RNG::operator=(const RNG& other) { + generator_ = other.generator_; + return *this; +} + +void* Caffe::RNG::generator() { + return static_cast(generator_->rng()); +} + +#else // Normal GPU + CPU Caffe. + +Caffe::Caffe() + : cublas_handle_(NULL), curand_generator_(NULL), random_generator_(), + mode_(Caffe::CPU), solver_count_(1), root_solver_(true) { + // Try to create a cublas handler, and report an error if failed (but we will + // keep the program running as one might just want to run CPU code). + if (cublasCreate(&cublas_handle_) != CUBLAS_STATUS_SUCCESS) { + LOG(ERROR) << "Cannot create Cublas handle. Cublas won't be available."; + } + // Try to create a curand handler. + if (curandCreateGenerator(&curand_generator_, CURAND_RNG_PSEUDO_DEFAULT) + != CURAND_STATUS_SUCCESS || + curandSetPseudoRandomGeneratorSeed(curand_generator_, cluster_seedgen()) + != CURAND_STATUS_SUCCESS) { + LOG(ERROR) << "Cannot create Curand generator. 
Curand won't be available."; + } +} + +Caffe::~Caffe() { + if (cublas_handle_) CUBLAS_CHECK(cublasDestroy(cublas_handle_)); + if (curand_generator_) { + CURAND_CHECK(curandDestroyGenerator(curand_generator_)); + } +} + +void Caffe::set_random_seed(const unsigned int seed) { + // Curand seed + static bool g_curand_availability_logged = false; + if (Get().curand_generator_) { + CURAND_CHECK(curandSetPseudoRandomGeneratorSeed(curand_generator(), + seed)); + CURAND_CHECK(curandSetGeneratorOffset(curand_generator(), 0)); + } else { + if (!g_curand_availability_logged) { + LOG(ERROR) << + "Curand not available. Skipping setting the curand seed."; + g_curand_availability_logged = true; + } + } + // RNG seed + Get().random_generator_.reset(new RNG(seed)); +} + +void Caffe::SetDevice(const int device_id) { + int current_device; + CUDA_CHECK(cudaGetDevice(¤t_device)); + if (current_device == device_id) { + return; + } + // The call to cudaSetDevice must come before any calls to Get, which + // may perform initialization using the GPU. + CUDA_CHECK(cudaSetDevice(device_id)); + if (Get().cublas_handle_) CUBLAS_CHECK(cublasDestroy(Get().cublas_handle_)); + if (Get().curand_generator_) { + CURAND_CHECK(curandDestroyGenerator(Get().curand_generator_)); + } + CUBLAS_CHECK(cublasCreate(&Get().cublas_handle_)); + CURAND_CHECK(curandCreateGenerator(&Get().curand_generator_, + CURAND_RNG_PSEUDO_DEFAULT)); + CURAND_CHECK(curandSetPseudoRandomGeneratorSeed(Get().curand_generator_, + cluster_seedgen())); +} + +void Caffe::DeviceQuery() { + cudaDeviceProp prop; + int device; + if (cudaSuccess != cudaGetDevice(&device)) { + printf("No cuda device present.\n"); + return; + } + CUDA_CHECK(cudaGetDeviceProperties(&prop, device)); + LOG(INFO) << "Device id: " << device; + LOG(INFO) << "Major revision number: " << prop.major; + LOG(INFO) << "Minor revision number: " << prop.minor; + LOG(INFO) << "Name: " << prop.name; + LOG(INFO) << "Total global memory: " << prop.totalGlobalMem; + LOG(INFO) << "Total shared memory per block: " << prop.sharedMemPerBlock; + LOG(INFO) << "Total registers per block: " << prop.regsPerBlock; + LOG(INFO) << "Warp size: " << prop.warpSize; + LOG(INFO) << "Maximum memory pitch: " << prop.memPitch; + LOG(INFO) << "Maximum threads per block: " << prop.maxThreadsPerBlock; + LOG(INFO) << "Maximum dimension of block: " + << prop.maxThreadsDim[0] << ", " << prop.maxThreadsDim[1] << ", " + << prop.maxThreadsDim[2]; + LOG(INFO) << "Maximum dimension of grid: " + << prop.maxGridSize[0] << ", " << prop.maxGridSize[1] << ", " + << prop.maxGridSize[2]; + LOG(INFO) << "Clock rate: " << prop.clockRate; + LOG(INFO) << "Total constant memory: " << prop.totalConstMem; + LOG(INFO) << "Texture alignment: " << prop.textureAlignment; + LOG(INFO) << "Concurrent copy and execution: " + << (prop.deviceOverlap ? "Yes" : "No"); + LOG(INFO) << "Number of multiprocessors: " << prop.multiProcessorCount; + LOG(INFO) << "Kernel execution timeout: " + << (prop.kernelExecTimeoutEnabled ? 
"Yes" : "No"); + return; +} + + +class Caffe::RNG::Generator { + public: + Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {} + explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {} + caffe::rng_t* rng() { return rng_.get(); } + private: + shared_ptr rng_; +}; + +Caffe::RNG::RNG() : generator_(new Generator()) { } + +Caffe::RNG::RNG(unsigned int seed) : generator_(new Generator(seed)) { } + +Caffe::RNG& Caffe::RNG::operator=(const RNG& other) { + generator_.reset(other.generator_.get()); + return *this; +} + +void* Caffe::RNG::generator() { + return static_cast(generator_->rng()); +} + +const char* cublasGetErrorString(cublasStatus_t error) { + switch (error) { + case CUBLAS_STATUS_SUCCESS: + return "CUBLAS_STATUS_SUCCESS"; + case CUBLAS_STATUS_NOT_INITIALIZED: + return "CUBLAS_STATUS_NOT_INITIALIZED"; + case CUBLAS_STATUS_ALLOC_FAILED: + return "CUBLAS_STATUS_ALLOC_FAILED"; + case CUBLAS_STATUS_INVALID_VALUE: + return "CUBLAS_STATUS_INVALID_VALUE"; + case CUBLAS_STATUS_ARCH_MISMATCH: + return "CUBLAS_STATUS_ARCH_MISMATCH"; + case CUBLAS_STATUS_MAPPING_ERROR: + return "CUBLAS_STATUS_MAPPING_ERROR"; + case CUBLAS_STATUS_EXECUTION_FAILED: + return "CUBLAS_STATUS_EXECUTION_FAILED"; + case CUBLAS_STATUS_INTERNAL_ERROR: + return "CUBLAS_STATUS_INTERNAL_ERROR"; +#if CUDA_VERSION >= 6000 + case CUBLAS_STATUS_NOT_SUPPORTED: + return "CUBLAS_STATUS_NOT_SUPPORTED"; +#endif +#if CUDA_VERSION >= 6050 + case CUBLAS_STATUS_LICENSE_ERROR: + return "CUBLAS_STATUS_LICENSE_ERROR"; +#endif + } + return "Unknown cublas status"; +} + +const char* curandGetErrorString(curandStatus_t error) { + switch (error) { + case CURAND_STATUS_SUCCESS: + return "CURAND_STATUS_SUCCESS"; + case CURAND_STATUS_VERSION_MISMATCH: + return "CURAND_STATUS_VERSION_MISMATCH"; + case CURAND_STATUS_NOT_INITIALIZED: + return "CURAND_STATUS_NOT_INITIALIZED"; + case CURAND_STATUS_ALLOCATION_FAILED: + return "CURAND_STATUS_ALLOCATION_FAILED"; + case CURAND_STATUS_TYPE_ERROR: + return "CURAND_STATUS_TYPE_ERROR"; + case CURAND_STATUS_OUT_OF_RANGE: + return "CURAND_STATUS_OUT_OF_RANGE"; + case CURAND_STATUS_LENGTH_NOT_MULTIPLE: + return "CURAND_STATUS_LENGTH_NOT_MULTIPLE"; + case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED: + return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED"; + case CURAND_STATUS_LAUNCH_FAILURE: + return "CURAND_STATUS_LAUNCH_FAILURE"; + case CURAND_STATUS_PREEXISTING_FAILURE: + return "CURAND_STATUS_PREEXISTING_FAILURE"; + case CURAND_STATUS_INITIALIZATION_FAILED: + return "CURAND_STATUS_INITIALIZATION_FAILED"; + case CURAND_STATUS_ARCH_MISMATCH: + return "CURAND_STATUS_ARCH_MISMATCH"; + case CURAND_STATUS_INTERNAL_ERROR: + return "CURAND_STATUS_INTERNAL_ERROR"; + } + return "Unknown curand status"; +} + +#endif // CPU_ONLY + +} // namespace caffe diff --git a/src/caffe/data_reader.cpp b/src/caffe/data_reader.cpp new file mode 100755 index 0000000..1637820 --- /dev/null +++ b/src/caffe/data_reader.cpp @@ -0,0 +1,119 @@ +#include +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/data_layers.hpp" +#include "caffe/data_reader.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +using boost::weak_ptr; + +map > DataReader::bodies_; +static boost::mutex bodies_mutex_; + +DataReader::DataReader(const LayerParameter& param) + : queue_pair_(new QueuePair( // + param.data_param().prefetch() * param.data_param().batch_size())) { + // Get or create a body + boost::mutex::scoped_lock lock(bodies_mutex_); + string key = source_key(param); + weak_ptr& weak = bodies_[key]; + body_ 
= weak.lock(); + if (!body_) { + body_.reset(new Body(param)); + bodies_[key] = weak_ptr(body_); + } + body_->new_queue_pairs_.push(queue_pair_); +} + +DataReader::~DataReader() { + string key = source_key(body_->param_); + body_.reset(); + boost::mutex::scoped_lock lock(bodies_mutex_); + if (bodies_[key].expired()) { + bodies_.erase(key); + } +} + +// + +DataReader::QueuePair::QueuePair(int size) { + // Initialize the free queue with requested number of datums + for (int i = 0; i < size; ++i) { + free_.push(new Datum()); + } +} + +DataReader::QueuePair::~QueuePair() { + Datum* datum; + while (free_.try_pop(&datum)) { + delete datum; + } + while (full_.try_pop(&datum)) { + delete datum; + } +} + +// + +DataReader::Body::Body(const LayerParameter& param) + : param_(param), + new_queue_pairs_() { + StartInternalThread(); +} + +DataReader::Body::~Body() { + StopInternalThread(); +} + +void DataReader::Body::InternalThreadEntry() { + shared_ptr db(db::GetDB(param_.data_param().backend())); + db->Open(param_.data_param().source(), db::READ); + shared_ptr cursor(db->NewCursor()); + vector > qps; + try { + int solver_count = param_.phase() == TRAIN ? Caffe::solver_count() : 1; + + // To ensure deterministic runs, only start running once all solvers + // are ready. But solvers need to peek on one item during initialization, + // so read one item, then wait for the next solver. + for (int i = 0; i < solver_count; ++i) { + shared_ptr qp(new_queue_pairs_.pop()); + read_one(cursor.get(), qp.get()); + qps.push_back(qp); + } + // Main loop + while (!must_stop()) { + for (int i = 0; i < solver_count; ++i) { + read_one(cursor.get(), qps[i].get()); + } + // Check no additional readers have been created. This can happen if + // more than one net is trained at a time per process, whether single + // or multi solver. It might also happen if two data layers have same + // name and same source. + CHECK_EQ(new_queue_pairs_.size(), 0); + } + } catch (boost::thread_interrupted&) { + // Interrupted exception is expected on shutdown + } +} + +void DataReader::Body::read_one(db::Cursor* cursor, QueuePair* qp) { + Datum* datum = qp->free_.pop(); + // TODO deserialize in-place instead of copy? 
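+  // [Editor's note] free_ and full_ form a recycling pair: read_one() pops an
+  // empty Datum from free_, fills it from the DB cursor, and pushes it onto
+  // full_; the consuming data layer later returns the Datum to free_. This
+  // bounds memory use to roughly prefetch * batch_size Datums per source.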
+ datum->ParseFromString(cursor->value()); + qp->full_.push(datum); + + // go to the next iter + cursor->Next(); + if (!cursor->valid()) { + DLOG(INFO) << "Restarting data prefetching from start."; + cursor->SeekToFirst(); + } +} + +} // namespace caffe diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp new file mode 100755 index 0000000..4666d9b --- /dev/null +++ b/src/caffe/data_transformer.cpp @@ -0,0 +1,531 @@ +#include + +#include +#include + +#include "caffe/data_transformer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/rng.hpp" + +namespace caffe { + +template +DataTransformer::DataTransformer(const TransformationParameter& param, + Phase phase) + : param_(param), phase_(phase) { + // check if we want to use mean_file + if (param_.has_mean_file()) { + CHECK_EQ(param_.mean_value_size(), 0) << + "Cannot specify mean_file and mean_value at the same time"; + const string& mean_file = param.mean_file(); + if (Caffe::root_solver()) { + LOG(INFO) << "Loading mean file from: " << mean_file; + } + BlobProto blob_proto; + ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto); + data_mean_.FromProto(blob_proto); + } + // check if we want to use mean_value + if (param_.mean_value_size() > 0) { + CHECK(param_.has_mean_file() == false) << + "Cannot specify mean_file and mean_value at the same time"; + for (int c = 0; c < param_.mean_value_size(); ++c) { + mean_values_.push_back(param_.mean_value(c)); + } + } +} + +template +void DataTransformer::Transform(const Datum& datum, + Dtype* transformed_data) { + const string& data = datum.data(); + const int datum_channels = datum.channels(); + const int datum_height = datum.height(); + const int datum_width = datum.width(); + + const int crop_size = param_.crop_size(); + const Dtype scale = param_.scale(); + const bool do_mirror = param_.mirror() && Rand(2); + const bool has_mean_file = param_.has_mean_file(); + const bool has_uint8 = data.size() > 0; + const bool has_mean_values = mean_values_.size() > 0; + + CHECK_GT(datum_channels, 0); + CHECK_GE(datum_height, crop_size); + CHECK_GE(datum_width, crop_size); + + Dtype* mean = NULL; + if (has_mean_file) { + CHECK_EQ(datum_channels, data_mean_.channels()); + CHECK_EQ(datum_height, data_mean_.height()); + CHECK_EQ(datum_width, data_mean_.width()); + mean = data_mean_.mutable_cpu_data(); + } + if (has_mean_values) { + CHECK(mean_values_.size() == 1 || mean_values_.size() == datum_channels) << + "Specify either 1 mean_value or as many as channels: " << datum_channels; + if (datum_channels > 1 && mean_values_.size() == 1) { + // Replicate the mean_value for simplicity + for (int c = 1; c < datum_channels; ++c) { + mean_values_.push_back(mean_values_[0]); + } + } + } + + int height = datum_height; + int width = datum_width; + + int h_off = 0; + int w_off = 0; + if (crop_size) { + height = crop_size; + width = crop_size; + // We only do random crop when we do training. 
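+    // [Editor's note] Worked example: for a 28x28 datum with crop_size 24,
+    // training draws h_off and w_off uniformly from {0, ..., 4} via
+    // Rand(28 - 24 + 1), while testing always takes the centered crop with
+    // h_off = w_off = (28 - 24) / 2 = 2.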
+ if (phase_ == TRAIN) { + h_off = Rand(datum_height - crop_size + 1); + w_off = Rand(datum_width - crop_size + 1); + } else { + h_off = (datum_height - crop_size) / 2; + w_off = (datum_width - crop_size) / 2; + } + } + + Dtype datum_element; + int top_index, data_index; + for (int c = 0; c < datum_channels; ++c) { + for (int h = 0; h < height; ++h) { + for (int w = 0; w < width; ++w) { + data_index = (c * datum_height + h_off + h) * datum_width + w_off + w; + if (do_mirror) { + top_index = (c * height + h) * width + (width - 1 - w); + } else { + top_index = (c * height + h) * width + w; + } + if (has_uint8) { + datum_element = + static_cast(static_cast(data[data_index])); + } else { + datum_element = datum.float_data(data_index); + } + if (has_mean_file) { + transformed_data[top_index] = + (datum_element - mean[data_index]) * scale; + } else { + if (has_mean_values) { + transformed_data[top_index] = + (datum_element - mean_values_[c]) * scale; + } else { + transformed_data[top_index] = datum_element * scale; + } + } + } + } + } +} + +template +void DataTransformer::Transform(const Datum& datum, + Blob* transformed_blob) { + // If datum is encoded, decoded and transform the cv::image. + if (datum.encoded()) { + CHECK(!(param_.force_color() && param_.force_gray())) + << "cannot set both force_color and force_gray"; + cv::Mat cv_img; + if (param_.force_color() || param_.force_gray()) { + // If force_color then decode in color otherwise decode in gray. + cv_img = DecodeDatumToCVMat(datum, param_.force_color()); + } else { + cv_img = DecodeDatumToCVMatNative(datum); + } + // Transform the cv::image into blob. + return Transform(cv_img, transformed_blob); + } else { + if (param_.force_color() || param_.force_gray()) { + LOG(ERROR) << "force_color and force_gray only for encoded datum"; + } + } + + const int crop_size = param_.crop_size(); + const int datum_channels = datum.channels(); + const int datum_height = datum.height(); + const int datum_width = datum.width(); + + // Check dimensions. 
+ const int channels = transformed_blob->channels(); + const int height = transformed_blob->height(); + const int width = transformed_blob->width(); + const int num = transformed_blob->num(); + + CHECK_EQ(channels, datum_channels); + CHECK_LE(height, datum_height); + CHECK_LE(width, datum_width); + CHECK_GE(num, 1); + + if (crop_size) { + CHECK_EQ(crop_size, height); + CHECK_EQ(crop_size, width); + } else { + CHECK_EQ(datum_height, height); + CHECK_EQ(datum_width, width); + } + + Dtype* transformed_data = transformed_blob->mutable_cpu_data(); + Transform(datum, transformed_data); +} + +template +void DataTransformer::Transform(const vector & datum_vector, + Blob* transformed_blob) { + const int datum_num = datum_vector.size(); + const int num = transformed_blob->num(); + const int channels = transformed_blob->channels(); + const int height = transformed_blob->height(); + const int width = transformed_blob->width(); + + CHECK_GT(datum_num, 0) << "There is no datum to add"; + CHECK_LE(datum_num, num) << + "The size of datum_vector must be no greater than transformed_blob->num()"; + Blob uni_blob(1, channels, height, width); + for (int item_id = 0; item_id < datum_num; ++item_id) { + int offset = transformed_blob->offset(item_id); + uni_blob.set_cpu_data(transformed_blob->mutable_cpu_data() + offset); + Transform(datum_vector[item_id], &uni_blob); + } +} + +template +void DataTransformer::Transform(const vector & mat_vector, + Blob* transformed_blob) { + const int mat_num = mat_vector.size(); + const int num = transformed_blob->num(); + const int channels = transformed_blob->channels(); + const int height = transformed_blob->height(); + const int width = transformed_blob->width(); + + CHECK_GT(mat_num, 0) << "There is no MAT to add"; + CHECK_EQ(mat_num, num) << + "The size of mat_vector must be equals to transformed_blob->num()"; + Blob uni_blob(1, channels, height, width); + for (int item_id = 0; item_id < mat_num; ++item_id) { + int offset = transformed_blob->offset(item_id); + uni_blob.set_cpu_data(transformed_blob->mutable_cpu_data() + offset); + Transform(mat_vector[item_id], &uni_blob); + } +} + +template +void DataTransformer::Transform(const cv::Mat& cv_img, + Blob* transformed_blob) { + const int crop_size = param_.crop_size(); + const int img_channels = cv_img.channels(); + const int img_height = cv_img.rows; + const int img_width = cv_img.cols; + + // Check dimensions. 
+ const int channels = transformed_blob->channels(); + const int height = transformed_blob->height(); + const int width = transformed_blob->width(); + const int num = transformed_blob->num(); + + CHECK_EQ(channels, img_channels); + CHECK_LE(height, img_height); + CHECK_LE(width, img_width); + CHECK_GE(num, 1); + + CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte"; + + const Dtype scale = param_.scale(); + const bool do_mirror = param_.mirror() && Rand(2); + const bool has_mean_file = param_.has_mean_file(); + const bool has_mean_values = mean_values_.size() > 0; + + CHECK_GT(img_channels, 0); + CHECK_GE(img_height, crop_size); + CHECK_GE(img_width, crop_size); + + Dtype* mean = NULL; + if (has_mean_file) { + CHECK_EQ(img_channels, data_mean_.channels()); + CHECK_EQ(img_height, data_mean_.height()); + CHECK_EQ(img_width, data_mean_.width()); + mean = data_mean_.mutable_cpu_data(); + } + if (has_mean_values) { + CHECK(mean_values_.size() == 1 || mean_values_.size() == img_channels) << + "Specify either 1 mean_value or as many as channels: " << img_channels; + if (img_channels > 1 && mean_values_.size() == 1) { + // Replicate the mean_value for simplicity + for (int c = 1; c < img_channels; ++c) { + mean_values_.push_back(mean_values_[0]); + } + } + } + + int h_off = 0; + int w_off = 0; + cv::Mat cv_cropped_img = cv_img; + if (crop_size) { + CHECK_EQ(crop_size, height); + CHECK_EQ(crop_size, width); + // We only do random crop when we do training. + if (phase_ == TRAIN) { + h_off = Rand(img_height - crop_size + 1); + w_off = Rand(img_width - crop_size + 1); + } else { + h_off = (img_height - crop_size) / 2; + w_off = (img_width - crop_size) / 2; + } + cv::Rect roi(w_off, h_off, crop_size, crop_size); + cv_cropped_img = cv_img(roi); + } else { + CHECK_EQ(img_height, height); + CHECK_EQ(img_width, width); + } + + CHECK(cv_cropped_img.data); + + Dtype* transformed_data = transformed_blob->mutable_cpu_data(); + int top_index; + for (int h = 0; h < height; ++h) { + const uchar* ptr = cv_cropped_img.ptr(h); + int img_index = 0; + for (int w = 0; w < width; ++w) { + for (int c = 0; c < img_channels; ++c) { + if (do_mirror) { + top_index = (c * height + h) * width + (width - 1 - w); + } else { + top_index = (c * height + h) * width + w; + } + // int top_index = (c * height + h) * width + w; + Dtype pixel = static_cast(ptr[img_index++]); + if (has_mean_file) { + int mean_index = (c * img_height + h_off + h) * img_width + w_off + w; + transformed_data[top_index] = + (pixel - mean[mean_index]) * scale; + } else { + if (has_mean_values) { + transformed_data[top_index] = + (pixel - mean_values_[c]) * scale; + } else { + transformed_data[top_index] = pixel * scale; + } + } + } + } + } +} + +template +void DataTransformer::Transform(Blob* input_blob, + Blob* transformed_blob) { + const int crop_size = param_.crop_size(); + const int input_num = input_blob->num(); + const int input_channels = input_blob->channels(); + const int input_height = input_blob->height(); + const int input_width = input_blob->width(); + + if (transformed_blob->count() == 0) { + // Initialize transformed_blob with the right shape. 
+ if (crop_size) { + transformed_blob->Reshape(input_num, input_channels, + crop_size, crop_size); + } else { + transformed_blob->Reshape(input_num, input_channels, + input_height, input_width); + } + } + + const int num = transformed_blob->num(); + const int channels = transformed_blob->channels(); + const int height = transformed_blob->height(); + const int width = transformed_blob->width(); + const int size = transformed_blob->count(); + + CHECK_LE(input_num, num); + CHECK_EQ(input_channels, channels); + CHECK_GE(input_height, height); + CHECK_GE(input_width, width); + + + const Dtype scale = param_.scale(); + const bool do_mirror = param_.mirror() && Rand(2); + const bool has_mean_file = param_.has_mean_file(); + const bool has_mean_values = mean_values_.size() > 0; + + int h_off = 0; + int w_off = 0; + if (crop_size) { + CHECK_EQ(crop_size, height); + CHECK_EQ(crop_size, width); + // We only do random crop when we do training. + if (phase_ == TRAIN) { + h_off = Rand(input_height - crop_size + 1); + w_off = Rand(input_width - crop_size + 1); + } else { + h_off = (input_height - crop_size) / 2; + w_off = (input_width - crop_size) / 2; + } + } else { + CHECK_EQ(input_height, height); + CHECK_EQ(input_width, width); + } + + Dtype* input_data = input_blob->mutable_cpu_data(); + if (has_mean_file) { + CHECK_EQ(input_channels, data_mean_.channels()); + CHECK_EQ(input_height, data_mean_.height()); + CHECK_EQ(input_width, data_mean_.width()); + for (int n = 0; n < input_num; ++n) { + int offset = input_blob->offset(n); + caffe_sub(data_mean_.count(), input_data + offset, + data_mean_.cpu_data(), input_data + offset); + } + } + + if (has_mean_values) { + CHECK(mean_values_.size() == 1 || mean_values_.size() == input_channels) << + "Specify either 1 mean_value or as many as channels: " << input_channels; + if (mean_values_.size() == 1) { + caffe_add_scalar(input_blob->count(), -(mean_values_[0]), input_data); + } else { + for (int n = 0; n < input_num; ++n) { + for (int c = 0; c < input_channels; ++c) { + int offset = input_blob->offset(n, c); + caffe_add_scalar(input_height * input_width, -(mean_values_[c]), + input_data + offset); + } + } + } + } + + Dtype* transformed_data = transformed_blob->mutable_cpu_data(); + + for (int n = 0; n < input_num; ++n) { + int top_index_n = n * channels; + int data_index_n = n * channels; + for (int c = 0; c < channels; ++c) { + int top_index_c = (top_index_n + c) * height; + int data_index_c = (data_index_n + c) * input_height + h_off; + for (int h = 0; h < height; ++h) { + int top_index_h = (top_index_c + h) * width; + int data_index_h = (data_index_c + h) * input_width + w_off; + if (do_mirror) { + int top_index_w = top_index_h + width - 1; + for (int w = 0; w < width; ++w) { + transformed_data[top_index_w-w] = input_data[data_index_h + w]; + } + } else { + for (int w = 0; w < width; ++w) { + transformed_data[top_index_h + w] = input_data[data_index_h + w]; + } + } + } + } + } + if (scale != Dtype(1)) { + DLOG(INFO) << "Scale: " << scale; + caffe_scal(size, scale, transformed_data); + } +} + +template +vector DataTransformer::InferBlobShape(const Datum& datum) { + if (datum.encoded()) { + CHECK(!(param_.force_color() && param_.force_gray())) + << "cannot set both force_color and force_gray"; + cv::Mat cv_img; + if (param_.force_color() || param_.force_gray()) { + // If force_color then decode in color otherwise decode in gray. 
+ cv_img = DecodeDatumToCVMat(datum, param_.force_color()); + } else { + cv_img = DecodeDatumToCVMatNative(datum); + } + // InferBlobShape using the cv::image. + return InferBlobShape(cv_img); + } + + const int crop_size = param_.crop_size(); + const int datum_channels = datum.channels(); + const int datum_height = datum.height(); + const int datum_width = datum.width(); + // Check dimensions. + CHECK_GT(datum_channels, 0); + CHECK_GE(datum_height, crop_size); + CHECK_GE(datum_width, crop_size); + // Build BlobShape. + vector shape(4); + shape[0] = 1; + shape[1] = datum_channels; + shape[2] = (crop_size)? crop_size: datum_height; + shape[3] = (crop_size)? crop_size: datum_width; + return shape; +} + +template +vector DataTransformer::InferBlobShape( + const vector & datum_vector) { + const int num = datum_vector.size(); + CHECK_GT(num, 0) << "There is no datum to in the vector"; + // Use first datum in the vector to InferBlobShape. + vector shape = InferBlobShape(datum_vector[0]); + // Adjust num to the size of the vector. + shape[0] = num; + return shape; +} + +template +vector DataTransformer::InferBlobShape(const cv::Mat& cv_img) { + const int crop_size = param_.crop_size(); + const int img_channels = cv_img.channels(); + const int img_height = cv_img.rows; + const int img_width = cv_img.cols; + // Check dimensions. + CHECK_GT(img_channels, 0); + CHECK_GE(img_height, crop_size); + CHECK_GE(img_width, crop_size); + // Build BlobShape. + vector shape(4); + shape[0] = 1; + shape[1] = img_channels; + shape[2] = (crop_size)? crop_size: img_height; + shape[3] = (crop_size)? crop_size: img_width; + return shape; +} + +template +vector DataTransformer::InferBlobShape( + const vector & mat_vector) { + const int num = mat_vector.size(); + CHECK_GT(num, 0) << "There is no cv_img to in the vector"; + // Use first cv_img in the vector to InferBlobShape. + vector shape = InferBlobShape(mat_vector[0]); + // Adjust num to the size of the vector. 
+ shape[0] = num; + return shape; +} + +template +void DataTransformer::InitRand() { + const bool needs_rand = param_.mirror() || + (phase_ == TRAIN && param_.crop_size()); + if (needs_rand) { + const unsigned int rng_seed = caffe_rng_rand(); + rng_.reset(new Caffe::RNG(rng_seed)); + } else { + rng_.reset(); + } +} + +template +int DataTransformer::Rand(int n) { + CHECK(rng_); + CHECK_GT(n, 0); + caffe::rng_t* rng = + static_cast(rng_->generator()); + return ((*rng)() % n); +} + +INSTANTIATE_CLASS(DataTransformer); + +} // namespace caffe diff --git a/src/caffe/internal_thread.cpp b/src/caffe/internal_thread.cpp new file mode 100755 index 0000000..104884e --- /dev/null +++ b/src/caffe/internal_thread.cpp @@ -0,0 +1,66 @@ +#include +#include + +#include "caffe/internal_thread.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +InternalThread::~InternalThread() { + StopInternalThread(); +} + +bool InternalThread::is_started() const { + return thread_ && thread_->joinable(); +} + +bool InternalThread::must_stop() { + return thread_ && thread_->interruption_requested(); +} + +void InternalThread::StartInternalThread() { + CHECK(!is_started()) << "Threads should persist and not be restarted."; + + int device = 0; +#ifndef CPU_ONLY + CUDA_CHECK(cudaGetDevice(&device)); +#endif + Caffe::Brew mode = Caffe::mode(); + int rand_seed = caffe_rng_rand(); + int solver_count = Caffe::solver_count(); + bool root_solver = Caffe::root_solver(); + + try { + thread_.reset(new boost::thread(&InternalThread::entry, this, device, mode, + rand_seed, solver_count, root_solver)); + } catch (std::exception& e) { + LOG(FATAL) << "Thread exception: " << e.what(); + } +} + +void InternalThread::entry(int device, Caffe::Brew mode, int rand_seed, + int solver_count, bool root_solver) { +#ifndef CPU_ONLY + CUDA_CHECK(cudaSetDevice(device)); +#endif + Caffe::set_mode(mode); + Caffe::set_random_seed(rand_seed); + Caffe::set_solver_count(solver_count); + Caffe::set_root_solver(root_solver); + + InternalThreadEntry(); +} + +void InternalThread::StopInternalThread() { + if (is_started()) { + thread_->interrupt(); + try { + thread_->join(); + } catch (boost::thread_interrupted&) { + } catch (std::exception& e) { + LOG(FATAL) << "Thread exception: " << e.what(); + } + } +} + +} // namespace caffe diff --git a/src/caffe/layer.cpp b/src/caffe/layer.cpp new file mode 100755 index 0000000..3b91289 --- /dev/null +++ b/src/caffe/layer.cpp @@ -0,0 +1,27 @@ +#include +#include "caffe/layer.hpp" + +namespace caffe { + +template +void Layer::InitMutex() { + forward_mutex_.reset(new boost::mutex()); +} + +template +void Layer::Lock() { + if (IsShared()) { + forward_mutex_->lock(); + } +} + +template +void Layer::Unlock() { + if (IsShared()) { + forward_mutex_->unlock(); + } +} + +INSTANTIATE_CLASS(Layer); + +} // namespace caffe diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp new file mode 100755 index 0000000..24a1fe9 --- /dev/null +++ b/src/caffe/layer_factory.cpp @@ -0,0 +1,209 @@ +// Make sure we include Python.h before any system header +// to avoid _POSIX_C_SOURCE redefinition +#ifdef WITH_PYTHON_LAYER +#include +#endif +#include + +#include "caffe/layer.hpp" +#include "caffe/layer_factory.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/vision_layers.hpp" + +#ifdef WITH_PYTHON_LAYER +#include "caffe/python_layer.hpp" +#endif + +namespace caffe { + +// Get convolution layer according to engine. 
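+// [Editor's note] Each Get*Layer creator below is registered via
+// REGISTER_LAYER_CREATOR and later looked up by type name when a net is
+// built, roughly:
+//   shared_ptr<Layer<Dtype> > layer = LayerRegistry<Dtype>::CreateLayer(param);
+// The creator then selects the CAFFE or (if compiled in) CUDNN engine.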
+template +shared_ptr > GetConvolutionLayer( + const LayerParameter& param) { + ConvolutionParameter_Engine engine = param.convolution_param().engine(); + if (engine == ConvolutionParameter_Engine_DEFAULT) { + engine = ConvolutionParameter_Engine_CAFFE; +#ifdef USE_CUDNN + engine = ConvolutionParameter_Engine_CUDNN; +#endif + } + if (engine == ConvolutionParameter_Engine_CAFFE) { + return shared_ptr >(new ConvolutionLayer(param)); +#ifdef USE_CUDNN + } else if (engine == ConvolutionParameter_Engine_CUDNN) { + return shared_ptr >(new CuDNNConvolutionLayer(param)); +#endif + } else { + LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + } +} + +REGISTER_LAYER_CREATOR(Convolution, GetConvolutionLayer); + +/************ For dynamic network surgery ***************/ +template +shared_ptr > GetCConvolutionLayer( + const LayerParameter& param) { + ConvolutionParameter_Engine engine = param.convolution_param().engine(); + if (engine == ConvolutionParameter_Engine_DEFAULT) { + engine = ConvolutionParameter_Engine_CAFFE; +#ifdef USE_CUDNN + engine = ConvolutionParameter_Engine_CUDNN; +#endif + } + if (engine == ConvolutionParameter_Engine_CAFFE) { + return shared_ptr >(new CConvolutionLayer(param)); +#ifdef USE_CUDNN + } else if (engine == CConvolutionParameter_Engine_CUDNN) { + return shared_ptr >(new CuDNNConvolutionLayer(param)); +#endif + } else { + LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + } +} + +REGISTER_LAYER_CREATOR(CConvolution, GetCConvolutionLayer); +/********************************************************/ + +// Get pooling layer according to engine. +template +shared_ptr > GetPoolingLayer(const LayerParameter& param) { + PoolingParameter_Engine engine = param.pooling_param().engine(); + if (engine == PoolingParameter_Engine_DEFAULT) { + engine = PoolingParameter_Engine_CAFFE; +#ifdef USE_CUDNN + engine = PoolingParameter_Engine_CUDNN; +#endif + } + if (engine == PoolingParameter_Engine_CAFFE) { + return shared_ptr >(new PoolingLayer(param)); +#ifdef USE_CUDNN + } else if (engine == PoolingParameter_Engine_CUDNN) { + PoolingParameter p_param = param.pooling_param(); + if (p_param.pad() || p_param.pad_h() || p_param.pad_w() || + param.top_size() > 1) { + LOG(INFO) << "CUDNN does not support padding or multiple tops. " + << "Using Caffe's own pooling layer."; + return shared_ptr >(new PoolingLayer(param)); + } + return shared_ptr >(new CuDNNPoolingLayer(param)); +#endif + } else { + LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + } +} + +REGISTER_LAYER_CREATOR(Pooling, GetPoolingLayer); + +// Get relu layer according to engine. +template +shared_ptr > GetReLULayer(const LayerParameter& param) { + ReLUParameter_Engine engine = param.relu_param().engine(); + if (engine == ReLUParameter_Engine_DEFAULT) { + engine = ReLUParameter_Engine_CAFFE; +#ifdef USE_CUDNN + engine = ReLUParameter_Engine_CUDNN; +#endif + } + if (engine == ReLUParameter_Engine_CAFFE) { + return shared_ptr >(new ReLULayer(param)); +#ifdef USE_CUDNN + } else if (engine == ReLUParameter_Engine_CUDNN) { + return shared_ptr >(new CuDNNReLULayer(param)); +#endif + } else { + LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + } +} + +REGISTER_LAYER_CREATOR(ReLU, GetReLULayer); + +// Get sigmoid layer according to engine. 
+template +shared_ptr > GetSigmoidLayer(const LayerParameter& param) { + SigmoidParameter_Engine engine = param.sigmoid_param().engine(); + if (engine == SigmoidParameter_Engine_DEFAULT) { + engine = SigmoidParameter_Engine_CAFFE; +#ifdef USE_CUDNN + engine = SigmoidParameter_Engine_CUDNN; +#endif + } + if (engine == SigmoidParameter_Engine_CAFFE) { + return shared_ptr >(new SigmoidLayer(param)); +#ifdef USE_CUDNN + } else if (engine == SigmoidParameter_Engine_CUDNN) { + return shared_ptr >(new CuDNNSigmoidLayer(param)); +#endif + } else { + LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + } +} + +REGISTER_LAYER_CREATOR(Sigmoid, GetSigmoidLayer); + +// Get softmax layer according to engine. +template +shared_ptr > GetSoftmaxLayer(const LayerParameter& param) { + SoftmaxParameter_Engine engine = param.softmax_param().engine(); + if (engine == SoftmaxParameter_Engine_DEFAULT) { + engine = SoftmaxParameter_Engine_CAFFE; +#ifdef USE_CUDNN + engine = SoftmaxParameter_Engine_CUDNN; +#endif + } + if (engine == SoftmaxParameter_Engine_CAFFE) { + return shared_ptr >(new SoftmaxLayer(param)); +#ifdef USE_CUDNN + } else if (engine == SoftmaxParameter_Engine_CUDNN) { + return shared_ptr >(new CuDNNSoftmaxLayer(param)); +#endif + } else { + LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + } +} + +REGISTER_LAYER_CREATOR(Softmax, GetSoftmaxLayer); + +// Get tanh layer according to engine. +template +shared_ptr > GetTanHLayer(const LayerParameter& param) { + TanHParameter_Engine engine = param.tanh_param().engine(); + if (engine == TanHParameter_Engine_DEFAULT) { + engine = TanHParameter_Engine_CAFFE; +#ifdef USE_CUDNN + engine = TanHParameter_Engine_CUDNN; +#endif + } + if (engine == TanHParameter_Engine_CAFFE) { + return shared_ptr >(new TanHLayer(param)); +#ifdef USE_CUDNN + } else if (engine == TanHParameter_Engine_CUDNN) { + return shared_ptr >(new CuDNNTanHLayer(param)); +#endif + } else { + LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + } +} + +REGISTER_LAYER_CREATOR(TanH, GetTanHLayer); + +#ifdef WITH_PYTHON_LAYER +template +shared_ptr > GetPythonLayer(const LayerParameter& param) { + Py_Initialize(); + try { + bp::object module = bp::import(param.python_param().module().c_str()); + bp::object layer = module.attr(param.python_param().layer().c_str())(param); + return bp::extract > >(layer)(); + } catch (bp::error_already_set) { + PyErr_Print(); + throw; + } +} + +REGISTER_LAYER_CREATOR(Python, GetPythonLayer); +#endif + +// Layers that use their constructor as their default creator should be +// registered in their corresponding cpp files. Do not register them here. 
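+// [Editor's note] For such layers the per-file registration typically looks
+// like the AbsVal layer below:
+//   INSTANTIATE_CLASS(AbsValLayer);
+//   REGISTER_LAYER_CLASS(AbsVal);
+// which registers a default creator that simply invokes the constructor.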
+} // namespace caffe diff --git a/src/caffe/layers/absval_layer.cpp b/src/caffe/layers/absval_layer.cpp new file mode 100755 index 0000000..5ce28c9 --- /dev/null +++ b/src/caffe/layers/absval_layer.cpp @@ -0,0 +1,45 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/neuron_layers.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void AbsValLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + NeuronLayer::LayerSetUp(bottom, top); + CHECK_NE(top[0], bottom[0]) << this->type() << " Layer does not " + "allow in-place computation."; +} + +template +void AbsValLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + const int count = top[0]->count(); + Dtype* top_data = top[0]->mutable_cpu_data(); + caffe_abs(count, bottom[0]->cpu_data(), top_data); +} + +template +void AbsValLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const int count = top[0]->count(); + const Dtype* top_diff = top[0]->cpu_diff(); + if (propagate_down[0]) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + caffe_cpu_sign(count, bottom_data, bottom_diff); + caffe_mul(count, bottom_diff, top_diff, bottom_diff); + } +} + +#ifdef CPU_ONLY +STUB_GPU(AbsValLayer); +#endif + +INSTANTIATE_CLASS(AbsValLayer); +REGISTER_LAYER_CLASS(AbsVal); + +} // namespace caffe diff --git a/src/caffe/layers/absval_layer.cu b/src/caffe/layers/absval_layer.cu new file mode 100755 index 0000000..bb310e1 --- /dev/null +++ b/src/caffe/layers/absval_layer.cu @@ -0,0 +1,33 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void AbsValLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + const int count = top[0]->count(); + Dtype* top_data = top[0]->mutable_gpu_data(); + caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data); +} + +template +void AbsValLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const int count = top[0]->count(); + const Dtype* top_diff = top[0]->gpu_diff(); + if (propagate_down[0]) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + caffe_gpu_sign(count, bottom_data, bottom_diff); + caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(AbsValLayer); + + +} // namespace caffe diff --git a/src/caffe/layers/accuracy_layer.cpp b/src/caffe/layers/accuracy_layer.cpp new file mode 100755 index 0000000..90aad67 --- /dev/null +++ b/src/caffe/layers/accuracy_layer.cpp @@ -0,0 +1,91 @@ +#include +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void AccuracyLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + top_k_ = this->layer_param_.accuracy_param().top_k(); + + has_ignore_label_ = + this->layer_param_.accuracy_param().has_ignore_label(); + if (has_ignore_label_) { + ignore_label_ = this->layer_param_.accuracy_param().ignore_label(); + } +} + +template +void AccuracyLayer::Reshape( + const vector*>& bottom, const vector*>& top) { + CHECK_LE(top_k_, bottom[0]->count() / bottom[1]->count()) + << "top_k must be less than or equal to the number of classes."; + label_axis_ = + 
bottom[0]->CanonicalAxisIndex(this->layer_param_.accuracy_param().axis()); + outer_num_ = bottom[0]->count(0, label_axis_); + inner_num_ = bottom[0]->count(label_axis_ + 1); + CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count()) + << "Number of labels must match number of predictions; " + << "e.g., if label axis == 1 and prediction shape is (N, C, H, W), " + << "label count (number of labels) must be N*H*W, " + << "with integer values in {0, 1, ..., C-1}."; + vector top_shape(0); // Accuracy is a scalar; 0 axes. + top[0]->Reshape(top_shape); +} + +template +void AccuracyLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + Dtype accuracy = 0; + const Dtype* bottom_data = bottom[0]->cpu_data(); + const Dtype* bottom_label = bottom[1]->cpu_data(); + const int dim = bottom[0]->count() / outer_num_; + const int num_labels = bottom[0]->shape(label_axis_); + vector maxval(top_k_+1); + vector max_id(top_k_+1); + int count = 0; + for (int i = 0; i < outer_num_; ++i) { + for (int j = 0; j < inner_num_; ++j) { + const int label_value = + static_cast(bottom_label[i * inner_num_ + j]); + if (has_ignore_label_ && label_value == ignore_label_) { + continue; + } + DCHECK_GE(label_value, 0); + DCHECK_LT(label_value, num_labels); + // Top-k accuracy + std::vector > bottom_data_vector; + for (int k = 0; k < num_labels; ++k) { + bottom_data_vector.push_back(std::make_pair( + bottom_data[i * dim + k * inner_num_ + j], k)); + } + std::partial_sort( + bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_, + bottom_data_vector.end(), std::greater >()); + // check if true label is in top k predictions + for (int k = 0; k < top_k_; k++) { + if (bottom_data_vector[k].second == label_value) { + ++accuracy; + break; + } + } + ++count; + } + } + + // LOG(INFO) << "Accuracy: " << accuracy; + top[0]->mutable_cpu_data()[0] = accuracy / count; + // Accuracy layer should not be used as a loss function. 
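+  // [Editor's note] AccuracyLayer implements no Backward pass, so it cannot
+  // propagate gradients; it is typically restricted to the TEST phase in the
+  // prototxt (e.g., include { phase: TEST }) and used purely for monitoring.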
+} + +INSTANTIATE_CLASS(AccuracyLayer); +REGISTER_LAYER_CLASS(Accuracy); + +} // namespace caffe diff --git a/src/caffe/layers/argmax_layer.cpp b/src/caffe/layers/argmax_layer.cpp new file mode 100755 index 0000000..c4040cd --- /dev/null +++ b/src/caffe/layers/argmax_layer.cpp @@ -0,0 +1,63 @@ +#include +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void ArgMaxLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + out_max_val_ = this->layer_param_.argmax_param().out_max_val(); + top_k_ = this->layer_param_.argmax_param().top_k(); + CHECK_GE(top_k_, 1) << " top k must not be less than 1."; + CHECK_LE(top_k_, bottom[0]->count() / bottom[0]->num()) + << "top_k must be less than or equal to the number of classes."; +} + +template +void ArgMaxLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + if (out_max_val_) { + // Produces max_ind and max_val + top[0]->Reshape(bottom[0]->num(), 2, top_k_, 1); + } else { + // Produces only max_ind + top[0]->Reshape(bottom[0]->num(), 1, top_k_, 1); + } +} + +template +void ArgMaxLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + int num = bottom[0]->num(); + int dim = bottom[0]->count() / bottom[0]->num(); + for (int i = 0; i < num; ++i) { + std::vector > bottom_data_vector; + for (int j = 0; j < dim; ++j) { + bottom_data_vector.push_back( + std::make_pair(bottom_data[i * dim + j], j)); + } + std::partial_sort( + bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_, + bottom_data_vector.end(), std::greater >()); + for (int j = 0; j < top_k_; ++j) { + top_data[top[0]->offset(i, 0, j)] = bottom_data_vector[j].second; + } + if (out_max_val_) { + for (int j = 0; j < top_k_; ++j) { + top_data[top[0]->offset(i, 1, j)] = bottom_data_vector[j].first; + } + } + } +} + +INSTANTIATE_CLASS(ArgMaxLayer); +REGISTER_LAYER_CLASS(ArgMax); + +} // namespace caffe diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp new file mode 100755 index 0000000..ccb3adc --- /dev/null +++ b/src/caffe/layers/base_conv_layer.cpp @@ -0,0 +1,298 @@ +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; + // Configure the kernel size, padding, stride, and inputs. 
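+  // [Editor's note] The checks below enforce "kernel_size XOR (kernel_h,
+  // kernel_w)": a prototxt sets e.g. kernel_size: 5 for a square 5x5 filter,
+  // or kernel_h: 3 and kernel_w: 7 (with no kernel_size) for a rectangular
+  // one; pad and stride follow the same pattern.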
+ ConvolutionParameter conv_param = this->layer_param_.convolution_param(); + CHECK(!conv_param.has_kernel_size() != + !(conv_param.has_kernel_h() && conv_param.has_kernel_w())) + << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; + CHECK(conv_param.has_kernel_size() || + (conv_param.has_kernel_h() && conv_param.has_kernel_w())) + << "For non-square filters both kernel_h and kernel_w are required."; + CHECK((!conv_param.has_pad() && conv_param.has_pad_h() + && conv_param.has_pad_w()) + || (!conv_param.has_pad_h() && !conv_param.has_pad_w())) + << "pad is pad OR pad_h and pad_w are required."; + CHECK((!conv_param.has_stride() && conv_param.has_stride_h() + && conv_param.has_stride_w()) + || (!conv_param.has_stride_h() && !conv_param.has_stride_w())) + << "Stride is stride OR stride_h and stride_w are required."; + if (conv_param.has_kernel_size()) { + kernel_h_ = kernel_w_ = conv_param.kernel_size(); + } else { + kernel_h_ = conv_param.kernel_h(); + kernel_w_ = conv_param.kernel_w(); + } + CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero."; + CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; + if (!conv_param.has_pad_h()) { + pad_h_ = pad_w_ = conv_param.pad(); + } else { + pad_h_ = conv_param.pad_h(); + pad_w_ = conv_param.pad_w(); + } + if (!conv_param.has_stride_h()) { + stride_h_ = stride_w_ = conv_param.stride(); + } else { + stride_h_ = conv_param.stride_h(); + stride_w_ = conv_param.stride_w(); + } + // Special case: im2col is the identity for 1x1 convolution with stride 1 + // and no padding, so flag for skipping the buffer and transformation. + is_1x1_ = kernel_w_ == 1 && kernel_h_ == 1 + && stride_h_ == 1 && stride_w_ == 1 && pad_h_ == 0 && pad_w_ == 0; + // Configure output channels and groups. + channels_ = bottom[0]->channels(); + num_output_ = this->layer_param_.convolution_param().num_output(); + CHECK_GT(num_output_, 0); + group_ = this->layer_param_.convolution_param().group(); + CHECK_EQ(channels_ % group_, 0); + CHECK_EQ(num_output_ % group_, 0) + << "Number of output should be multiples of group."; + if (reverse_dimensions()) { + conv_out_channels_ = channels_; + conv_in_channels_ = num_output_; + } else { + conv_out_channels_ = num_output_; + conv_in_channels_ = channels_; + } + // Handle the parameters: weights and biases. + // - blobs_[0] holds the filter weights + // - blobs_[1] holds the biases (optional) + bias_term_ = this->layer_param_.convolution_param().bias_term(); + if (this->blobs_.size() > 0) { + LOG(INFO) << "Skipping parameter initialization"; + } else { + if (bias_term_) { + this->blobs_.resize(2); + } else { + this->blobs_.resize(1); + } + // Initialize and fill the weights: + // output channels x input channels per-group x kernel height x kernel width + this->blobs_[0].reset(new Blob( + conv_out_channels_, conv_in_channels_ / group_, kernel_h_, kernel_w_)); + shared_ptr > weight_filler(GetFiller( + this->layer_param_.convolution_param().weight_filler())); + weight_filler->Fill(this->blobs_[0].get()); + // If necessary, initialize and fill the biases. + if (bias_term_) { + vector bias_shape(1, num_output_); + this->blobs_[1].reset(new Blob(bias_shape)); + shared_ptr > bias_filler(GetFiller( + this->layer_param_.convolution_param().bias_filler())); + bias_filler->Fill(this->blobs_[1].get()); + } + } + // Propagate gradients to the parameters (as directed by backward pass). 
+ this->param_propagate_down_.resize(this->blobs_.size(), true); +} + +template +void BaseConvolutionLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; + num_ = bottom[0]->num(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + CHECK_EQ(bottom[0]->channels(), channels_) << "Input size incompatible with" + " convolution kernel."; + // TODO: generalize to handle inputs of different shapes. + for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) { + CHECK_EQ(num_, bottom[bottom_id]->num()) << "Inputs must have same num."; + CHECK_EQ(channels_, bottom[bottom_id]->channels()) + << "Inputs must have same channels."; + CHECK_EQ(height_, bottom[bottom_id]->height()) + << "Inputs must have same height."; + CHECK_EQ(width_, bottom[bottom_id]->width()) + << "Inputs must have same width."; + } + // Shape the tops. + compute_output_shape(); + for (int top_id = 0; top_id < top.size(); ++top_id) { + top[top_id]->Reshape(num_, num_output_, height_out_, width_out_); + } + if (reverse_dimensions()) { + conv_in_height_ = height_out_; + conv_in_width_ = width_out_; + conv_out_spatial_dim_ = height_ * width_; + } else { + conv_in_height_ = height_; + conv_in_width_ = width_; + conv_out_spatial_dim_ = height_out_ * width_out_; + } + kernel_dim_ = conv_in_channels_ * kernel_h_ * kernel_w_; + weight_offset_ = conv_out_channels_ * kernel_dim_ / group_ / group_; + col_offset_ = kernel_dim_ * conv_out_spatial_dim_ / group_; + output_offset_ = conv_out_channels_ * conv_out_spatial_dim_ / group_; + // The im2col result buffer will only hold one image at a time to avoid + // overly large memory usage. In the special case of 1x1 convolution + // it goes lazily unused to save memory. 
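+  // In the standard convolution case the buffer holds kernel_dim_ rows by one
+  // output map of columns: e.g. a 3x3 kernel over 64 input channels with a
+  // 32x32 output map needs 64 * 9 * 32 * 32 (about 590k) values per image.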
+ if (reverse_dimensions()) { + col_buffer_.Reshape(1, kernel_dim_, height_, width_); + } else { + col_buffer_.Reshape(1, kernel_dim_, height_out_, width_out_); + } + // Set up the all ones "bias multiplier" for adding biases by BLAS + if (bias_term_) { + vector bias_multiplier_shape(1, height_out_ * width_out_); + bias_multiplier_.Reshape(bias_multiplier_shape); + caffe_set(bias_multiplier_.count(), Dtype(1), + bias_multiplier_.mutable_cpu_data()); + } +} + +template +void BaseConvolutionLayer::forward_cpu_gemm(const Dtype* input, + const Dtype* weights, Dtype* output, bool skip_im2col) { + const Dtype* col_buff = input; + if (!is_1x1_) { + if (!skip_im2col) { + conv_im2col_cpu(input, col_buffer_.mutable_cpu_data()); + } + col_buff = col_buffer_.cpu_data(); + } + for (int g = 0; g < group_; ++g) { + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, conv_out_channels_ / + group_, conv_out_spatial_dim_, kernel_dim_ / group_, + (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g, + (Dtype)0., output + output_offset_ * g); + } +} + +template +void BaseConvolutionLayer::forward_cpu_bias(Dtype* output, + const Dtype* bias) { + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, + height_out_ * width_out_, 1, (Dtype)1., bias, bias_multiplier_.cpu_data(), + (Dtype)1., output); +} + +template +void BaseConvolutionLayer::backward_cpu_gemm(const Dtype* output, + const Dtype* weights, Dtype* input) { + Dtype* col_buff = col_buffer_.mutable_cpu_data(); + if (is_1x1_) { + col_buff = input; + } + for (int g = 0; g < group_; ++g) { + caffe_cpu_gemm(CblasTrans, CblasNoTrans, kernel_dim_ / group_, + conv_out_spatial_dim_, conv_out_channels_ / group_, + (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g, + (Dtype)0., col_buff + col_offset_ * g); + } + if (!is_1x1_) { + conv_col2im_cpu(col_buff, input); + } +} + +template +void BaseConvolutionLayer::weight_cpu_gemm(const Dtype* input, + const Dtype* output, Dtype* weights) { + const Dtype* col_buff = input; + if (!is_1x1_) { + conv_im2col_cpu(input, col_buffer_.mutable_cpu_data()); + col_buff = col_buffer_.cpu_data(); + } + for (int g = 0; g < group_; ++g) { + caffe_cpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, + kernel_dim_ / group_, conv_out_spatial_dim_, + (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g, + (Dtype)1., weights + weight_offset_ * g); + } +} + +template +void BaseConvolutionLayer::backward_cpu_bias(Dtype* bias, + const Dtype* input) { + caffe_cpu_gemv(CblasNoTrans, num_output_, height_out_ * width_out_, 1., + input, bias_multiplier_.cpu_data(), 1., bias); +} + +#ifndef CPU_ONLY + +template +void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, + const Dtype* weights, Dtype* output, bool skip_im2col) { + const Dtype* col_buff = input; + if (!is_1x1_) { + if (!skip_im2col) { + conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); + } + col_buff = col_buffer_.gpu_data(); + } + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, conv_out_channels_ / + group_, conv_out_spatial_dim_, kernel_dim_ / group_, + (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g, + (Dtype)0., output + output_offset_ * g); + } +} + +template +void BaseConvolutionLayer::forward_gpu_bias(Dtype* output, + const Dtype* bias) { + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, + height_out_ * width_out_, 1, (Dtype)1., bias, bias_multiplier_.gpu_data(), + (Dtype)1., output); +} + +template +void BaseConvolutionLayer::backward_gpu_gemm(const 
Dtype* output, + const Dtype* weights, Dtype* input) { + Dtype* col_buff = col_buffer_.mutable_gpu_data(); + if (is_1x1_) { + col_buff = input; + } + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm(CblasTrans, CblasNoTrans, kernel_dim_ / group_, + conv_out_spatial_dim_, conv_out_channels_ / group_, + (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g, + (Dtype)0., col_buff + col_offset_ * g); + } + if (!is_1x1_) { + conv_col2im_gpu(col_buff, input); + } +} + +template +void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, + const Dtype* output, Dtype* weights) { + const Dtype* col_buff = input; + if (!is_1x1_) { + conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); + col_buff = col_buffer_.gpu_data(); + } + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, + kernel_dim_ / group_, conv_out_spatial_dim_, + (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g, + (Dtype)1., weights + weight_offset_ * g); + } +} + +template +void BaseConvolutionLayer::backward_gpu_bias(Dtype* bias, + const Dtype* input) { + caffe_gpu_gemv(CblasNoTrans, num_output_, height_out_ * width_out_, 1., + input, bias_multiplier_.gpu_data(), 1., bias); +} + +#endif // !CPU_ONLY + +INSTANTIATE_CLASS(BaseConvolutionLayer); + +} // namespace caffe diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp new file mode 100755 index 0000000..5303fe9 --- /dev/null +++ b/src/caffe/layers/base_data_layer.cpp @@ -0,0 +1,132 @@ +#include +#include +#include + +#include "caffe/data_layers.hpp" +#include "caffe/net.hpp" +#include "caffe/util/io.hpp" + +namespace caffe { + +template +BaseDataLayer::BaseDataLayer(const LayerParameter& param) + : Layer(param), + transform_param_(param.transform_param()) { +} + +template +void BaseDataLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + if (top.size() == 1) { + output_labels_ = false; + } else { + output_labels_ = true; + } + data_transformer_.reset( + new DataTransformer(transform_param_, this->phase_)); + data_transformer_->InitRand(); + // The subclasses should setup the size of bottom and top + DataLayerSetUp(bottom, top); +} + +template +BasePrefetchingDataLayer::BasePrefetchingDataLayer( + const LayerParameter& param) + : BaseDataLayer(param), + prefetch_free_(), prefetch_full_() { + for (int i = 0; i < PREFETCH_COUNT; ++i) { + prefetch_free_.push(&prefetch_[i]); + } +} + +template +void BasePrefetchingDataLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + BaseDataLayer::LayerSetUp(bottom, top); + // Before starting the prefetch thread, we make cpu_data and gpu_data + // calls so that the prefetch thread does not accidentally make simultaneous + // cudaMalloc calls when the main thread is running. In some GPUs this + // seems to cause failures if we do not so. 
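+  // Touching mutable_cpu_data() (and, below, mutable_gpu_data()) forces the
+  // allocations to happen here on the main thread, so the prefetch thread
+  // only ever writes into buffers that already exist.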
+ for (int i = 0; i < PREFETCH_COUNT; ++i) { + prefetch_[i].data_.mutable_cpu_data(); + if (this->output_labels_) { + prefetch_[i].label_.mutable_cpu_data(); + } + } +#ifndef CPU_ONLY + if (Caffe::mode() == Caffe::GPU) { + for (int i = 0; i < PREFETCH_COUNT; ++i) { + prefetch_[i].data_.mutable_gpu_data(); + if (this->output_labels_) { + prefetch_[i].label_.mutable_gpu_data(); + } + } + } +#endif + DLOG(INFO) << "Initializing prefetch"; + this->data_transformer_->InitRand(); + StartInternalThread(); + DLOG(INFO) << "Prefetch initialized."; +} + +template +void BasePrefetchingDataLayer::InternalThreadEntry() { +#ifndef CPU_ONLY + cudaStream_t stream; + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); + } +#endif + + try { + while (!must_stop()) { + Batch* batch = prefetch_free_.pop(); + load_batch(batch); +#ifndef CPU_ONLY + if (Caffe::mode() == Caffe::GPU) { + batch->data_.data().get()->async_gpu_push(stream); + CUDA_CHECK(cudaStreamSynchronize(stream)); + } +#endif + prefetch_full_.push(batch); + } + } catch (boost::thread_interrupted&) { + // Interrupted exception is expected on shutdown + } +#ifndef CPU_ONLY + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaStreamDestroy(stream)); + } +#endif +} + +template +void BasePrefetchingDataLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + Batch* batch = prefetch_full_.pop("Data layer prefetch queue empty"); + // Reshape to loaded data. + top[0]->Reshape(batch->data_.num(), batch->data_.channels(), + batch->data_.height(), batch->data_.width()); + // Copy the data + caffe_copy(batch->data_.count(), batch->data_.cpu_data(), + top[0]->mutable_cpu_data()); + DLOG(INFO) << "Prefetch copied"; + if (this->output_labels_) { + // Reshape to loaded labels. + top[1]->ReshapeLike(batch->label_); + // Copy the labels. + caffe_copy(batch->label_.count(), batch->label_.cpu_data(), + top[1]->mutable_cpu_data()); + } + + prefetch_free_.push(batch); +} + +#ifdef CPU_ONLY +STUB_GPU_FORWARD(BasePrefetchingDataLayer, Forward); +#endif + +INSTANTIATE_CLASS(BaseDataLayer); +INSTANTIATE_CLASS(BasePrefetchingDataLayer); + +} // namespace caffe diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu new file mode 100755 index 0000000..56439bc --- /dev/null +++ b/src/caffe/layers/base_data_layer.cu @@ -0,0 +1,29 @@ +#include + +#include "caffe/data_layers.hpp" + +namespace caffe { + +template +void BasePrefetchingDataLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + Batch* batch = prefetch_full_.pop("Data layer prefetch queue empty"); + // Reshape to loaded data. + top[0]->ReshapeLike(batch->data_); + // Copy the data + caffe_copy(batch->data_.count(), batch->data_.gpu_data(), + top[0]->mutable_gpu_data()); + if (this->output_labels_) { + // Reshape to loaded labels. + top[1]->ReshapeLike(batch->label_); + // Copy the labels. 
+ caffe_copy(batch->label_.count(), batch->label_.gpu_data(), + top[1]->mutable_gpu_data()); + } + + prefetch_free_.push(batch); +} + +INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); + +} // namespace caffe diff --git a/src/caffe/layers/bnll_layer.cpp b/src/caffe/layers/bnll_layer.cpp new file mode 100755 index 0000000..9ba0ea9 --- /dev/null +++ b/src/caffe/layers/bnll_layer.cpp @@ -0,0 +1,48 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +const float kBNLL_THRESHOLD = 50.; + +template +void BNLLLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + const int count = bottom[0]->count(); + for (int i = 0; i < count; ++i) { + top_data[i] = bottom_data[i] > 0 ? + bottom_data[i] + log(1. + exp(-bottom_data[i])) : + log(1. + exp(bottom_data[i])); + } +} + +template +void BNLLLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const int count = bottom[0]->count(); + Dtype expval; + for (int i = 0; i < count; ++i) { + expval = exp(std::min(bottom_data[i], Dtype(kBNLL_THRESHOLD))); + bottom_diff[i] = top_diff[i] * expval / (expval + 1.); + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(BNLLLayer); +#endif + +INSTANTIATE_CLASS(BNLLLayer); +REGISTER_LAYER_CLASS(BNLL); + +} // namespace caffe diff --git a/src/caffe/layers/bnll_layer.cu b/src/caffe/layers/bnll_layer.cu new file mode 100755 index 0000000..d963d06 --- /dev/null +++ b/src/caffe/layers/bnll_layer.cu @@ -0,0 +1,60 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +const float kBNLL_THRESHOLD = 50.; + +template +__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { + CUDA_KERNEL_LOOP(index, n) { + out[index] = in[index] > 0 ? + in[index] + log(1. + exp(-in[index])) : + log(1. 
+ exp(in[index])); + } +} + +template +void BNLLLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + BNLLForward<<>>( + count, bottom_data, top_data); + CUDA_POST_KERNEL_CHECK; +} + +template +__global__ void BNLLBackward(const int n, const Dtype* in_diff, + const Dtype* in_data, Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) { + Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); + out_diff[index] = in_diff[index] * expval / (expval + 1.); + } +} + +template +void BNLLLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + BNLLBackward<<>>( + count, top_diff, bottom_data, bottom_diff); + CUDA_POST_KERNEL_CHECK; + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer); + + +} // namespace caffe diff --git a/src/caffe/layers/compress_conv_layer.cpp b/src/caffe/layers/compress_conv_layer.cpp new file mode 100755 index 0000000..f560b79 --- /dev/null +++ b/src/caffe/layers/compress_conv_layer.cpp @@ -0,0 +1,221 @@ +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" +#include + +namespace caffe { + +template +void CConvolutionLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + BaseConvolutionLayer ::LayerSetUp(bottom, top); + + /************ For dynamic network surgery ***************/ + CConvolutionParameter cconv_param = this->layer_param_.cconvolution_param(); + + if(this->blobs_.size()==2 && (this->bias_term_)){ + this->blobs_.resize(4); + // Intialize and fill the weightmask & biasmask + this->blobs_[2].reset(new Blob(this->blobs_[0]->shape())); + shared_ptr > weight_mask_filler(GetFiller( + cconv_param.weight_mask_filler())); + weight_mask_filler->Fill(this->blobs_[2].get()); + this->blobs_[3].reset(new Blob(this->blobs_[1]->shape())); + shared_ptr > bias_mask_filler(GetFiller( + cconv_param.bias_mask_filler())); + bias_mask_filler->Fill(this->blobs_[3].get()); + } + else if(this->blobs_.size()==1 && (!this->bias_term_)){ + this->blobs_.resize(2); + // Intialize and fill the weightmask + this->blobs_[1].reset(new Blob(this->blobs_[0]->shape())); + shared_ptr > bias_mask_filler(GetFiller( + cconv_param.bias_mask_filler())); + bias_mask_filler->Fill(this->blobs_[1].get()); + } + + // Intializing the tmp tensor + this->weight_tmp_.Reshape(this->blobs_[0]->shape()); + this->bias_tmp_.Reshape(this->blobs_[1]->shape()); + + // Intialize the hyper-parameters + this->std = 0;this->mu = 0; + this->gamma = cconv_param.gamma(); + this->power = cconv_param.power(); + this->crate = cconv_param.c_rate(); + this->iter_stop_ = cconv_param.iter_stop(); + /********************************************************/ +} + +template +void CConvolutionLayer::compute_output_shape() { + this->height_out_ = (this->height_ + 2 * this->pad_h_ - this->kernel_h_) + / this->stride_h_ + 1; + this->width_out_ = (this->width_ + 2 * this->pad_w_ - this->kernel_w_) + / this->stride_w_ + 1; +} + +template +void 
CConvolutionLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + + const Dtype* weight = this->blobs_[0]->mutable_cpu_data(); + Dtype* weightMask = this->blobs_[2]->mutable_cpu_data(); + Dtype* weightTmp = this->weight_tmp_.mutable_cpu_data(); + const Dtype* bias = NULL; + Dtype* biasMask = NULL; + Dtype* biasTmp = NULL; + if (this->bias_term_) { + bias = this->blobs_[1]->mutable_cpu_data(); + biasMask = this->blobs_[3]->mutable_cpu_data(); + biasTmp = this->bias_tmp_.mutable_cpu_data(); + } + + if (this->phase_ == TRAIN){ + // Calculate the mean and standard deviation of learnable parameters + if (this->std==0 && this->iter_==0){ + unsigned int ncount = 0; + for (unsigned int k = 0;k < this->blobs_[0]->count(); ++k) { + this->mu += fabs(weightMask[k]*weight[k]); + this->std += weightMask[k]*weight[k]*weight[k]; + if (weightMask[k]*weight[k]!=0) ncount++; + } + if (this->bias_term_) { + for (unsigned int k = 0;k < this->blobs_[1]->count(); ++k) { + this->mu += fabs(biasMask[k]*bias[k]); + this->std += biasMask[k]*bias[k]*bias[k]; + if (biasMask[k]*bias[k]!=0) ncount++; + } + } + this->mu /= ncount; this->std -= ncount*mu*mu; + this->std /= ncount; this->std = sqrt(std); +// LOG(INFO)<iter_%1000==0){ + unsigned int ncount = 0; + for (unsigned int k = 0;k < this->blobs_[0]->count(); ++k) { + if (weightMask[k]*weight[k]!=0) ncount++; + } + if (this->bias_term_) { + for (unsigned int k = 0;k < this->blobs_[1]->count(); ++k) { + if (biasMask[k]*bias[k]!=0) ncount++; + } + } + LOG(INFO)<(rand())/static_cast(RAND_MAX); + //if (pow(1+(this->gamma)*(this->iter_),-(this->power))>r && (this->iter_)<(this->iter_stop_)) { + bool _update = false; + if (weightMask[0] > 1){ + _update = true; + crate = weightMask[0] - 1; + weightMask[0] = 1; + } else if (weightMask[0] < 0){ + _update = true; + crate = -weightMask[0]; + weightMask[0] = 0; + } + if (_update) { + for (unsigned int k = 0;k < this->blobs_[0]->count(); ++k) { + if (weightMask[k]==1 && fabs(weight[k])<=0.9*std::max(mu+crate*std,Dtype(0))) + weightMask[k] = 0; + else if (weightMask[k]==0 && fabs(weight[k])>1.1*std::max(mu+crate*std,Dtype(0))) + weightMask[k] = 1; + } + if (this->bias_term_) { + for (unsigned int k = 0;k < this->blobs_[1]->count(); ++k) { + if (biasMask[k]==1 && fabs(bias[k])<=0.9*std::max(mu+crate*std,Dtype(0))) + biasMask[k] = 0; + else if (biasMask[k]==0 && fabs(bias[k])>1.1*std::max(mu+crate*std,Dtype(0))) + biasMask[k] = 1; + } + } + _update = false; + } + // ------Guiying--------- + } + + // Calculate the current (masked) weight and bias + for (unsigned int k = 0;k < this->blobs_[0]->count(); ++k) { + weightTmp[k] = weight[k]*weightMask[k]; + } + if (this->bias_term_){ + for (unsigned int k = 0;k < this->blobs_[1]->count(); ++k) { + biasTmp[k] = bias[k]*biasMask[k]; + } + } + + // Forward calculation with (masked) weight and bias + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->cpu_data(); + Dtype* top_data = top[i]->mutable_cpu_data(); + for (int n = 0; n < this->num_; ++n) { + this->forward_cpu_gemm(bottom_data + bottom[i]->offset(n), weightTmp, + top_data + top[i]->offset(n)); + if (this->bias_term_) { + this->forward_cpu_bias(top_data + top[i]->offset(n), biasTmp); + } + } + } +} + +template +void CConvolutionLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* weightTmp = this->weight_tmp_.cpu_data(); + const Dtype* weightMask = this->blobs_[2]->cpu_data(); + Dtype* weight_diff = 
this->blobs_[0]->mutable_cpu_diff(); + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->cpu_diff(); + // Bias gradient, if necessary. + if (this->bias_term_ && this->param_propagate_down_[1]) { + const Dtype* biasMask = this->blobs_[3]->cpu_data(); + Dtype* bias_diff = this->blobs_[1]->mutable_cpu_diff(); + for (unsigned int k = 0;k < this->blobs_[1]->count(); ++k) { + bias_diff[k] = bias_diff[k]*biasMask[k]; + } + for (int n = 0; n < this->num_; ++n) { + this->backward_cpu_bias(bias_diff, top_diff + top[i]->offset(n)); + } + } + if (this->param_propagate_down_[0] || propagate_down[i]) { + const Dtype* bottom_data = bottom[i]->cpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_cpu_diff(); + for (unsigned int k = 0;k < this->blobs_[0]->count(); ++k) { + weight_diff[k] = weight_diff[k]*weightMask[k]; + } + for (int n = 0; n < this->num_; ++n) { + // gradient w.r.t. weight. Note that we will accumulate diffs. + if (this->param_propagate_down_[0]) { + this->weight_cpu_gemm(bottom_data + bottom[i]->offset(n), + top_diff + top[i]->offset(n), weight_diff); + } + // gradient w.r.t. bottom data, if necessary. + if (propagate_down[i]) { + this->backward_cpu_gemm(top_diff + top[i]->offset(n), weightTmp, + bottom_diff + bottom[i]->offset(n)); + } + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(CConvolutionLayer); +#endif + +INSTANTIATE_CLASS(CConvolutionLayer); + +} // namespace caffe diff --git a/src/caffe/layers/compress_conv_layer.cu b/src/caffe/layers/compress_conv_layer.cu new file mode 100755 index 0000000..2d251d7 --- /dev/null +++ b/src/caffe/layers/compress_conv_layer.cu @@ -0,0 +1,308 @@ +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" +#include + +namespace caffe { + +// The constant NUM_THREADS should be equal to the value in CCMomentCalc +template +__global__ void CCMomentCollect(const int n, const Dtype* wb, const Dtype* mask, + Dtype* mu, Dtype* std, unsigned int* count ) { + const int NUM_THREADS = 512; + __shared__ Dtype param [4*NUM_THREADS]; + __shared__ unsigned int tcount [2*NUM_THREADS]; + unsigned int t = threadIdx.x; + unsigned int s = 2 * blockIdx.x * NUM_THREADS; + if (s+t < n){ + param[t] = fabs(mask[s+t]*wb[s+t]); + param[t+2*NUM_THREADS] = mask[s+t]*wb[s+t]*wb[s+t]; + if(mask[s+t]*wb[s+t]!=0) tcount[t] = 1; + else tcount[t] = 0; + } + else{ + param[t] = 0;param[t+2*NUM_THREADS] = 0;tcount[t] = 0; + } + if (s+t+NUM_THREADS < n){ + param[t+NUM_THREADS] = fabs(mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]); + param[t+3*NUM_THREADS] = mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]; + if(mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]!=0) tcount[t+NUM_THREADS] = 1; + else tcount[t+NUM_THREADS] = 0; + } + else{ + param[t+NUM_THREADS] = 0;param[t+3*NUM_THREADS] = 0;tcount[t+NUM_THREADS] = 0; + } + __syncthreads(); + for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) { + if (t < stride ){ + param[t] += param[t+stride]; + param[t+2*NUM_THREADS] += param[t+2*NUM_THREADS+stride]; + tcount[t] += tcount[t+stride]; + } + __syncthreads(); + } + if (t == 0){ + mu [blockIdx.x] = param[0]; + std [blockIdx.x] = param[2*NUM_THREADS]; + count[blockIdx.x] = tcount[0]; + } +} + +// The constant NUM_THREADS should be equal to the value in CCMomentCalc +template +__global__ void CCNzeroCollect(const int n, const Dtype* mask, unsigned int* count ) { + const int NUM_THREADS = 512; + __shared__ 
unsigned int tcount [2*NUM_THREADS]; + unsigned int t = threadIdx.x; + unsigned int s = 2 * blockIdx.x * NUM_THREADS; + tcount[t] = 0; + if (s+t < n && mask[s+t]!=0){ + tcount[t] = 1; + } + tcount[t+NUM_THREADS] = 0; + if (s+t+NUM_THREADS < n && mask[s+t+NUM_THREADS]!=0){ + tcount[t+NUM_THREADS] = 1; + } + __syncthreads(); + for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) { + if (t < stride ){ + tcount[t] += tcount[t+stride]; + } + __syncthreads(); + } + if (t == 0){ + count[blockIdx.x] = tcount[0]; + } +} + +template +__global__ void CCMaskCalc(const int n, const Dtype* wb, + Dtype* mask, Dtype mu, Dtype std, Dtype r) { + CUDA_KERNEL_LOOP(index, n) { + // added by Guiying Li + //if (mask[index] > 1) + // mask[index] = 1; + //else if (mask[index] <0) + // mask[index] = 0; + // ----Guiying Li----- + if (mask[index]==1 && fabs(wb[index])<=0.9*max(mu+r*std,Dtype(0))) + mask[index] = 0; + else if (mask[index]==0 && fabs(wb[index])>1.1*max(mu+r*std,Dtype(0))) + mask[index] = 1; + } +} + +template +__global__ void CCcRateCalc(const int n, + Dtype* mask) { + CUDA_KERNEL_LOOP(index, n) { + if (index == 0){ + if (mask[0] > 1){ + mask[0] = 1; + } else if (mask[0] < 0){ + mask[0] = 0; + } + } + } +} + +template +__global__ void CCMaskApply(const int n, const Dtype* wb, + const Dtype* mask, Dtype* wb_t) { + CUDA_KERNEL_LOOP(index, n) { + wb_t[index] = wb[index] * mask[index]; + } +} + +template +void CCMomentCalc(const int n, const Dtype* wb, const Dtype* mask, Dtype* mu, Dtype* std, unsigned int* ncount){ + const unsigned int NUM_THREADS = 512; + Dtype* pmu_g; Dtype* pstd_g; unsigned int* pncount_g; + Dtype* pmu_c; Dtype* pstd_c; unsigned int* pncount_c; + int num_p = (n+(NUM_THREADS<<1)-1)/(NUM_THREADS<<1); + cudaMalloc(&pmu_g, sizeof(Dtype) * num_p); + cudaMalloc(&pstd_g, sizeof(Dtype) * num_p); + cudaMalloc(&pncount_g, sizeof(unsigned int) * num_p); + pmu_c = (Dtype*) malloc(num_p * sizeof(Dtype)); + pstd_c = (Dtype*) malloc(num_p * sizeof(Dtype)); + pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int)); + CCMomentCollect<<>>(n, wb, mask, pmu_g, pstd_g, pncount_g); + CUDA_POST_KERNEL_CHECK; + cudaMemcpy(pmu_c, pmu_g, sizeof(Dtype) * num_p, cudaMemcpyDeviceToHost); + cudaMemcpy(pstd_c, pstd_g, sizeof(Dtype) * num_p, cudaMemcpyDeviceToHost); + cudaMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, cudaMemcpyDeviceToHost); + for (int i = 0; i < num_p; i++) { + *mu += pmu_c[i];*std += pstd_c[i];*ncount += pncount_c[i]; + } + cudaFree(pmu_g);cudaFree(pstd_g);cudaFree(pncount_g); + free(pmu_c);free(pstd_c);free(pncount_c); +} + +template +void CCNZeroCalc(const int n, const Dtype* mask, unsigned int* ncount ){ + const unsigned int NUM_THREADS = 512; + unsigned int* pncount_g; + unsigned int* pncount_c; + int num_p = (n+(NUM_THREADS<<1)-1)/(NUM_THREADS<<1); + cudaMalloc(&pncount_g, sizeof(unsigned int) * num_p); + pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int)); + CCNzeroCollect<<>>(n, mask, pncount_g); + CUDA_POST_KERNEL_CHECK; + cudaMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, cudaMemcpyDeviceToHost); + for (int i = 0; i < num_p; i++) { + *ncount += pncount_c[i]; + } + cudaFree(pncount_g); + free(pncount_c); +} + +template +void CConvolutionLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + + const Dtype* weight = this->blobs_[0]->mutable_gpu_data(); + Dtype* weightMask = this->blobs_[2]->mutable_gpu_data(); + Dtype* weightTmp = this->weight_tmp_.mutable_gpu_data(); + const Dtype* bias = NULL; + Dtype* biasMask = 
NULL; + Dtype* biasTmp = NULL; + if (this->bias_term_) { + bias = this->blobs_[1]->mutable_gpu_data(); + biasMask = this->blobs_[3]->mutable_gpu_data(); + biasTmp = this->bias_tmp_.mutable_gpu_data(); + } + // added by Guiying Li + bool _update = false; + Dtype* tmp_weightMask = this->blobs_[2]->mutable_cpu_data(); + if (tmp_weightMask[0] > 1){ + _update = true; + this->crate = tmp_weightMask[0] - 1; + tmp_weightMask[0] = 1; + } else if (tmp_weightMask[0] < 0){ + _update = true; + this->crate = -tmp_weightMask[0]; + tmp_weightMask[0] = 0; + } + weightMask = this->blobs_[2]->mutable_gpu_data();//update data + // -------Guiying------ + + if (this->phase_ == TRAIN){ + // Calculate the mean and standard deviation of learnable parameters + if (this->std==0 && this->iter_==0){ + unsigned int ncount = 0; + CCMomentCalc(this->blobs_[0]->count(), weight, weightMask, &mu, &std, &ncount); + if (this->bias_term_) { + CCMomentCalc(this->blobs_[1]->count(), bias, biasMask, &mu, &std, &ncount); + } + this->mu /= ncount; this->std -= ncount*mu*mu; + this->std /= ncount; this->std = sqrt(std); + LOG(INFO)<iter_%1000==0){ + unsigned int ncount = 0; + CCNZeroCalc(this->blobs_[0]->count(), weightMask, &ncount); + if (this->bias_term_) { + CCNZeroCalc(this->blobs_[1]->count(), biasMask, &ncount); + } + LOG(INFO)<(rand())/static_cast(RAND_MAX); + //if (pow(1+(this->gamma)*(this->iter_),-(this->power))>r && (this->iter_)<(this->iter_stop_)) { +// CCcRateCalc<<blobs_[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(this->blobs_[0]->count(), weightMask); +// CUDA_POST_KERNEL_CHECK; + if (_update) { + CCMaskCalc<<blobs_[0]->count()), + CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), weight, + weightMask, this->mu, this->std, this->crate); + CUDA_POST_KERNEL_CHECK; + if (this->bias_term_) { + CCMaskCalc<<blobs_[1]->count()), + CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[1]->count(), bias, + biasMask, this->mu, this->std, this->crate); + CUDA_POST_KERNEL_CHECK; + } + _update = false; + } + // ------Guiying--------- + } + + // Calculate the current (masked) weight and bias + CCMaskApply<<blobs_[0]->count()), + CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), weight, weightMask, weightTmp); + CUDA_POST_KERNEL_CHECK; + if (this->bias_term_) { + CCMaskApply<<blobs_[1]->count()), + CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[1]->count(), bias, biasMask, biasTmp); + CUDA_POST_KERNEL_CHECK; + } + + // Forward calculation with (masked) weight and bias + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* top_data = top[i]->mutable_gpu_data(); + for (int n = 0; n < this->num_; ++n) { + this->forward_gpu_gemm(bottom_data + bottom[i]->offset(n), weightTmp, + top_data + top[i]->offset(n)); + if (this->bias_term_) { + this->forward_gpu_bias(top_data + top[i]->offset(n), biasTmp); + } + } + } +} + +template +void CConvolutionLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* weightTmp = this->weight_tmp_.gpu_data(); + const Dtype* weightMask = this->blobs_[2]->gpu_data(); + Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + // Bias gradient, if necessary. 
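+    // The weight and bias diffs are multiplied by the same masks used in the
+    // forward pass, so connections pruned by the surgery step receive no
+    // gradient update.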
+ if (this->bias_term_ && this->param_propagate_down_[1]) { + const Dtype* biasMask = this->blobs_[3]->gpu_data(); + Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); + CCMaskApply<<blobs_[3]->count()), + CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[3]->count(), bias_diff, biasMask, bias_diff); + CUDA_POST_KERNEL_CHECK; + for (int n = 0; n < this->num_; ++n) { + this->backward_gpu_bias(bias_diff, top_diff + top[i]->offset(n)); + } + } + if (this->param_propagate_down_[0] || propagate_down[i]) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + CCMaskApply<<blobs_[2]->count()), + CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[2]->count(), weight_diff, weightMask, weight_diff); + CUDA_POST_KERNEL_CHECK; + for (int n = 0; n < this->num_; ++n) { + // gradient w.r.t. weight. Note that we will accumulate diffs. + if (this->param_propagate_down_[0]) { + this->weight_gpu_gemm(bottom_data + bottom[i]->offset(n), + top_diff + top[i]->offset(n), weight_diff); + } + // gradient w.r.t. bottom data, if necessary. + if (propagate_down[i]) { + this->backward_gpu_gemm(top_diff + top[i]->offset(n), weightTmp, + bottom_diff + bottom[i]->offset(n)); + } + } + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(CConvolutionLayer); + +} // namespace caffe diff --git a/src/caffe/layers/compress_inner_product_layer.cpp b/src/caffe/layers/compress_inner_product_layer.cpp new file mode 100755 index 0000000..4077f34 --- /dev/null +++ b/src/caffe/layers/compress_inner_product_layer.cpp @@ -0,0 +1,267 @@ +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" +#include + +namespace caffe { + +template +void CInnerProductLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + const int num_output = this->layer_param_.inner_product_param().num_output(); + bias_term_ = this->layer_param_.inner_product_param().bias_term(); + N_ = num_output; + const int axis = bottom[0]->CanonicalAxisIndex( + this->layer_param_.inner_product_param().axis()); + // Dimensions starting from "axis" are "flattened" into a single + // length K_ vector. For example, if bottom[0]'s shape is (N, C, H, W), + // and axis == 1, N inner products with dimension CHW are performed. 
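+  // Concretely: a bottom blob of shape (64, 3, 28, 28) with axis == 1 gives
+  // K_ = 3 * 28 * 28 = 2352, and 64 inner products of that length are taken
+  // against the N_ x K_ weight matrix on every forward pass.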
+ K_ = bottom[0]->count(axis); + // Check if we need to set up the weights + if (this->blobs_.size() > 0) { + LOG(INFO) << "Skipping parameter initialization"; + } else { + if (this->bias_term_) { + this->blobs_.resize(2); + } else { + this->blobs_.resize(1); + } + // Intialize the weight + vector weight_shape(2); + weight_shape[0] = N_; + weight_shape[1] = K_; + this->blobs_[0].reset(new Blob(weight_shape)); + // fill the weights + shared_ptr > weight_filler(GetFiller( + this->layer_param_.inner_product_param().weight_filler())); + weight_filler->Fill(this->blobs_[0].get()); + // If necessary, intiialize and fill the bias term + if (this->bias_term_) { + vector bias_shape(1, N_); + this->blobs_[1].reset(new Blob(bias_shape)); + shared_ptr > bias_filler(GetFiller( + this->layer_param_.inner_product_param().bias_filler())); + bias_filler->Fill(this->blobs_[1].get()); + } + } // parameter initialization + this->param_propagate_down_.resize(this->blobs_.size(), true); + + /************ For dynamic network surgery ***************/ + CInnerProductParameter cinner_param = this->layer_param_.cinner_product_param(); + + if(this->blobs_.size()==2 && (this->bias_term_)){ + this->blobs_.resize(4); + // Intialize and fill the weightmask & biasmask + this->blobs_[2].reset(new Blob(this->blobs_[0]->shape())); + shared_ptr > weight_mask_filler(GetFiller( + cinner_param.weight_mask_filler())); + weight_mask_filler->Fill(this->blobs_[2].get()); + this->blobs_[3].reset(new Blob(this->blobs_[1]->shape())); + shared_ptr > bias_mask_filler(GetFiller( + cinner_param.bias_mask_filler())); + bias_mask_filler->Fill(this->blobs_[3].get()); + } + else if(this->blobs_.size()==1 && (!this->bias_term_)){ + this->blobs_.resize(2); + // Intialize and fill the weightmask + this->blobs_[1].reset(new Blob(this->blobs_[0]->shape())); + shared_ptr > bias_mask_filler(GetFiller( + cinner_param.bias_mask_filler())); + bias_mask_filler->Fill(this->blobs_[1].get()); + } + + // Intialize the tmp tensor + this->weight_tmp_.Reshape(this->blobs_[0]->shape()); + this->bias_tmp_.Reshape(this->blobs_[1]->shape()); + + // Intialize the hyper-parameters + this->std = 0;this->mu = 0; + this->gamma = cinner_param.gamma(); + this->power = cinner_param.power(); + this->crate = cinner_param.c_rate(); + this->iter_stop_ = cinner_param.iter_stop(); + /********************************************************/ +} + +template +void CInnerProductLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + // Figure out the dimensions + const int axis = bottom[0]->CanonicalAxisIndex( + this->layer_param_.inner_product_param().axis()); + const int new_K = bottom[0]->count(axis); + CHECK_EQ(K_, new_K) + << "Input size incompatible with inner product parameters."; + // The first "axis" dimensions are independent inner products; the total + // number of these is M_, the product over these dimensions. + M_ = bottom[0]->count(0, axis); + // The top shape will be the bottom shape with the flattened axes dropped, + // and replaced by a single axis with dimension num_output (N_). 
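+  // E.g. a bottom of shape (64, 3, 28, 28) with axis == 1 and
+  // num_output == 100 gives M_ = 64 and a top of shape (64, 100).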
+ vector top_shape = bottom[0]->shape(); + top_shape.resize(axis + 1); + top_shape[axis] = N_; + top[0]->Reshape(top_shape); + // Set up the bias multiplier + if (bias_term_) { + vector bias_shape(1, M_); + bias_multiplier_.Reshape(bias_shape); + caffe_set(M_, Dtype(1), bias_multiplier_.mutable_cpu_data()); + } +} + +template +void CInnerProductLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + + const Dtype* weight = this->blobs_[0]->mutable_cpu_data(); + Dtype* weightMask = this->blobs_[2]->mutable_cpu_data(); + Dtype* weightTmp = this->weight_tmp_.mutable_cpu_data(); + const Dtype* bias = NULL; + Dtype* biasMask = NULL; + Dtype* biasTmp = NULL; + if (this->bias_term_) { + bias = this->blobs_[1]->mutable_cpu_data(); + biasMask = this->blobs_[3]->mutable_cpu_data(); + biasTmp = this->bias_tmp_.mutable_cpu_data(); + } + + if (this->phase_ == TRAIN){ + // Calculate the mean and standard deviation of learnable parameters + if (this->std==0 && this->iter_==0){ + unsigned int ncount = 0; + for (unsigned int k = 0;k < this->blobs_[0]->count(); ++k) { + this->mu += fabs(weight[k]); + this->std += weight[k]*weight[k]; + if (weight[k]!=0) ncount++; + } + if (this->bias_term_) { + for (unsigned int k = 0;k < this->blobs_[1]->count(); ++k) { + this->mu += fabs(bias[k]); + this->std += bias[k]*bias[k]; + if (bias[k]!=0) ncount++; + } + } + this->mu /= ncount; this->std -= ncount*mu*mu; + this->std /= ncount; this->std = sqrt(std); + LOG(INFO)<iter_%100==0){ + unsigned int ncount = 0; + for (unsigned int k = 0;k < this->blobs_[0]->count(); ++k) { + if (weightMask[k]*weight[k]!=0) ncount++; + } + if (this->bias_term_) { + for (unsigned int k = 0;k < this->blobs_[1]->count(); ++k) { + if (biasMask[k]*bias[k]!=0) ncount++; + } + } + LOG(INFO)<(rand())/static_cast(RAND_MAX); + // if (pow(1+(this->gamma)*(this->iter_),-(this->power))>r && (this->iter_)<(this->iter_stop_)) { + bool _update = false; + if (weightMask[0] > 1){ + _update = true; + crate = weightMask[0] - 1; + weightMask[0] = 1; + } else if (weightMask[0] < 0){ + _update = true; + crate = -weightMask[0]; + weightMask[0] = 0; + } + if (_update) { + for (unsigned int k = 0;k < this->blobs_[0]->count(); ++k) { + if (weightMask[k]==1 && fabs(weight[k])<=0.9*std::max(mu+crate*std,Dtype(0))) + weightMask[k] = 0; + else if (weightMask[k]==0 && fabs(weight[k])>1.1*std::max(mu+crate*std,Dtype(0))) + weightMask[k] = 1; + } + if (this->bias_term_) { + for (unsigned int k = 0;k < this->blobs_[1]->count(); ++k) { + if (biasMask[k]==1 && fabs(bias[k])<=0.9*std::max(mu+crate*std,Dtype(0))) + biasMask[k] = 0; + else if (biasMask[k]==0 && fabs(bias[k])>1.1*std::max(mu+crate*std,Dtype(0))) + biasMask[k] = 1; + } + } + _update = false; + } + } + + // Calculate the current (masked) weight and bias + for (unsigned int k = 0;k < this->blobs_[0]->count(); ++k) { + weightTmp[k] = weight[k]*weightMask[k]; + } + if (this->bias_term_){ + for (unsigned int k = 0;k < this->blobs_[1]->count(); ++k) { + biasTmp[k] = bias[k]*biasMask[k]; + } + } + + // Forward calculation with (masked) weight and bias + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + caffe_cpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., + bottom_data, weightTmp, (Dtype)0., top_data); + if (bias_term_) { + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., + bias_multiplier_.cpu_data(), biasTmp, (Dtype)1., top_data); + } +} + +template +void CInnerProductLayer::Backward_cpu(const vector*>& top, + const vector& 
propagate_down, + const vector*>& bottom) { + // Use the masked weight to propagate back + const Dtype* top_diff = top[0]->cpu_diff(); + if (this->param_propagate_down_[0]) { + const Dtype* weightMask = this->blobs_[2]->cpu_data(); + Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); + const Dtype* bottom_data = bottom[0]->cpu_data(); + // Gradient with respect to weight + for (unsigned int k = 0;k < this->blobs_[0]->count(); ++k) { + weight_diff[k] = weight_diff[k]*weightMask[k]; + } + caffe_cpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., + top_diff, bottom_data, (Dtype)1., weight_diff); + } + if (bias_term_ && this->param_propagate_down_[1]) { + const Dtype* biasMask = this->blobs_[3]->cpu_data(); + Dtype* bias_diff = this->blobs_[1]->mutable_cpu_diff(); + // Gradient with respect to bias + for (unsigned int k = 0;k < this->blobs_[1]->count(); ++k) { + bias_diff[k] = bias_diff[k]*biasMask[k]; + } + caffe_cpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, + bias_multiplier_.cpu_data(), (Dtype)1., bias_diff); + } + if (propagate_down[0]) { + const Dtype* weightTmp = this->weight_tmp_.cpu_data(); + // Gradient with respect to bottom data + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., + top_diff, weightTmp, (Dtype)0., + bottom[0]->mutable_cpu_diff()); + } +} + +#ifdef CPU_ONLY +STUB_GPU(CInnerProductLayer); +#endif + +INSTANTIATE_CLASS(CInnerProductLayer); +REGISTER_LAYER_CLASS(CInnerProduct); + +} // namespace caffe diff --git a/src/caffe/layers/compress_inner_product_layer.cu b/src/caffe/layers/compress_inner_product_layer.cu new file mode 100755 index 0000000..0f34607 --- /dev/null +++ b/src/caffe/layers/compress_inner_product_layer.cu @@ -0,0 +1,304 @@ +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" +#include + +namespace caffe { + +// The constant NUM_THREADS should be equal to the value in CCMomentCalc +template +__global__ void CCMomentCollect(const int n, const Dtype* wb, const Dtype* mask, + Dtype* mu, Dtype* std, unsigned int* count ) { + const int NUM_THREADS = 512; + __shared__ Dtype param [4*NUM_THREADS]; + __shared__ unsigned int tcount [2*NUM_THREADS]; + unsigned int t = threadIdx.x; + unsigned int s = 2 * blockIdx.x * NUM_THREADS; + if (s+t < n){ + param[t] = fabs(mask[s+t]*wb[s+t]); + param[t+2*NUM_THREADS] = mask[s+t]*wb[s+t]*wb[s+t]; + if(mask[s+t]*wb[s+t]!=0) tcount[t] = 1; + else tcount[t] = 0; + } + else{ + param[t] = 0;param[t+2*NUM_THREADS] = 0;tcount[t] = 0; + } + if (s+t+NUM_THREADS < n){ + param[t+NUM_THREADS] = fabs(mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]); + param[t+3*NUM_THREADS] = mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]; + if(mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]!=0) tcount[t+NUM_THREADS] = 1; + else tcount[t+NUM_THREADS] = 0; + } + else{ + param[t+NUM_THREADS] = 0;param[t+3*NUM_THREADS] = 0;tcount[t+NUM_THREADS] = 0; + } + __syncthreads(); + for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) { + if (t < stride ){ + param[t] += param[t+stride]; + param[t+2*NUM_THREADS] += param[t+2*NUM_THREADS+stride]; + tcount[t] += tcount[t+stride]; + } + __syncthreads(); + } + if (t == 0){ + mu [blockIdx.x] = param[0]; + std [blockIdx.x] = param[2*NUM_THREADS]; + count[blockIdx.x] = tcount[0]; + } +} + +// The constant NUM_THREADS should be equal to the value in CCMomentCalc +template +__global__ void 
CCNzeroCollect(const int n, const Dtype* mask, unsigned int* count ) { + const int NUM_THREADS = 512; + __shared__ unsigned int tcount [2*NUM_THREADS]; + unsigned int t = threadIdx.x; + unsigned int s = 2 * blockIdx.x * NUM_THREADS; + tcount[t] = 0; + if (s+t < n && mask[s+t]!=0){ + tcount[t] = 1; + } + tcount[t+NUM_THREADS] = 0; + if (s+t+NUM_THREADS < n && mask[s+t+NUM_THREADS]!=0){ + tcount[t+NUM_THREADS] = 1; + } + __syncthreads(); + for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) { + if (t < stride ){ + tcount[t] += tcount[t+stride]; + } + __syncthreads(); + } + if (t == 0){ + count[blockIdx.x] = tcount[0]; + } +} + +template +__global__ void CCMaskCalc(const int n, const Dtype* wb, + Dtype* mask, Dtype mu, Dtype std, Dtype r) { + CUDA_KERNEL_LOOP(index, n) { + // added by Guiying Li + //if (mask[index] > 1) + // mask[index] = 1; + //else if (mask[index] <0) + // mask[index] = 0; + // ----Guiying Li----- + if (mask[index]==1 && fabs(wb[index])<=0.9*max(mu+r*std,Dtype(0))) + mask[index] = 0; + else if (mask[index]==0 && fabs(wb[index])>1.1*max(mu+r*std,Dtype(0))) + mask[index] = 1; + } +} + +template +__global__ void CCcRateCalc(const int n, + Dtype* mask) { + CUDA_KERNEL_LOOP(index, n) { + if (index == 0){ + if (mask[0] > 1){ + mask[0] = 1; + } else if (mask[0] < 0){ + mask[0] = 0; + } + } + } +} + +template +__global__ void CCMaskApply(const int n, const Dtype* wb, + const Dtype* mask, Dtype* wb_t) { + CUDA_KERNEL_LOOP(index, n) { + wb_t[index] = wb[index] * mask[index]; + } +} + +template +void CCMomentCalc(const int n, const Dtype* wb, const Dtype* mask, Dtype* mu, Dtype* std, unsigned int* ncount){ + const unsigned int NUM_THREADS = 512; + Dtype* pmu_g; Dtype* pstd_g; unsigned int* pncount_g; + Dtype* pmu_c; Dtype* pstd_c; unsigned int* pncount_c; + int num_p = (n+(NUM_THREADS<<1)-1)/(NUM_THREADS<<1); + cudaMalloc(&pmu_g, sizeof(Dtype) * num_p); + cudaMalloc(&pstd_g, sizeof(Dtype) * num_p); + cudaMalloc(&pncount_g, sizeof(unsigned int) * num_p); + pmu_c = (Dtype*) malloc(num_p * sizeof(Dtype)); + pstd_c = (Dtype*) malloc(num_p * sizeof(Dtype)); + pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int)); + CCMomentCollect<<>>(n, wb, mask, pmu_g, pstd_g, pncount_g); + CUDA_POST_KERNEL_CHECK; + cudaMemcpy(pmu_c, pmu_g, sizeof(Dtype) * num_p, cudaMemcpyDeviceToHost); + cudaMemcpy(pstd_c, pstd_g, sizeof(Dtype) * num_p, cudaMemcpyDeviceToHost); + cudaMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, cudaMemcpyDeviceToHost); + for (int i = 0; i < num_p; i++) { + *mu += pmu_c[i];*std += pstd_c[i];*ncount += pncount_c[i]; + } + cudaFree(pmu_g);cudaFree(pstd_g);cudaFree(pncount_g); + free(pmu_c);free(pstd_c);free(pncount_c); +} + +template +void CCNZeroCalc(const int n, const Dtype* mask, unsigned int* ncount ){ + const unsigned int NUM_THREADS = 512; + unsigned int* pncount_g; + unsigned int* pncount_c; + int num_p = (n+(NUM_THREADS<<1)-1)/(NUM_THREADS<<1); + cudaMalloc(&pncount_g, sizeof(unsigned int) * num_p); + pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int)); + CCNzeroCollect<<>>(n, mask, pncount_g); + CUDA_POST_KERNEL_CHECK; + cudaMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, cudaMemcpyDeviceToHost); + for (int i = 0; i < num_p; i++) { + *ncount += pncount_c[i]; + } + cudaFree(pncount_g); + free(pncount_c); +} + +template +void CInnerProductLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + + const Dtype* weight = this->blobs_[0]->mutable_gpu_data(); + Dtype* weightMask = 
this->blobs_[2]->mutable_gpu_data(); + Dtype* weightTmp = this->weight_tmp_.mutable_gpu_data(); + const Dtype* bias = NULL; + Dtype* biasMask = NULL; + Dtype* biasTmp = NULL; + if (this->bias_term_) { + bias = this->blobs_[1]->mutable_gpu_data(); + biasMask = this->blobs_[3]->mutable_gpu_data(); + biasTmp = this->bias_tmp_.mutable_gpu_data(); + } + // added by Guiying Li + bool _update = false; + Dtype* tmp_weightMask = this->blobs_[2]->mutable_cpu_data(); + if (tmp_weightMask[0] > 1){ + _update = true; + this->crate = tmp_weightMask[0] - 1; + tmp_weightMask[0] = 1; + } else if (tmp_weightMask[0] < 0){ + _update = true; + this->crate = -tmp_weightMask[0]; + tmp_weightMask[0] = 0; + } + weightMask = this->blobs_[2]->mutable_gpu_data();// update data + // ------Guiying-------- + + if (this->phase_ == TRAIN){ + // Calculate the mean and standard deviation of learnable parameters + if (this->std==0 && this->iter_==0){ + unsigned int ncount = 0; + CCMomentCalc(this->blobs_[0]->count(), weight, weightMask, &mu, &std, &ncount); + if (this->bias_term_) { + CCMomentCalc(this->blobs_[1]->count(), bias, biasMask, &mu, &std, &ncount); + } + this->mu /= ncount; this->std -= ncount*mu*mu; + this->std /= ncount; this->std = sqrt(std); + LOG(INFO)<iter_%100==0){ + unsigned int ncount = 0; + CCNZeroCalc(this->blobs_[0]->count(), weightMask, &ncount); + if (this->bias_term_) { + CCNZeroCalc(this->blobs_[1]->count(), biasMask, &ncount); + } + LOG(INFO)<(rand())/static_cast(RAND_MAX); + //if (pow(1+(this->gamma)*(this->iter_),-(this->power))>r && (this->iter_)<(this->iter_stop_)) { +// CCcRateCalc<<blobs_[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(this->blobs_[0]->count(), weightMask); +// CUDA_POST_KERNEL_CHECK; + if (_update) { + CCMaskCalc<<blobs_[0]->count()), + CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), weight, weightMask, this->mu, this->std, this->crate); + CUDA_POST_KERNEL_CHECK; + if (this->bias_term_) { + CCMaskCalc<<blobs_[1]->count()), + CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[1]->count(), bias, biasMask, this->mu, this->std, this->crate); + CUDA_POST_KERNEL_CHECK; + } + _update = false; + } + } + + // Calculate the current (masked) weight and bias + CCMaskApply<<blobs_[0]->count()), + CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), weight, weightMask, weightTmp); + CUDA_POST_KERNEL_CHECK; + if (this->bias_term_) { + CCMaskApply<<blobs_[1]->count()), + CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[1]->count(), bias, biasMask, biasTmp); + CUDA_POST_KERNEL_CHECK; + } + + // Forward calculation with (masked) weight and bias + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + if (M_ == 1) { + caffe_gpu_gemv(CblasNoTrans, N_, K_, (Dtype)1., + weightTmp, bottom_data, (Dtype)0., top_data); + if (this->bias_term_) + caffe_gpu_axpy(N_, bias_multiplier_.cpu_data()[0], + biasTmp, top_data); + } else { + caffe_gpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., + bottom_data, weightTmp, (Dtype)0., top_data); + if (this->bias_term_) + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., + bias_multiplier_.gpu_data(), + biasTmp, (Dtype)1., top_data); + } +} + +template +void CInnerProductLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + const Dtype* top_diff = top[0]->gpu_diff(); + if (this->param_propagate_down_[0]) { + const Dtype* weightMask = this->blobs_[2]->gpu_data(); + Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + 
// Gradient with respect to weight + CCMaskApply<<blobs_[2]->count()), + CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[2]->count(), weight_diff, weightMask, weight_diff); + CUDA_POST_KERNEL_CHECK; + caffe_gpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., + top_diff, bottom_data, (Dtype)1., weight_diff); + } + if (bias_term_ && this->param_propagate_down_[1]) { + const Dtype* biasMask = this->blobs_[3]->gpu_data(); + Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); + // Gradient with respect to bias + CCMaskApply<<blobs_[3]->count()), + CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[3]->count(), bias_diff, biasMask, bias_diff); + CUDA_POST_KERNEL_CHECK; + caffe_gpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, + bias_multiplier_.gpu_data(), (Dtype)1.,bias_diff); + } + if (propagate_down[0]) { + const Dtype* weightTmp = this->weight_tmp_.gpu_data(); + // Gradient with respect to bottom data + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., + top_diff, weightTmp, (Dtype)0., + bottom[0]->mutable_gpu_diff()); + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(CInnerProductLayer); + +} // namespace caffe diff --git a/src/caffe/layers/concat_layer.cpp b/src/caffe/layers/concat_layer.cpp new file mode 100755 index 0000000..1cac8fc --- /dev/null +++ b/src/caffe/layers/concat_layer.cpp @@ -0,0 +1,98 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void ConcatLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + const ConcatParameter& concat_param = this->layer_param_.concat_param(); + CHECK(!(concat_param.has_axis() && concat_param.has_concat_dim())) + << "Either axis or concat_dim should be specified; not both."; +} + +template +void ConcatLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + const int num_axes = bottom[0]->num_axes(); + const ConcatParameter& concat_param = this->layer_param_.concat_param(); + if (concat_param.has_concat_dim()) { + concat_axis_ = static_cast(concat_param.concat_dim()); + // Don't allow negative indexing for concat_dim, a uint32 -- almost + // certainly unintended. + CHECK_GE(concat_axis_, 0) << "casting concat_dim from uint32 to int32 " + << "produced negative result; concat_dim must satisfy " + << "0 <= concat_dim < " << kMaxBlobAxes; + CHECK_LT(concat_axis_, num_axes) << "concat_dim out of range."; + } else { + concat_axis_ = bottom[0]->CanonicalAxisIndex(concat_param.axis()); + } + // Initialize with the first blob. 
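+  // E.g. concatenating bottoms of shape (N, 3, H, W) and (N, 5, H, W) along
+  // axis 1 yields a top of shape (N, 8, H, W); all other axes must agree.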
+ vector top_shape = bottom[0]->shape(); + num_concats_ = bottom[0]->count(0, concat_axis_); + concat_input_size_ = bottom[0]->count(concat_axis_ + 1); + int bottom_count_sum = bottom[0]->count(); + for (int i = 1; i < bottom.size(); ++i) { + CHECK_EQ(num_axes, bottom[i]->num_axes()) + << "All inputs must have the same #axes."; + for (int j = 0; j < num_axes; ++j) { + if (j == concat_axis_) { continue; } + CHECK_EQ(top_shape[j], bottom[i]->shape(j)) + << "All inputs must have the same shape, except at concat_axis."; + } + bottom_count_sum += bottom[i]->count(); + top_shape[concat_axis_] += bottom[i]->shape(concat_axis_); + } + top[0]->Reshape(top_shape); + CHECK_EQ(bottom_count_sum, top[0]->count()); +} + +template +void ConcatLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + Dtype* top_data = top[0]->mutable_cpu_data(); + int offset_concat_axis = 0; + const int top_concat_axis = top[0]->shape(concat_axis_); + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->cpu_data(); + const int bottom_concat_axis = bottom[i]->shape(concat_axis_); + for (int n = 0; n < num_concats_; ++n) { + caffe_copy(bottom_concat_axis * concat_input_size_, + bottom_data + n * bottom_concat_axis * concat_input_size_, + top_data + (n * top_concat_axis + offset_concat_axis) + * concat_input_size_); + } + offset_concat_axis += bottom_concat_axis; + } +} + +template +void ConcatLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* top_diff = top[0]->cpu_diff(); + int offset_concat_axis = 0; + const int top_concat_axis = top[0]->shape(concat_axis_); + for (int i = 0; i < bottom.size(); ++i) { + if (!propagate_down[i]) { continue; } + Dtype* bottom_diff = bottom[i]->mutable_cpu_diff(); + const int bottom_concat_axis = bottom[i]->shape(concat_axis_); + for (int n = 0; n < num_concats_; ++n) { + caffe_copy(bottom_concat_axis * concat_input_size_, top_diff + + (n * top_concat_axis + offset_concat_axis) * concat_input_size_, + bottom_diff + n * bottom_concat_axis * concat_input_size_); + } + offset_concat_axis += bottom_concat_axis; + } +} + +#ifdef CPU_ONLY +STUB_GPU(ConcatLayer); +#endif + +INSTANTIATE_CLASS(ConcatLayer); +REGISTER_LAYER_CLASS(Concat); + +} // namespace caffe diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu new file mode 100755 index 0000000..8f2e85d --- /dev/null +++ b/src/caffe/layers/concat_layer.cu @@ -0,0 +1,71 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +__global__ void Concat(const int nthreads, const Dtype* in_data, + const bool forward, const int num_concats, const int concat_size, + const int top_concat_axis, const int bottom_concat_axis, + const int offset_concat_axis, Dtype* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int total_concat_size = concat_size * bottom_concat_axis; + const int concat_num = index / total_concat_size; + const int concat_index = index % total_concat_size; + const int top_index = concat_index + + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; + if (forward) { + out_data[top_index] = in_data[index]; + } else { + out_data[index] = in_data[top_index]; + } + } +} + +template +void ConcatLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + Dtype* top_data = top[0]->mutable_gpu_data(); + int offset_concat_axis = 0; + const int top_concat_axis = top[0]->shape(concat_axis_); + const 
bool kForward = true; + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + const int bottom_concat_axis = bottom[i]->shape(concat_axis_); + const int bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int nthreads = bottom_concat_size * num_concats_; + Concat // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, bottom_data, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); + offset_concat_axis += bottom_concat_axis; + } +} + +template +void ConcatLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* top_diff = top[0]->gpu_diff(); + int offset_concat_axis = 0; + const int top_concat_axis = top[0]->shape(concat_axis_); + const bool kForward = false; + for (int i = 0; i < bottom.size(); ++i) { + if (!propagate_down[i]) { continue; } + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + const int bottom_concat_axis = bottom[i]->shape(concat_axis_); + const int bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int nthreads = bottom_concat_size * num_concats_; + Concat // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, top_diff, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); + offset_concat_axis += bottom_concat_axis; + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer); + +} // namespace caffe diff --git a/src/caffe/layers/contrastive_loss_layer.cpp b/src/caffe/layers/contrastive_loss_layer.cpp new file mode 100755 index 0000000..25e1678 --- /dev/null +++ b/src/caffe/layers/contrastive_loss_layer.cpp @@ -0,0 +1,121 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/loss_layers.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void ContrastiveLossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + LossLayer::LayerSetUp(bottom, top); + CHECK_EQ(bottom[0]->channels(), bottom[1]->channels()); + CHECK_EQ(bottom[0]->height(), 1); + CHECK_EQ(bottom[0]->width(), 1); + CHECK_EQ(bottom[1]->height(), 1); + CHECK_EQ(bottom[1]->width(), 1); + CHECK_EQ(bottom[2]->channels(), 1); + CHECK_EQ(bottom[2]->height(), 1); + CHECK_EQ(bottom[2]->width(), 1); + diff_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + diff_sq_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1); + // vector of ones used to sum along channels + summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); + for (int i = 0; i < bottom[0]->channels(); ++i) + summer_vec_.mutable_cpu_data()[i] = Dtype(1); +} + +template +void ContrastiveLossLayer::Forward_cpu( + const vector*>& bottom, + const vector*>& top) { + int count = bottom[0]->count(); + caffe_sub( + count, + bottom[0]->cpu_data(), // a + bottom[1]->cpu_data(), // b + diff_.mutable_cpu_data()); // a_i-b_i + const int channels = bottom[0]->channels(); + Dtype margin = this->layer_param_.contrastive_loss_param().margin(); + bool legacy_version = + this->layer_param_.contrastive_loss_param().legacy_version(); + Dtype loss(0.0); + for (int i = 0; i < bottom[0]->num(); ++i) { + dist_sq_.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_.cpu_data() + (i*channels), diff_.cpu_data() + (i*channels)); + if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs + loss += dist_sq_.cpu_data()[i]; + } else { // dissimilar pairs + if (legacy_version) { + 
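+        // Legacy form: max(margin - d^2, 0), where d^2 = dist_sq_ is the
+        // squared Euclidean distance; the branch below instead uses
+        // max(margin - d, 0)^2.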
loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); + } else { + Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), 0.0); + loss += dist*dist; + } + } + } + loss = loss / static_cast(bottom[0]->num()) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; +} + +template +void ContrastiveLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Dtype margin = this->layer_param_.contrastive_loss_param().margin(); + bool legacy_version = + this->layer_param_.contrastive_loss_param().legacy_version(); + for (int i = 0; i < 2; ++i) { + if (propagate_down[i]) { + const Dtype sign = (i == 0) ? 1 : -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_cpu_diff(); + if (static_cast(bottom[2]->cpu_data()[j])) { // similar pairs + caffe_cpu_axpby( + channels, + alpha, + diff_.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } else { // dissimilar pairs + Dtype mdist(0.0); + Dtype beta(0.0); + if (legacy_version) { + mdist = margin - dist_sq_.cpu_data()[j]; + beta = -alpha; + } else { + Dtype dist = sqrt(dist_sq_.cpu_data()[j]); + mdist = margin - dist; + beta = -alpha * mdist / (dist + Dtype(1e-4)); + } + if (mdist > Dtype(0.0)) { + caffe_cpu_axpby( + channels, + beta, + diff_.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(ContrastiveLossLayer); +#endif + +INSTANTIATE_CLASS(ContrastiveLossLayer); +REGISTER_LAYER_CLASS(ContrastiveLoss); + +} // namespace caffe diff --git a/src/caffe/layers/contrastive_loss_layer.cu b/src/caffe/layers/contrastive_loss_layer.cu new file mode 100755 index 0000000..9312393 --- /dev/null +++ b/src/caffe/layers/contrastive_loss_layer.cu @@ -0,0 +1,111 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void ContrastiveLossLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + const int count = bottom[0]->count(); + caffe_gpu_sub( + count, + bottom[0]->gpu_data(), // a + bottom[1]->gpu_data(), // b + diff_.mutable_gpu_data()); // a_i-b_i + caffe_gpu_powx( + count, + diff_.mutable_gpu_data(), // a_i-b_i + Dtype(2), + diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2 + caffe_gpu_gemv( + CblasNoTrans, + bottom[0]->num(), + bottom[0]->channels(), + Dtype(1.0), + diff_sq_.gpu_data(), // (a_i-b_i)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2 + Dtype margin = this->layer_param_.contrastive_loss_param().margin(); + bool legacy_version = + this->layer_param_.contrastive_loss_param().legacy_version(); + Dtype loss(0.0); + for (int i = 0; i < bottom[0]->num(); ++i) { + if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs + loss += dist_sq_.cpu_data()[i]; + } else { // dissimilar pairs + if (legacy_version) { + loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); + } else { + Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), + Dtype(0.0)); + loss += dist*dist; + } + } + } + loss = loss / static_cast(bottom[0]->num()) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; +} + +template +__global__ void CLLBackward(const int count, const int channels, + const Dtype margin, 
const bool legacy_version, const Dtype alpha, + const Dtype* y, const Dtype* diff, const Dtype* dist_sq, + Dtype *bottom_diff) { + CUDA_KERNEL_LOOP(i, count) { + int n = i / channels; // the num index, to access y and dist_sq + if (static_cast(y[n])) { // similar pairs + bottom_diff[i] = alpha * diff[i]; + } else { // dissimilar pairs + Dtype mdist(0.0); + Dtype beta(0.0); + if (legacy_version) { + mdist = (margin - dist_sq[n]); + beta = -alpha; + } else { + Dtype dist = sqrt(dist_sq[n]); + mdist = (margin - dist); + beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i]; + } + if (mdist > 0.0) { + bottom_diff[i] = beta; + } else { + bottom_diff[i] = 0; + } + } + } +} + +template +void ContrastiveLossLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + for (int i = 0; i < 2; ++i) { + if (propagate_down[i]) { + const int count = bottom[0]->count(); + const int channels = bottom[0]->channels(); + Dtype margin = this->layer_param_.contrastive_loss_param().margin(); + const bool legacy_version = + this->layer_param_.contrastive_loss_param().legacy_version(); + const Dtype sign = (i == 0) ? 1 : -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(bottom[0]->num()); + // NOLINT_NEXT_LINE(whitespace/operators) + CLLBackward<<>>( + count, channels, margin, legacy_version, alpha, + bottom[2]->gpu_data(), // pair similarity 0 or 1 + diff_.gpu_data(), // the cached eltwise difference between a and b + dist_sq_.gpu_data(), // the cached square distance between a and b + bottom[i]->mutable_gpu_diff()); + CUDA_POST_KERNEL_CHECK; + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(ContrastiveLossLayer); + +} // namespace caffe diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp new file mode 100755 index 0000000..928ef5e --- /dev/null +++ b/src/caffe/layers/conv_layer.cpp @@ -0,0 +1,76 @@ +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void ConvolutionLayer::compute_output_shape() { + this->height_out_ = (this->height_ + 2 * this->pad_h_ - this->kernel_h_) + / this->stride_h_ + 1; + this->width_out_ = (this->width_ + 2 * this->pad_w_ - this->kernel_w_) + / this->stride_w_ + 1; +} + +template +void ConvolutionLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* weight = this->blobs_[0]->cpu_data(); + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->cpu_data(); + Dtype* top_data = top[i]->mutable_cpu_data(); + for (int n = 0; n < this->num_; ++n) { + this->forward_cpu_gemm(bottom_data + bottom[i]->offset(n), weight, + top_data + top[i]->offset(n)); + if (this->bias_term_) { + const Dtype* bias = this->blobs_[1]->cpu_data(); + this->forward_cpu_bias(top_data + top[i]->offset(n), bias); + } + } + } +} + +template +void ConvolutionLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* weight = this->blobs_[0]->cpu_data(); + Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->cpu_diff(); + const Dtype* bottom_data = bottom[i]->cpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_cpu_diff(); + // Bias gradient, if necessary. 
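+    // The bias gradient for output channel c is the sum of top_diff over all
+    // spatial positions of that channel, accumulated across the batch:
+    //   d(bias_c) += sum_n sum_{h,w} top_diff[n, c, h, w].
+    // backward_cpu_bias in the shared convolution base class realizes this as
+    // a matrix-vector product against a vector of ones (the bias multiplier).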
+ if (this->bias_term_ && this->param_propagate_down_[1]) { + Dtype* bias_diff = this->blobs_[1]->mutable_cpu_diff(); + for (int n = 0; n < this->num_; ++n) { + this->backward_cpu_bias(bias_diff, top_diff + top[i]->offset(n)); + } + } + if (this->param_propagate_down_[0] || propagate_down[i]) { + for (int n = 0; n < this->num_; ++n) { + // gradient w.r.t. weight. Note that we will accumulate diffs. + if (this->param_propagate_down_[0]) { + this->weight_cpu_gemm(bottom_data + bottom[i]->offset(n), + top_diff + top[i]->offset(n), weight_diff); + } + // gradient w.r.t. bottom data, if necessary. + if (propagate_down[i]) { + this->backward_cpu_gemm(top_diff + top[i]->offset(n), weight, + bottom_diff + bottom[i]->offset(n)); + } + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(ConvolutionLayer); +#endif + +INSTANTIATE_CLASS(ConvolutionLayer); + +} // namespace caffe diff --git a/src/caffe/layers/conv_layer.cu b/src/caffe/layers/conv_layer.cu new file mode 100755 index 0000000..b8a98ff --- /dev/null +++ b/src/caffe/layers/conv_layer.cu @@ -0,0 +1,64 @@ +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void ConvolutionLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* weight = this->blobs_[0]->gpu_data(); + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* top_data = top[i]->mutable_gpu_data(); + for (int n = 0; n < this->num_; ++n) { + this->forward_gpu_gemm(bottom_data + bottom[i]->offset(n), weight, + top_data + top[i]->offset(n)); + if (this->bias_term_) { + const Dtype* bias = this->blobs_[1]->gpu_data(); + this->forward_gpu_bias(top_data + top[i]->offset(n), bias); + } + } + } +} + +template +void ConvolutionLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* weight = this->blobs_[0]->gpu_data(); + Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + // Bias gradient, if necessary. + if (this->bias_term_ && this->param_propagate_down_[1]) { + Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); + for (int n = 0; n < this->num_; ++n) { + this->backward_gpu_bias(bias_diff, top_diff + top[i]->offset(n)); + } + } + if (this->param_propagate_down_[0] || propagate_down[i]) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + for (int n = 0; n < this->num_; ++n) { + // gradient w.r.t. weight. Note that we will accumulate diffs. + if (this->param_propagate_down_[0]) { + this->weight_gpu_gemm(bottom_data + bottom[i]->offset(n), + top_diff + top[i]->offset(n), weight_diff); + } + // gradient w.r.t. bottom data, if necessary. 
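+        // backward_gpu_gemm multiplies the transposed weights into top_diff
+        // for one image and scatters the resulting column buffer back into
+        // image layout (col2im), the exact reverse of the im2col + GEMM
+        // computation used in the forward pass.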
+ if (propagate_down[i]) { + this->backward_gpu_gemm(top_diff + top[i]->offset(n), weight, + bottom_diff + bottom[i]->offset(n)); + } + } + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(ConvolutionLayer); + +} // namespace caffe diff --git a/src/caffe/layers/cudnn_conv_layer.cpp b/src/caffe/layers/cudnn_conv_layer.cpp new file mode 100755 index 0000000..104d2b9 --- /dev/null +++ b/src/caffe/layers/cudnn_conv_layer.cpp @@ -0,0 +1,130 @@ +#ifdef USE_CUDNN +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +// Set to three for the benefit of the backward pass, which +// can use separate streams for calculating the gradient w.r.t. +// bias, filter weights, and bottom data for each group independently +#define CUDNN_STREAMS_PER_GROUP 3 + +/** + * TODO(dox) explain cuDNN interface + */ +template +void CuDNNConvolutionLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + ConvolutionLayer::LayerSetUp(bottom, top); + // Initialize CUDA streams and cuDNN. + stream_ = new cudaStream_t[this->group_ * CUDNN_STREAMS_PER_GROUP]; + handle_ = new cudnnHandle_t[this->group_ * CUDNN_STREAMS_PER_GROUP]; + workspaceSizeInBytes = 0; + workspace = NULL; + + for (int g = 0; g < this->group_ * CUDNN_STREAMS_PER_GROUP; g++) { + CUDA_CHECK(cudaStreamCreate(&stream_[g])); + CUDNN_CHECK(cudnnCreate(&handle_[g])); + CUDNN_CHECK(cudnnSetStream(handle_[g], stream_[g])); + } + + // Set the indexing parameters. + weight_offset_ = (this->num_output_ / this->group_) + * (this->channels_ / this->group_) * this->kernel_h_ * this->kernel_w_; + bias_offset_ = (this->num_output_ / this->group_); + + // Create filter descriptor. + cudnn::createFilterDesc(&filter_desc_, + this->num_output_ / this->group_, this->channels_ / this->group_, + this->kernel_h_, this->kernel_w_); + + // Create tensor descriptor(s) for data and corresponding convolution(s). + for (int i = 0; i < bottom.size(); i++) { + cudnnTensorDescriptor_t bottom_desc; + cudnn::createTensor4dDesc(&bottom_desc); + bottom_descs_.push_back(bottom_desc); + cudnnTensorDescriptor_t top_desc; + cudnn::createTensor4dDesc(&top_desc); + top_descs_.push_back(top_desc); + cudnnConvolutionDescriptor_t conv_desc; + cudnn::createConvolutionDesc(&conv_desc); + conv_descs_.push_back(conv_desc); + } + + // Tensor descriptor for bias. 
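+  // Only the descriptor object is created here; its shape
+  // (1 x num_output_/group_ x 1 x 1) is filled in by setTensor4dDesc in
+  // Reshape below, together with the bottom/top/convolution descriptors.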
+ if (this->bias_term_) { + cudnn::createTensor4dDesc(&bias_desc_); + } + + handles_setup_ = true; +} + +template +void CuDNNConvolutionLayer::Reshape( + const vector*>& bottom, const vector*>& top) { + ConvolutionLayer::Reshape(bottom, top); + bottom_offset_ = (this->channels_ / this->group_) + * this->height_ * this->width_; + top_offset_ = (this->num_output_ / this->group_) + * this->height_out_ * this->width_out_; + + for (int i = 0; i < bottom.size(); i++) { + cudnn::setTensor4dDesc(&bottom_descs_[i], + this->num_, + this->channels_ / this->group_, + this->height_, this->width_, + this->channels_ * this->height_ * this->width_, + this->height_ * this->width_, + this->width_, 1); + cudnn::setTensor4dDesc(&top_descs_[i], + this->num_, + this->num_output_ / this->group_, + this->height_out_, this->width_out_, + this->num_output_ * this->height_out_ * this->width_out_, + this->height_out_ * this->width_out_, + this->width_out_, 1); + cudnn::setConvolutionDesc(&conv_descs_[i], bottom_descs_[i], + filter_desc_, this->pad_h_, this->pad_w_, + this->stride_h_, this->stride_w_); + } + + // Tensor descriptor for bias. + if (this->bias_term_) { + cudnn::setTensor4dDesc(&bias_desc_, + 1, this->num_output_ / this->group_, 1, 1); + } +} + +template +CuDNNConvolutionLayer::~CuDNNConvolutionLayer() { + // Check that handles have been setup before destroying. + if (!handles_setup_) { return; } + + for (int i = 0; i < bottom_descs_.size(); i++) { + cudnnDestroyTensorDescriptor(bottom_descs_[i]); + cudnnDestroyTensorDescriptor(top_descs_[i]); + cudnnDestroyConvolutionDescriptor(conv_descs_[i]); + } + if (this->bias_term_) { + cudnnDestroyTensorDescriptor(bias_desc_); + } + cudnnDestroyFilterDescriptor(filter_desc_); + + for (int g = 0; g < this->group_ * CUDNN_STREAMS_PER_GROUP; g++) { + cudaStreamDestroy(stream_[g]); + cudnnDestroy(handle_[g]); + } + + delete [] stream_; + delete [] handle_; +} + +INSTANTIATE_CLASS(CuDNNConvolutionLayer); + +} // namespace caffe +#endif diff --git a/src/caffe/layers/cudnn_conv_layer.cu b/src/caffe/layers/cudnn_conv_layer.cu new file mode 100755 index 0000000..b4e802e --- /dev/null +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -0,0 +1,160 @@ +#ifdef USE_CUDNN +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +__global__ void sync_conv_groups() { } + +template +void CuDNNConvolutionLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* top_data = top[i]->mutable_gpu_data(); + const Dtype* weight = this->blobs_[0]->gpu_data(); + + size_t workspace_limit_bytes = this->kernel_h_ * + this->kernel_w_ * + this->channels_ * + sizeof(int) + 1; + + // Forward through cuDNN in parallel over groups. 
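+    // Each group g is issued on its own cuDNN handle (handle_[g]); the
+    // handles were bound to separate CUDA streams in LayerSetUp, so the
+    // per-group convolutions can overlap until sync_conv_groups (launched
+    // after this loop) joins them back on the default stream.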
+ for (int g = 0; g < this->group_; g++) { + cudnnConvolutionFwdAlgo_t algo; + + // pick the convolution algorithm + // TODO(shelhamer) this should be done during reshape + // TODO(shelhamer) the choice of automatic or manual algorithm picking + // should be exposed in proto + CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(handle_[g], + bottom_descs_[i], + filter_desc_, + conv_descs_[i], + top_descs_[i], + CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, + workspace_limit_bytes, // memoryLimitInBytes, + &algo)); + + // get minimum size of the workspace needed for the desired algorithm + size_t workspaceSizeInBytes_temp = 0; + + CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle_[g], + bottom_descs_[i], + filter_desc_, + conv_descs_[i], + top_descs_[i], + algo, + &workspaceSizeInBytes_temp)); + + if (workspaceSizeInBytes_temp > workspaceSizeInBytes) { + workspaceSizeInBytes = workspaceSizeInBytes_temp; + // free the existing workspace and allocate a new (larger) one + cudaFree(this->workspace); + cudaError_t err = cudaMalloc(&(this->workspace), workspaceSizeInBytes); + if (err != cudaSuccess) { + // force zero memory path + algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM; + workspace = NULL; + workspaceSizeInBytes = 0; + } + } + + // Filters. + CUDNN_CHECK(cudnnConvolutionForward(handle_[g], + cudnn::dataType::one, + bottom_descs_[i], bottom_data + bottom_offset_ * g, + filter_desc_, weight + weight_offset_ * g, + conv_descs_[i], + algo, workspace, workspaceSizeInBytes, + cudnn::dataType::zero, + top_descs_[i], top_data + top_offset_ * g)); + + // Bias. + if (this->bias_term_) { + const Dtype* bias_data = this->blobs_[1]->gpu_data(); + CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C, + cudnn::dataType::one, + bias_desc_, bias_data + bias_offset_ * g, + cudnn::dataType::one, + top_descs_[i], top_data + top_offset_ * g)); + } + } + + // Synchronize the work across groups, each of which went into its own + // stream, by launching an empty kernel into the default (null) stream. + // NOLINT_NEXT_LINE(whitespace/operators) + sync_conv_groups<<<1, 1>>>(); + } +} + +template +void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* weight = NULL; + Dtype* weight_diff = NULL; + if (this->param_propagate_down_[0]) { + weight = this->blobs_[0]->gpu_data(); + weight_diff = this->blobs_[0]->mutable_gpu_diff(); + } + Dtype* bias_diff = NULL; + if (this->bias_term_ && this->param_propagate_down_[1]) { + bias_diff = this->blobs_[1]->mutable_gpu_diff(); + } + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + // Backward through cuDNN in parallel over groups and gradients. + for (int g = 0; g < this->group_; g++) { + // Gradient w.r.t. bias. + if (this->bias_term_ && this->param_propagate_down_[1]) { + CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], + cudnn::dataType::one, + top_descs_[i], top_diff + top_offset_ * g, + cudnn::dataType::one, + bias_desc_, bias_diff + bias_offset_ * g)); + } + + // Gradient w.r.t. weights. + if (this->param_propagate_down_[0]) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g], + cudnn::dataType::one, + bottom_descs_[i], bottom_data + bottom_offset_ * g, + top_descs_[i], top_diff + top_offset_ * g, + conv_descs_[i], + cudnn::dataType::one, + filter_desc_, weight_diff + weight_offset_ * g)); + } + + // Gradient w.r.t. bottom data. 
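+      // The data gradient uses the third per-group handle (2*group_ + g);
+      // the bias and weight gradients above use offsets 0 and 1. This is why
+      // CUDNN_STREAMS_PER_GROUP is 3: the three backward computations of a
+      // group can run in separate streams before sync_conv_groups joins them.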
+ if (propagate_down[i]) { + if (weight == NULL) { + weight = this->blobs_[0]->gpu_data(); + } + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g], + cudnn::dataType::one, + filter_desc_, weight + weight_offset_ * g, + top_descs_[i], top_diff + top_offset_ * g, + conv_descs_[i], + cudnn::dataType::zero, + bottom_descs_[i], bottom_diff + bottom_offset_ * g)); + } + } + + // Synchronize the work across groups, each of which went into its own + // stream, by launching an empty kernel into the default (null) stream. + // NOLINT_NEXT_LINE(whitespace/operators) + sync_conv_groups<<<1, 1>>>(); + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer); + +} // namespace caffe +#endif diff --git a/src/caffe/layers/cudnn_pooling_layer.cpp b/src/caffe/layers/cudnn_pooling_layer.cpp new file mode 100755 index 0000000..c92c4e4 --- /dev/null +++ b/src/caffe/layers/cudnn_pooling_layer.cpp @@ -0,0 +1,50 @@ +#ifdef USE_CUDNN +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void CuDNNPoolingLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + PoolingLayer::LayerSetUp(bottom, top); + CUDNN_CHECK(cudnnCreate(&handle_)); + cudnn::createTensor4dDesc(&bottom_desc_); + cudnn::createTensor4dDesc(&top_desc_); + cudnn::createPoolingDesc(&pooling_desc_, + this->layer_param_.pooling_param().pool(), &mode_, + this->kernel_h_, this->kernel_w_, this->pad_h_, this->pad_w_, + this->stride_h_, this->stride_w_); + handles_setup_ = true; +} + +template +void CuDNNPoolingLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + PoolingLayer::Reshape(bottom, top); + cudnn::setTensor4dDesc(&bottom_desc_, bottom[0]->num(), + this->channels_, this->height_, this->width_); + cudnn::setTensor4dDesc(&top_desc_, bottom[0]->num(), + this->channels_, this->pooled_height_, this->pooled_width_); +} + +template +CuDNNPoolingLayer::~CuDNNPoolingLayer() { + // Check that handles have been setup before destroying. 
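+  // handles_setup_ is only set at the end of LayerSetUp, so this guard keeps
+  // the destructor from releasing descriptors and handles that were never
+  // created (e.g. a layer object that was constructed but never set up).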
+ if (!handles_setup_) { return; } + + cudnnDestroyTensorDescriptor(bottom_desc_); + cudnnDestroyTensorDescriptor(top_desc_); + cudnnDestroyPoolingDescriptor(pooling_desc_); + cudnnDestroy(handle_); +} + +INSTANTIATE_CLASS(CuDNNPoolingLayer); + +} // namespace caffe +#endif diff --git a/src/caffe/layers/cudnn_pooling_layer.cu b/src/caffe/layers/cudnn_pooling_layer.cu new file mode 100755 index 0000000..a952b85 --- /dev/null +++ b/src/caffe/layers/cudnn_pooling_layer.cu @@ -0,0 +1,45 @@ +#ifdef USE_CUDNN +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void CuDNNPoolingLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + CUDNN_CHECK(cudnnPoolingForward(handle_, pooling_desc_, + cudnn::dataType::one, + bottom_desc_, bottom_data, + cudnn::dataType::zero, + top_desc_, top_data)); +} + +template +void CuDNNPoolingLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (!propagate_down[0]) { + return; + } + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* top_data = top[0]->gpu_data(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + CUDNN_CHECK(cudnnPoolingBackward(handle_, pooling_desc_, + cudnn::dataType::one, + top_desc_, top_data, top_desc_, top_diff, + bottom_desc_, bottom_data, + cudnn::dataType::zero, + bottom_desc_, bottom_diff)); +} + +INSTANTIATE_LAYER_GPU_FUNCS(CuDNNPoolingLayer); + +} // namespace caffe +#endif diff --git a/src/caffe/layers/cudnn_relu_layer.cpp b/src/caffe/layers/cudnn_relu_layer.cpp new file mode 100755 index 0000000..759d839 --- /dev/null +++ b/src/caffe/layers/cudnn_relu_layer.cpp @@ -0,0 +1,46 @@ +#ifdef USE_CUDNN +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void CuDNNReLULayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + ReLULayer::LayerSetUp(bottom, top); + // initialize cuDNN + CUDNN_CHECK(cudnnCreate(&handle_)); + cudnn::createTensor4dDesc(&bottom_desc_); + cudnn::createTensor4dDesc(&top_desc_); + handles_setup_ = true; +} + +template +void CuDNNReLULayer::Reshape(const vector*>& bottom, + const vector*>& top) { + ReLULayer::Reshape(bottom, top); + const int N = bottom[0]->num(); + const int K = bottom[0]->channels(); + const int H = bottom[0]->height(); + const int W = bottom[0]->width(); + cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); + cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); +} + +template +CuDNNReLULayer::~CuDNNReLULayer() { + // Check that handles have been setup before destroying. 
+ if (!handles_setup_) { return; } + + cudnnDestroyTensorDescriptor(this->bottom_desc_); + cudnnDestroyTensorDescriptor(this->top_desc_); + cudnnDestroy(this->handle_); +} + +INSTANTIATE_CLASS(CuDNNReLULayer); + +} // namespace caffe +#endif diff --git a/src/caffe/layers/cudnn_relu_layer.cu b/src/caffe/layers/cudnn_relu_layer.cu new file mode 100755 index 0000000..21d1485 --- /dev/null +++ b/src/caffe/layers/cudnn_relu_layer.cu @@ -0,0 +1,57 @@ +#ifdef USE_CUDNN +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void CuDNNReLULayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + // Fallback to standard Caffe for leaky ReLU. + if (ReLULayer::layer_param_.relu_param().negative_slope() != 0) { + return ReLULayer::Forward_gpu(bottom, top); + } + + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + CUDNN_CHECK(cudnnActivationForward(this->handle_, + CUDNN_ACTIVATION_RELU, + cudnn::dataType::one, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->top_desc_, top_data)); +} + +template +void CuDNNReLULayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (!propagate_down[0]) { + return; + } + + // Fallback to standard Caffe for leaky ReLU. + if (ReLULayer::layer_param_.relu_param().negative_slope() != 0) { + return ReLULayer::Backward_gpu(top, propagate_down, bottom); + } + + const Dtype* top_data = top[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + CUDNN_CHECK(cudnnActivationBackward(this->handle_, + CUDNN_ACTIVATION_RELU, + cudnn::dataType::one, + this->top_desc_, top_data, this->top_desc_, top_diff, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->bottom_desc_, bottom_diff)); +} + +INSTANTIATE_LAYER_GPU_FUNCS(CuDNNReLULayer); + +} // namespace caffe +#endif diff --git a/src/caffe/layers/cudnn_sigmoid_layer.cpp b/src/caffe/layers/cudnn_sigmoid_layer.cpp new file mode 100755 index 0000000..3263787 --- /dev/null +++ b/src/caffe/layers/cudnn_sigmoid_layer.cpp @@ -0,0 +1,46 @@ +#ifdef USE_CUDNN +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void CuDNNSigmoidLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + SigmoidLayer::LayerSetUp(bottom, top); + // initialize cuDNN + CUDNN_CHECK(cudnnCreate(&handle_)); + cudnn::createTensor4dDesc(&bottom_desc_); + cudnn::createTensor4dDesc(&top_desc_); + handles_setup_ = true; +} + +template +void CuDNNSigmoidLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + SigmoidLayer::Reshape(bottom, top); + const int N = bottom[0]->num(); + const int K = bottom[0]->channels(); + const int H = bottom[0]->height(); + const int W = bottom[0]->width(); + cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); + cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); +} + +template +CuDNNSigmoidLayer::~CuDNNSigmoidLayer() { + // Check that handles have been setup before destroying. 
+ if (!handles_setup_) { return; } + + cudnnDestroyTensorDescriptor(this->bottom_desc_); + cudnnDestroyTensorDescriptor(this->top_desc_); + cudnnDestroy(this->handle_); +} + +INSTANTIATE_CLASS(CuDNNSigmoidLayer); + +} // namespace caffe +#endif diff --git a/src/caffe/layers/cudnn_sigmoid_layer.cu b/src/caffe/layers/cudnn_sigmoid_layer.cu new file mode 100755 index 0000000..7a06cf7 --- /dev/null +++ b/src/caffe/layers/cudnn_sigmoid_layer.cu @@ -0,0 +1,47 @@ +#ifdef USE_CUDNN +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void CuDNNSigmoidLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + CUDNN_CHECK(cudnnActivationForward(this->handle_, + CUDNN_ACTIVATION_SIGMOID, + cudnn::dataType::one, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->top_desc_, top_data)); +} + +template +void CuDNNSigmoidLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (!propagate_down[0]) { + return; + } + + const Dtype* top_data = top[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + CUDNN_CHECK(cudnnActivationBackward(this->handle_, + CUDNN_ACTIVATION_SIGMOID, + cudnn::dataType::one, + this->top_desc_, top_data, this->top_desc_, top_diff, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->bottom_desc_, bottom_diff)); +} + +INSTANTIATE_LAYER_GPU_FUNCS(CuDNNSigmoidLayer); + +} // namespace caffe +#endif diff --git a/src/caffe/layers/cudnn_softmax_layer.cpp b/src/caffe/layers/cudnn_softmax_layer.cpp new file mode 100755 index 0000000..77a3225 --- /dev/null +++ b/src/caffe/layers/cudnn_softmax_layer.cpp @@ -0,0 +1,50 @@ +#ifdef USE_CUDNN +#include +#include +#include + +#include "thrust/device_vector.h" + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void CuDNNSoftmaxLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + SoftmaxLayer::LayerSetUp(bottom, top); + // Initialize CUDNN. + CUDNN_CHECK(cudnnCreate(&handle_)); + cudnn::createTensor4dDesc(&bottom_desc_); + cudnn::createTensor4dDesc(&top_desc_); + handles_setup_ = true; +} + +template +void CuDNNSoftmaxLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + SoftmaxLayer::Reshape(bottom, top); + int N = this->outer_num_; + int K = bottom[0]->shape(this->softmax_axis_); + int H = this->inner_num_; + int W = 1; + cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); + cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); +} + +template +CuDNNSoftmaxLayer::~CuDNNSoftmaxLayer() { + // Check that handles have been setup before destroying. 
+ if (!handles_setup_) { return; } + + cudnnDestroyTensorDescriptor(bottom_desc_); + cudnnDestroyTensorDescriptor(top_desc_); + cudnnDestroy(handle_); +} + +INSTANTIATE_CLASS(CuDNNSoftmaxLayer); + +} // namespace caffe +#endif diff --git a/src/caffe/layers/cudnn_softmax_layer.cu b/src/caffe/layers/cudnn_softmax_layer.cu new file mode 100755 index 0000000..a9e2fce --- /dev/null +++ b/src/caffe/layers/cudnn_softmax_layer.cu @@ -0,0 +1,48 @@ +#ifdef USE_CUDNN +#include +#include +#include + +#include "thrust/device_vector.h" + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void CuDNNSoftmaxLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + CUDNN_CHECK(cudnnSoftmaxForward(handle_, CUDNN_SOFTMAX_ACCURATE, + CUDNN_SOFTMAX_MODE_CHANNEL, + cudnn::dataType::one, + bottom_desc_, bottom_data, + cudnn::dataType::zero, + top_desc_, top_data)); +} + +template +void CuDNNSoftmaxLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (propagate_down[0]) { + const Dtype* top_data = top[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + + CUDNN_CHECK(cudnnSoftmaxBackward(handle_, CUDNN_SOFTMAX_ACCURATE, + CUDNN_SOFTMAX_MODE_CHANNEL, + cudnn::dataType::one, + top_desc_, top_data, top_desc_, top_diff, + cudnn::dataType::zero, + bottom_desc_, bottom_diff)); + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(CuDNNSoftmaxLayer); + +} // namespace caffe +#endif diff --git a/src/caffe/layers/cudnn_tanh_layer.cpp b/src/caffe/layers/cudnn_tanh_layer.cpp new file mode 100755 index 0000000..376faad --- /dev/null +++ b/src/caffe/layers/cudnn_tanh_layer.cpp @@ -0,0 +1,46 @@ +#ifdef USE_CUDNN +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void CuDNNTanHLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + TanHLayer::LayerSetUp(bottom, top); + // initialize cuDNN + CUDNN_CHECK(cudnnCreate(&handle_)); + cudnn::createTensor4dDesc(&bottom_desc_); + cudnn::createTensor4dDesc(&top_desc_); + handles_setup_ = true; +} + +template +void CuDNNTanHLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + TanHLayer::Reshape(bottom, top); + const int N = bottom[0]->num(); + const int K = bottom[0]->channels(); + const int H = bottom[0]->height(); + const int W = bottom[0]->width(); + cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); + cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); +} + +template +CuDNNTanHLayer::~CuDNNTanHLayer() { + // Check that handles have been setup before destroying. 
+ if (!handles_setup_) { return; } + + cudnnDestroyTensorDescriptor(this->bottom_desc_); + cudnnDestroyTensorDescriptor(this->top_desc_); + cudnnDestroy(this->handle_); +} + +INSTANTIATE_CLASS(CuDNNTanHLayer); + +} // namespace caffe +#endif diff --git a/src/caffe/layers/cudnn_tanh_layer.cu b/src/caffe/layers/cudnn_tanh_layer.cu new file mode 100755 index 0000000..d287f6f --- /dev/null +++ b/src/caffe/layers/cudnn_tanh_layer.cu @@ -0,0 +1,48 @@ +#ifdef USE_CUDNN +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void CuDNNTanHLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + CUDNN_CHECK(cudnnActivationForward(this->handle_, + CUDNN_ACTIVATION_TANH, + cudnn::dataType::one, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->top_desc_, top_data)); +} + +template +void CuDNNTanHLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (!propagate_down[0]) { + return; + } + + const Dtype* top_data = top[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + + CUDNN_CHECK(cudnnActivationBackward(this->handle_, + CUDNN_ACTIVATION_TANH, + cudnn::dataType::one, + this->top_desc_, top_data, this->top_desc_, top_diff, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->bottom_desc_, bottom_diff)); +} + +INSTANTIATE_LAYER_GPU_FUNCS(CuDNNTanHLayer); + +} // namespace caffe +#endif diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp new file mode 100755 index 0000000..0932d9f --- /dev/null +++ b/src/caffe/layers/data_layer.cpp @@ -0,0 +1,113 @@ +#include + +#include + +#include +#include + +#include "caffe/common.hpp" +#include "caffe/data_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/benchmark.hpp" +#include "caffe/util/io.hpp" + +namespace caffe { + +template +DataLayer::DataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param), + reader_(param) { +} + +template +DataLayer::~DataLayer() { + this->StopInternalThread(); +} + +template +void DataLayer::DataLayerSetUp(const vector*>& bottom, + const vector*>& top) { + const int batch_size = this->layer_param_.data_param().batch_size(); + // Read a data point, and use it to initialize the top blob. + Datum& datum = *(reader_.full().peek()); + + // Use data_transformer to infer the expected blob shape from datum. + vector top_shape = this->data_transformer_->InferBlobShape(datum); + this->transformed_data_.Reshape(top_shape); + // Reshape top[0] and prefetch_data according to the batch_size. 
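+  // InferBlobShape returns the shape of a single datum with a leading
+  // dimension of 1; overwriting element 0 with batch_size (below) turns it
+  // into the full batch shape shared by top[0] and every prefetch buffer.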
+ top_shape[0] = batch_size; + top[0]->Reshape(top_shape); + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].data_.Reshape(top_shape); + } + LOG(INFO) << "output data size: " << top[0]->num() << "," + << top[0]->channels() << "," << top[0]->height() << "," + << top[0]->width(); + // label + if (this->output_labels_) { + vector label_shape(1, batch_size); + top[1]->Reshape(label_shape); + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].label_.Reshape(label_shape); + } + } +} + +// This function is called on prefetch thread +template +void DataLayer::load_batch(Batch* batch) { + CPUTimer batch_timer; + batch_timer.Start(); + double read_time = 0; + double trans_time = 0; + CPUTimer timer; + CHECK(batch->data_.count()); + CHECK(this->transformed_data_.count()); + + // Reshape according to the first datum of each batch + // on single input batches allows for inputs of varying dimension. + const int batch_size = this->layer_param_.data_param().batch_size(); + Datum& datum = *(reader_.full().peek()); + // Use data_transformer to infer the expected blob shape from datum. + vector top_shape = this->data_transformer_->InferBlobShape(datum); + this->transformed_data_.Reshape(top_shape); + // Reshape batch according to the batch_size. + top_shape[0] = batch_size; + batch->data_.Reshape(top_shape); + + Dtype* top_data = batch->data_.mutable_cpu_data(); + Dtype* top_label = NULL; // suppress warnings about uninitialized variables + + if (this->output_labels_) { + top_label = batch->label_.mutable_cpu_data(); + } + for (int item_id = 0; item_id < batch_size; ++item_id) { + timer.Start(); + // get a datum + Datum& datum = *(reader_.full().pop("Waiting for data")); + read_time += timer.MicroSeconds(); + timer.Start(); + // Apply data transformations (mirror, scale, crop...) + int offset = batch->data_.offset(item_id); + this->transformed_data_.set_cpu_data(top_data + offset); + this->data_transformer_->Transform(datum, &(this->transformed_data_)); + // Copy label. 
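+    // Once the label has been copied out, the datum is pushed back onto the
+    // reader's free queue (below) so the background reading thread can reuse
+    // it; the paired full/free queues effectively keep a fixed pool of datum
+    // buffers in flight (a sketch of DataReader's behavior, not spelled out
+    // in this file).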
+ if (this->output_labels_) { + top_label[item_id] = datum.label(); + } + trans_time += timer.MicroSeconds(); + + reader_.free().push(const_cast(&datum)); + } + timer.Stop(); + batch_timer.Stop(); + DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; + DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; + DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms."; +} + +INSTANTIATE_CLASS(DataLayer); +REGISTER_LAYER_CLASS(Data); + +} // namespace caffe diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp new file mode 100755 index 0000000..a461296 --- /dev/null +++ b/src/caffe/layers/deconv_layer.cpp @@ -0,0 +1,79 @@ +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void DeconvolutionLayer::compute_output_shape() { + this->height_out_ = this->stride_h_ * (this->height_ - 1) + this->kernel_h_ + - 2 * this->pad_h_; + this->width_out_ = this->stride_w_ * (this->width_ - 1) + this->kernel_w_ + - 2 * this->pad_w_; +} + +template +void DeconvolutionLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* weight = this->blobs_[0]->cpu_data(); + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->cpu_data(); + Dtype* top_data = top[i]->mutable_cpu_data(); + for (int n = 0; n < this->num_; ++n) { + this->backward_cpu_gemm(bottom_data + bottom[i]->offset(n), weight, + top_data + top[i]->offset(n)); + if (this->bias_term_) { + const Dtype* bias = this->blobs_[1]->cpu_data(); + this->forward_cpu_bias(top_data + top[i]->offset(n), bias); + } + } + } +} + +template +void DeconvolutionLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* weight = this->blobs_[0]->cpu_data(); + Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->cpu_diff(); + const Dtype* bottom_data = bottom[i]->cpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_cpu_diff(); + // Bias gradient, if necessary. + if (this->bias_term_ && this->param_propagate_down_[1]) { + Dtype* bias_diff = this->blobs_[1]->mutable_cpu_diff(); + for (int n = 0; n < this->num_; ++n) { + this->backward_cpu_bias(bias_diff, top_diff + top[i]->offset(n)); + } + } + if (this->param_propagate_down_[0] || propagate_down[i]) { + for (int n = 0; n < this->num_; ++n) { + // Gradient w.r.t. weight. Note that we will accumulate diffs. + if (this->param_propagate_down_[0]) { + this->weight_cpu_gemm(top_diff + top[i]->offset(n), + bottom_data + bottom[i]->offset(n), weight_diff); + } + // Gradient w.r.t. bottom data, if necessary, reusing the column buffer + // we might have just computed above. 
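+      // Note the role reversal relative to ConvolutionLayer: deconvolution's
+      // Forward_cpu calls backward_cpu_gemm, and its data gradient here calls
+      // forward_cpu_gemm, since deconvolution is the adjoint (transpose) of
+      // convolution.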
+ if (propagate_down[i]) { + this->forward_cpu_gemm(top_diff + top[i]->offset(n), weight, + bottom_diff + bottom[i]->offset(n), + this->param_propagate_down_[0]); + } + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(DeconvolutionLayer); +#endif + +INSTANTIATE_CLASS(DeconvolutionLayer); +REGISTER_LAYER_CLASS(Deconvolution); + +} // namespace caffe diff --git a/src/caffe/layers/deconv_layer.cu b/src/caffe/layers/deconv_layer.cu new file mode 100755 index 0000000..39bc4de --- /dev/null +++ b/src/caffe/layers/deconv_layer.cu @@ -0,0 +1,64 @@ +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void DeconvolutionLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* weight = this->blobs_[0]->gpu_data(); + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* top_data = top[i]->mutable_gpu_data(); + for (int n = 0; n < this->num_; ++n) { + this->backward_gpu_gemm(bottom_data + bottom[i]->offset(n), weight, + top_data + top[i]->offset(n)); + if (this->bias_term_) { + const Dtype* bias = this->blobs_[1]->gpu_data(); + this->forward_gpu_bias(top_data + top[i]->offset(n), bias); + } + } + } +} + +template +void DeconvolutionLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* weight = this->blobs_[0]->gpu_data(); + Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + // Bias gradient, if necessary. + if (this->bias_term_ && this->param_propagate_down_[1]) { + Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); + for (int n = 0; n < this->num_; ++n) { + this->backward_gpu_bias(bias_diff, top_diff + top[i]->offset(n)); + } + } + if (this->param_propagate_down_[0] || propagate_down[i]) { + for (int n = 0; n < this->num_; ++n) { + // gradient w.r.t. weight. Note that we will accumulate diffs. + if (this->param_propagate_down_[0]) { + this->weight_gpu_gemm(top_diff + top[i]->offset(n), + bottom_data + bottom[i]->offset(n), weight_diff); + } + // gradient w.r.t. bottom data, if necessary. + if (propagate_down[i]) { + this->forward_gpu_gemm(top_diff + top[i]->offset(n), weight, + bottom_diff + bottom[i]->offset(n)); + } + } + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(DeconvolutionLayer); + +} // namespace caffe diff --git a/src/caffe/layers/dropout_layer.cpp b/src/caffe/layers/dropout_layer.cpp new file mode 100755 index 0000000..ec1256f --- /dev/null +++ b/src/caffe/layers/dropout_layer.cpp @@ -0,0 +1,78 @@ +// TODO (sergeyk): effect should not be dependent on phase. wasted memcpy. + +#include + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void DropoutLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + NeuronLayer::LayerSetUp(bottom, top); + threshold_ = this->layer_param_.dropout_param().dropout_ratio(); + DCHECK(threshold_ > 0.); + DCHECK(threshold_ < 1.); + scale_ = 1. / (1. 
- threshold_); + uint_thres_ = static_cast(UINT_MAX * threshold_); +} + +template +void DropoutLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + NeuronLayer::Reshape(bottom, top); + // Set up the cache for random number generation + rand_vec_.Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); +} + +template +void DropoutLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + unsigned int* mask = rand_vec_.mutable_cpu_data(); + const int count = bottom[0]->count(); + if (this->phase_ == TRAIN) { + // Create random numbers + caffe_rng_bernoulli(count, 1. - threshold_, mask); + for (int i = 0; i < count; ++i) { + top_data[i] = bottom_data[i] * mask[i] * scale_; + } + } else { + caffe_copy(bottom[0]->count(), bottom_data, top_data); + } +} + +template +void DropoutLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + if (this->phase_ == TRAIN) { + const unsigned int* mask = rand_vec_.cpu_data(); + const int count = bottom[0]->count(); + for (int i = 0; i < count; ++i) { + bottom_diff[i] = top_diff[i] * mask[i] * scale_; + } + } else { + caffe_copy(top[0]->count(), top_diff, bottom_diff); + } + } +} + + +#ifdef CPU_ONLY +STUB_GPU(DropoutLayer); +#endif + +INSTANTIATE_CLASS(DropoutLayer); +REGISTER_LAYER_CLASS(Dropout); + +} // namespace caffe diff --git a/src/caffe/layers/dropout_layer.cu b/src/caffe/layers/dropout_layer.cu new file mode 100755 index 0000000..f9ea04f --- /dev/null +++ b/src/caffe/layers/dropout_layer.cu @@ -0,0 +1,77 @@ +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + + +template +__global__ void DropoutForward(const int n, const Dtype* in, + const unsigned int* mask, const unsigned int threshold, const float scale, + Dtype* out) { + CUDA_KERNEL_LOOP(index, n) { + out[index] = in[index] * (mask[index] > threshold) * scale; + } +} + +template +void DropoutLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + if (this->phase_ == TRAIN) { + unsigned int* mask = + static_cast(rand_vec_.mutable_gpu_data()); + caffe_gpu_rng_uniform(count, mask); + // set thresholds + // NOLINT_NEXT_LINE(whitespace/operators) + DropoutForward<<>>( + count, bottom_data, mask, uint_thres_, scale_, top_data); + CUDA_POST_KERNEL_CHECK; + } else { + caffe_copy(count, bottom_data, top_data); + } +} + +template +__global__ void DropoutBackward(const int n, const Dtype* in_diff, + const unsigned int* mask, const unsigned int threshold, const float scale, + Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) { + out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); + } +} + +template +void DropoutLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + if (this->phase_ == TRAIN) { + const unsigned int* mask = + static_cast(rand_vec_.gpu_data()); + const int count = 
bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + DropoutBackward<<>>( + count, top_diff, mask, uint_thres_, scale_, bottom_diff); + CUDA_POST_KERNEL_CHECK; + } else { + caffe_copy(top[0]->count(), top_diff, bottom_diff); + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer); + + +} // namespace caffe diff --git a/src/caffe/layers/dummy_data_layer.cpp b/src/caffe/layers/dummy_data_layer.cpp new file mode 100755 index 0000000..6b0d617 --- /dev/null +++ b/src/caffe/layers/dummy_data_layer.cpp @@ -0,0 +1,115 @@ +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void DummyDataLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + const int num_top = top.size(); + const DummyDataParameter& param = this->layer_param_.dummy_data_param(); + const int num_data_filler = param.data_filler_size(); + CHECK(num_data_filler == 0 || num_data_filler == 1 || + num_data_filler == num_top) + << "Number of data fillers must be 0, 1 or equal to the number of tops: " + << num_top << "; you specified " << num_data_filler << " data fillers."; + + const bool legacy_dims = param.num_size() || param.channels_size() || + param.height_size() || param.width_size(); + if (legacy_dims) { + CHECK_EQ(0, param.shape_size()) + << "Both shape and legacy fields were specified"; + // Using deprecated 4D output dim specifiers. + CHECK(param.num_size() == 1 || param.num_size() == num_top) + << "Must specify 'num' once, or once per top blob " + << "(" << num_top << "); specified " << param.num_size() << "."; + CHECK(param.channels_size() == 1 || param.channels_size() == num_top) + << "Must specify 'channels' once, or once per top blob " + << "(" << num_top << "); specified " << param.channels_size() << "."; + CHECK(param.height_size() == 1 || param.height_size() == num_top) + << "Must specify 'height' once, or once per top blob " + << "(" << num_top << "); specified " << param.height_size() << "."; + CHECK(param.width_size() == 1 || param.width_size() == num_top) + << "Must specify 'width' once, or once per top blob " + << "(" << num_top << "); specified " << param.width_size() << "."; + } else { + CHECK(param.shape_size() == 1 || param.shape_size() == num_top) + << "Must specify 'shape' once, or once per top blob " + << "(" << num_top << "); specified " << param.shape_size() << "."; + } + // refill_[i] tells Forward i whether or not to actually refill top Blob i. + // If refill_[i] is false, Forward does nothing for Blob i. We use this to + // avoid wastefully refilling "constant" Blobs in every forward pass. + // We first fill refill_ in with the INVERSE of its final values. + // The first time we run Forward from the LayerSetUp method, we'll fill only + // Blobs for which refill_ is normally false. These Blobs will never be + // filled again. + refill_.clear(); + fillers_.clear(); + if (num_data_filler <= 1) { + FillerParameter filler_param; + if (num_data_filler == 0) { + filler_param.set_type("constant"); + filler_param.set_value(0); + } else { + filler_param.CopyFrom(param.data_filler(0)); + } + // Refill on each iteration iff not using a constant filler, + // but use the inverse of this rule for the first run. 
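+    // Worked example, assuming a "constant" filler: refill_ starts out true,
+    // so the single Forward(bottom, top) call at the end of LayerSetUp fills
+    // the blob once; refill_ is then negated to false and the blob is never
+    // touched again. For any other filler the sequence is exactly reversed.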
+ refill_.resize(1); + refill_[0] = (strcmp(filler_param.type().c_str(), "constant") == 0); + fillers_.resize(1); + fillers_[0].reset(GetFiller(filler_param)); + } else { + refill_.resize(num_top); + fillers_.resize(num_top); + for (int i = 0; i < num_top; ++i) { + fillers_[i].reset(GetFiller(param.data_filler(i))); + // Refill on each iteration iff not using a constant filler, + // but use the inverse of this rule for the first run. + refill_[i] = + (strcmp(param.data_filler(i).type().c_str(), "constant") == 0); + } + } + for (int i = 0; i < num_top; ++i) { + if (legacy_dims) { + const int num = (param.num_size() == 1) ? param.num(0) : param.num(i); + const int channels = + (param.channels_size() == 1) ? param.channels(0) : param.channels(i); + const int height = + (param.height_size() == 1) ? param.height(0) : param.height(i); + const int width = + (param.width_size() == 1) ? param.width(0) : param.width(i); + top[i]->Reshape(num, channels, height, width); + } else { + const int shape_index = (param.shape_size() == 1) ? 0 : i; + top[i]->Reshape(param.shape(shape_index)); + } + } + // Run Forward once, with refill_ inverted, to fill the constant Blobs. + this->Forward(bottom, top); + // Invert the inverted refill_ values to refill the desired (non-constant) + // Blobs in every usual forward pass. + for (int i = 0; i < refill_.size(); ++i) { + refill_[i] = !refill_[i]; + } +} + +template +void DummyDataLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + for (int i = 0; i < top.size(); ++i) { + const int filler_id = (fillers_.size() > 1) ? i : 0; + if (refill_[filler_id]) { + fillers_[filler_id]->Fill(top[i]); + } + } +} + +INSTANTIATE_CLASS(DummyDataLayer); +REGISTER_LAYER_CLASS(DummyData); + +} // namespace caffe diff --git a/src/caffe/layers/eltwise_layer.cpp b/src/caffe/layers/eltwise_layer.cpp new file mode 100755 index 0000000..a807007 --- /dev/null +++ b/src/caffe/layers/eltwise_layer.cpp @@ -0,0 +1,161 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void EltwiseLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + CHECK(this->layer_param().eltwise_param().coeff_size() == 0 + || this->layer_param().eltwise_param().coeff_size() == bottom.size()) << + "Eltwise Layer takes one coefficient per bottom blob."; + CHECK(!(this->layer_param().eltwise_param().operation() + == EltwiseParameter_EltwiseOp_PROD + && this->layer_param().eltwise_param().coeff_size())) << + "Eltwise layer only takes coefficients for summation."; + op_ = this->layer_param_.eltwise_param().operation(); + // Blob-wise coefficients for the elementwise operation. + coeffs_ = vector(bottom.size(), 1); + if (this->layer_param().eltwise_param().coeff_size()) { + for (int i = 0; i < bottom.size(); ++i) { + coeffs_[i] = this->layer_param().eltwise_param().coeff(i); + } + } + stable_prod_grad_ = this->layer_param_.eltwise_param().stable_prod_grad(); +} + +template +void EltwiseLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + for (int i = 1; i < bottom.size(); ++i) { + CHECK(bottom[i]->shape() == bottom[0]->shape()); + } + top[0]->ReshapeLike(*bottom[0]); + // If max operation, we will initialize the vector index part. 
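+  // max_idx_ records, for every element, which bottom blob supplied the
+  // maximum; Backward_cpu and Backward_gpu use it to route top_diff only to
+  // that winning input and to zero the gradient for all the others.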
+ if (this->layer_param_.eltwise_param().operation() == + EltwiseParameter_EltwiseOp_MAX && top.size() == 1) { + max_idx_.Reshape(bottom[0]->shape()); + } +} + +template +void EltwiseLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + int* mask = NULL; + const Dtype* bottom_data_a = NULL; + const Dtype* bottom_data_b = NULL; + const int count = top[0]->count(); + Dtype* top_data = top[0]->mutable_cpu_data(); + switch (op_) { + case EltwiseParameter_EltwiseOp_PROD: + caffe_mul(count, bottom[0]->cpu_data(), bottom[1]->cpu_data(), top_data); + for (int i = 2; i < bottom.size(); ++i) { + caffe_mul(count, top_data, bottom[i]->cpu_data(), top_data); + } + break; + case EltwiseParameter_EltwiseOp_SUM: + caffe_set(count, Dtype(0), top_data); + // TODO(shelhamer) does BLAS optimize to sum for coeff = 1? + for (int i = 0; i < bottom.size(); ++i) { + caffe_axpy(count, coeffs_[i], bottom[i]->cpu_data(), top_data); + } + break; + case EltwiseParameter_EltwiseOp_MAX: + // Initialize + mask = max_idx_.mutable_cpu_data(); + caffe_set(count, -1, mask); + caffe_set(count, Dtype(-FLT_MAX), top_data); + // bottom 0 & 1 + bottom_data_a = bottom[0]->cpu_data(); + bottom_data_b = bottom[1]->cpu_data(); + for (int idx = 0; idx < count; ++idx) { + if (bottom_data_a[idx] > bottom_data_b[idx]) { + top_data[idx] = bottom_data_a[idx]; // maxval + mask[idx] = 0; // maxid + } else { + top_data[idx] = bottom_data_b[idx]; // maxval + mask[idx] = 1; // maxid + } + } + // bottom 2++ + for (int blob_idx = 2; blob_idx < bottom.size(); ++blob_idx) { + bottom_data_b = bottom[blob_idx]->cpu_data(); + for (int idx = 0; idx < count; ++idx) { + if (bottom_data_b[idx] > top_data[idx]) { + top_data[idx] = bottom_data_b[idx]; // maxval + mask[idx] = blob_idx; // maxid + } + } + } + break; + default: + LOG(FATAL) << "Unknown elementwise operation."; + } +} + +template +void EltwiseLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const int* mask = NULL; + const int count = top[0]->count(); + const Dtype* top_data = top[0]->cpu_data(); + const Dtype* top_diff = top[0]->cpu_diff(); + for (int i = 0; i < bottom.size(); ++i) { + if (propagate_down[i]) { + const Dtype* bottom_data = bottom[i]->cpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_cpu_diff(); + switch (op_) { + case EltwiseParameter_EltwiseOp_PROD: + if (stable_prod_grad_) { + bool initialized = false; + for (int j = 0; j < bottom.size(); ++j) { + if (i == j) { continue; } + if (!initialized) { + caffe_copy(count, bottom[j]->cpu_data(), bottom_diff); + initialized = true; + } else { + caffe_mul(count, bottom[j]->cpu_data(), bottom_diff, + bottom_diff); + } + } + } else { + caffe_div(count, top_data, bottom_data, bottom_diff); + } + caffe_mul(count, bottom_diff, top_diff, bottom_diff); + break; + case EltwiseParameter_EltwiseOp_SUM: + if (coeffs_[i] == Dtype(1)) { + caffe_copy(count, top_diff, bottom_diff); + } else { + caffe_cpu_scale(count, coeffs_[i], top_diff, bottom_diff); + } + break; + case EltwiseParameter_EltwiseOp_MAX: + mask = max_idx_.cpu_data(); + for (int index = 0; index < count; ++index) { + Dtype gradient = 0; + if (mask[index] == i) { + gradient += top_diff[index]; + } + bottom_diff[index] = gradient; + } + break; + default: + LOG(FATAL) << "Unknown elementwise operation."; + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(EltwiseLayer); +#endif + +INSTANTIATE_CLASS(EltwiseLayer); +REGISTER_LAYER_CLASS(Eltwise); + +} // namespace caffe diff --git a/src/caffe/layers/eltwise_layer.cu 
b/src/caffe/layers/eltwise_layer.cu new file mode 100755 index 0000000..2247870 --- /dev/null +++ b/src/caffe/layers/eltwise_layer.cu @@ -0,0 +1,135 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +__global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, + const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data, + int* mask) { + CUDA_KERNEL_LOOP(index, nthreads) { + Dtype maxval = -FLT_MAX; + int maxidx = -1; + if (bottom_data_a[index] > bottom_data_b[index]) { + // only update for very first bottom_data blob (blob_idx == 0) + if (blob_idx == 0) { + maxval = bottom_data_a[index]; + top_data[index] = maxval; + maxidx = blob_idx; + mask[index] = maxidx; + } + } else { + maxval = bottom_data_b[index]; + top_data[index] = maxval; + maxidx = blob_idx + 1; + mask[index] = maxidx; + } + } +} + +template +void EltwiseLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + int* mask = NULL; + const int count = top[0]->count(); + Dtype* top_data = top[0]->mutable_gpu_data(); + switch (op_) { + case EltwiseParameter_EltwiseOp_PROD: + caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), + top_data); + for (int i = 2; i < bottom.size(); ++i) { + caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); + } + break; + case EltwiseParameter_EltwiseOp_SUM: + caffe_gpu_set(count, Dtype(0.), top_data); + // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? + for (int i = 0; i < bottom.size(); ++i) { + caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); + } + break; + case EltwiseParameter_EltwiseOp_MAX: + mask = max_idx_.mutable_gpu_data(); + // NOLINT_NEXT_LINE(whitespace/operators) + MaxForward <<>>( + count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); + for (int i = 2; i < bottom.size(); ++i) { + // NOLINT_NEXT_LINE(whitespace/operators) + MaxForward<<>>( + count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); + } + break; + default: + LOG(FATAL) << "Unknown elementwise operation."; + } +} + +template +__global__ void MaxBackward(const int nthreads, const Dtype* top_diff, + const int blob_idx, const int* mask, Dtype* bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + Dtype gradient = 0; + if (mask[index] == blob_idx) { + gradient += top_diff[index]; + } + bottom_diff[index] = gradient; + } +} + +template +void EltwiseLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const int* mask = NULL; + const int count = top[0]->count(); + const Dtype* top_data = top[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + for (int i = 0; i < bottom.size(); ++i) { + if (propagate_down[i]) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + switch (op_) { + case EltwiseParameter_EltwiseOp_PROD: + if (stable_prod_grad_) { + bool initialized = false; + for (int j = 0; j < bottom.size(); ++j) { + if (i == j) { continue; } + if (!initialized) { + caffe_copy(count, bottom[j]->gpu_data(), bottom_diff); + initialized = true; + } else { + caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff, + bottom_diff); + } + } + } else { + caffe_gpu_div(count, top_data, bottom_data, bottom_diff); + } + caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); + break; + case EltwiseParameter_EltwiseOp_SUM: + if (coeffs_[i] == Dtype(1.)) { + caffe_copy(count, top_diff, bottom_diff); + } 
else { + caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff); + } + break; + case EltwiseParameter_EltwiseOp_MAX: + mask = max_idx_.gpu_data(); + MaxBackward // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + count, top_diff, i, mask, bottom_diff); + break; + default: + LOG(FATAL) << "Unknown elementwise operation."; + } + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer); + +} // namespace caffe diff --git a/src/caffe/layers/euclidean_loss_layer.cpp b/src/caffe/layers/euclidean_loss_layer.cpp new file mode 100755 index 0000000..80efa31 --- /dev/null +++ b/src/caffe/layers/euclidean_loss_layer.cpp @@ -0,0 +1,57 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void EuclideanLossLayer::Reshape( + const vector*>& bottom, const vector*>& top) { + LossLayer::Reshape(bottom, top); + CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1)) + << "Inputs must have the same dimension."; + diff_.ReshapeLike(*bottom[0]); +} + +template +void EuclideanLossLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + int count = bottom[0]->count(); + caffe_sub( + count, + bottom[0]->cpu_data(), + bottom[1]->cpu_data(), + diff_.mutable_cpu_data()); + Dtype dot = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data()); + Dtype loss = dot / bottom[0]->num() / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; +} + +template +void EuclideanLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + for (int i = 0; i < 2; ++i) { + if (propagate_down[i]) { + const Dtype sign = (i == 0) ? 1 : -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); + caffe_cpu_axpby( + bottom[i]->count(), // count + alpha, // alpha + diff_.cpu_data(), // a + Dtype(0), // beta + bottom[i]->mutable_cpu_diff()); // b + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(EuclideanLossLayer); +#endif + +INSTANTIATE_CLASS(EuclideanLossLayer); +REGISTER_LAYER_CLASS(EuclideanLoss); + +} // namespace caffe diff --git a/src/caffe/layers/euclidean_loss_layer.cu b/src/caffe/layers/euclidean_loss_layer.cu new file mode 100755 index 0000000..5b1de3a --- /dev/null +++ b/src/caffe/layers/euclidean_loss_layer.cu @@ -0,0 +1,44 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + int count = bottom[0]->count(); + caffe_gpu_sub( + count, + bottom[0]->gpu_data(), + bottom[1]->gpu_data(), + diff_.mutable_gpu_data()); + Dtype dot; + caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot); + Dtype loss = dot / bottom[0]->num() / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; +} + +template +void EuclideanLossLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + for (int i = 0; i < 2; ++i) { + if (propagate_down[i]) { + const Dtype sign = (i == 0) ? 
1 : -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); + caffe_gpu_axpby( + bottom[i]->count(), // count + alpha, // alpha + diff_.gpu_data(), // a + Dtype(0), // beta + bottom[i]->mutable_gpu_diff()); // b + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(EuclideanLossLayer); + +} // namespace caffe diff --git a/src/caffe/layers/exp_layer.cpp b/src/caffe/layers/exp_layer.cpp new file mode 100755 index 0000000..c7e7c60 --- /dev/null +++ b/src/caffe/layers/exp_layer.cpp @@ -0,0 +1,69 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void ExpLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + NeuronLayer::LayerSetUp(bottom, top); + const Dtype base = this->layer_param_.exp_param().base(); + if (base != Dtype(-1)) { + CHECK_GT(base, 0) << "base must be strictly positive."; + } + // If base == -1, interpret the base as e and set log_base = 1 exactly. + // Otherwise, calculate its log explicitly. + const Dtype log_base = (base == Dtype(-1)) ? Dtype(1) : log(base); + CHECK(!isnan(log_base)) + << "NaN result: log(base) = log(" << base << ") = " << log_base; + CHECK(!isinf(log_base)) + << "Inf result: log(base) = log(" << base << ") = " << log_base; + const Dtype input_scale = this->layer_param_.exp_param().scale(); + const Dtype input_shift = this->layer_param_.exp_param().shift(); + inner_scale_ = log_base * input_scale; + outer_scale_ = (input_shift == Dtype(0)) ? Dtype(1) : pow(base, input_shift); +} + +template +void ExpLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const int count = bottom[0]->count(); + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + if (inner_scale_ == Dtype(1)) { + caffe_exp(count, bottom_data, top_data); + } else { + caffe_cpu_scale(count, inner_scale_, bottom_data, top_data); + caffe_exp(count, top_data, top_data); + } + if (outer_scale_ != Dtype(1)) { + caffe_scal(count, outer_scale_, top_data); + } +} + +template +void ExpLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (!propagate_down[0]) { return; } + const int count = bottom[0]->count(); + const Dtype* top_data = top[0]->cpu_data(); + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + caffe_mul(count, top_data, top_diff, bottom_diff); + if (inner_scale_ != Dtype(1)) { + caffe_scal(count, inner_scale_, bottom_diff); + } +} + +#ifdef CPU_ONLY +STUB_GPU(ExpLayer); +#endif + +INSTANTIATE_CLASS(ExpLayer); +REGISTER_LAYER_CLASS(Exp); + +} // namespace caffe diff --git a/src/caffe/layers/exp_layer.cu b/src/caffe/layers/exp_layer.cu new file mode 100755 index 0000000..2d75d8d --- /dev/null +++ b/src/caffe/layers/exp_layer.cu @@ -0,0 +1,44 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void ExpLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const int count = bottom[0]->count(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + if (inner_scale_ == Dtype(1)) { + caffe_gpu_exp(count, bottom_data, top_data); + } else { + caffe_gpu_scale(count, inner_scale_, bottom_data, top_data); + caffe_gpu_exp(count, top_data, top_data); + } + if (outer_scale_ != Dtype(1)) { + caffe_gpu_scal(count, outer_scale_, top_data); + } +} + 
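+
+// A quick sanity check of the math above (and of Backward_gpu below),
+// assuming a hypothetical exp_param { base: 2 scale: 3 shift: 1 }:
+//   inner_scale_ = 3 * log(2), outer_scale_ = 2^1 = 2, so
+//   y = 2 * exp(3 * log(2) * x) = 2^(1 + 3x),
+//   and dy/dx = inner_scale_ * y, which is why Backward multiplies
+//   top_diff by top_data and then scales by inner_scale_.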
+template +void ExpLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (!propagate_down[0]) { return; } + const int count = bottom[0]->count(); + const Dtype* top_data = top[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + caffe_gpu_mul(count, top_data, top_diff, bottom_diff); + if (inner_scale_ != Dtype(1)) { + caffe_gpu_scal(count, inner_scale_, bottom_diff); + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(ExpLayer); + + +} // namespace caffe diff --git a/src/caffe/layers/filter_layer.cpp b/src/caffe/layers/filter_layer.cpp new file mode 100755 index 0000000..be1db32 --- /dev/null +++ b/src/caffe/layers/filter_layer.cpp @@ -0,0 +1,127 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void FilterLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + CHECK_EQ(top.size(), bottom.size() - 1); + first_reshape_ = true; +} + +template +void FilterLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + // bottom[0...k-1] are the blobs to filter + // bottom[last] is the "selector_blob" + int selector_index = bottom.size() - 1; + for (int i = 1; i < bottom[selector_index]->num_axes(); ++i) { + CHECK_EQ(bottom[selector_index]->shape(i), 1) + << "Selector blob dimensions must be singletons (1), except the first"; + } + for (int i = 0; i < bottom.size() - 1; ++i) { + CHECK_EQ(bottom[selector_index]->shape(0), bottom[i]->shape(0)) << + "Each bottom should have the same 0th dimension as the selector blob"; + } + + const Dtype* bottom_data_selector = bottom[selector_index]->cpu_data(); + indices_to_forward_.clear(); + + // look for non-zero elements in bottom[0]. 
Items of each bottom that + // have the same index as the items in bottom[0] with value == non-zero + // will be forwarded + for (int item_id = 0; item_id < bottom[selector_index]->shape(0); ++item_id) { + // we don't need an offset because item size == 1 + const Dtype* tmp_data_selector = bottom_data_selector + item_id; + if (*tmp_data_selector) { + indices_to_forward_.push_back(item_id); + } + } + // only filtered items will be forwarded + int new_tops_num = indices_to_forward_.size(); + // init + if (first_reshape_) { + new_tops_num = bottom[0]->shape(0); + first_reshape_ = false; + } + for (int t = 0; t < top.size(); ++t) { + int num_axes = bottom[t]->num_axes(); + vector shape_top(num_axes); + shape_top[0] = new_tops_num; + for (int ts = 1; ts < num_axes; ++ts) + shape_top[ts] = bottom[t]->shape(ts); + top[t]->Reshape(shape_top); + } +} + +template +void FilterLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + int new_tops_num = indices_to_forward_.size(); + // forward all filtered items for all bottoms but the Selector (bottom[last]) + for (int t = 0; t < top.size(); ++t) { + const Dtype* bottom_data = bottom[t]->cpu_data(); + Dtype* top_data = top[t]->mutable_cpu_data(); + int dim = bottom[t]->count() / bottom[t]->shape(0); + for (int n = 0; n < new_tops_num; ++n) { + int data_offset_top = n * dim; + int data_offset_bottom = indices_to_forward_[n] * bottom[t]->count(1); + caffe_copy(dim, bottom_data + data_offset_bottom, + top_data + data_offset_top); + } + } +} + +template +void FilterLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (propagate_down[bottom.size() - 1]) { + LOG(FATAL) << this->type() + << "Layer cannot backpropagate to filter index inputs"; + } + for (int i = 0; i < top.size(); i++) { + // bottom[last] is the selector and never needs backpropagation + // so we can iterate over top vector because top.size() == bottom.size() -1 + if (propagate_down[i]) { + const int dim = top[i]->count() / top[i]->shape(0); + int next_to_backward_offset = 0; + int batch_offset = 0; + int data_offset_bottom = 0; + int data_offset_top = 0; + for (int n = 0; n < bottom[i]->shape(0); n++) { + data_offset_bottom = n * dim; + if (next_to_backward_offset >= indices_to_forward_.size()) { + // we already visited all items that were been forwarded, so + // just set to zero remaining ones + caffe_set(dim, Dtype(0), + bottom[i]->mutable_cpu_diff() + data_offset_bottom); + } else { + batch_offset = indices_to_forward_[next_to_backward_offset]; + if (n != batch_offset) { // this data was not been forwarded + caffe_set(dim, Dtype(0), + bottom[i]->mutable_cpu_diff() + data_offset_bottom); + } else { // this data was been forwarded + data_offset_top = next_to_backward_offset * dim; + next_to_backward_offset++; // point to next forwarded item index + caffe_copy(dim, top[i]->mutable_cpu_diff() + data_offset_top, + bottom[i]->mutable_cpu_diff() + data_offset_bottom); + } + } + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(FilterLayer); +#endif + +INSTANTIATE_CLASS(FilterLayer); +REGISTER_LAYER_CLASS(Filter); + +} // namespace caffe diff --git a/src/caffe/layers/filter_layer.cu b/src/caffe/layers/filter_layer.cu new file mode 100755 index 0000000..cf929ee --- /dev/null +++ b/src/caffe/layers/filter_layer.cu @@ -0,0 +1,70 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void FilterLayer::Forward_gpu(const vector*>& bottom, + const 
vector*>& top) { + int new_tops_num = indices_to_forward_.size(); + // forward all filtered items for all bottoms but the Selector (bottom[last]) + for (int t = 0; t < top.size(); ++t) { + const Dtype* bottom_data = bottom[t]->gpu_data(); + Dtype* top_data = top[t]->mutable_gpu_data(); + int dim = bottom[t]->count() / bottom[t]->shape(0); + for (int n = 0; n < new_tops_num; ++n) { + int data_offset_top = n * dim; + int data_offset_bottom = indices_to_forward_[n] * dim; + caffe_copy(dim, bottom_data + data_offset_bottom, + top_data + data_offset_top); + } + } +} + +template +void FilterLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (propagate_down[bottom.size() - 1]) { + LOG(FATAL) << this->type() + << "Layer cannot backpropagate to filter index inputs"; + } + for (int i = 0; i < top.size(); ++i) { + // bottom[last] is the selector and never needs backpropagation + // so we can iterate over top vector because top.size() == bottom.size() -1 + if (propagate_down[i]) { + const int dim = top[i]->count() / top[i]->shape(0); + int next_to_backward_offset = 0; + int batch_offset = 0; + int data_offset_bottom = 0; + int data_offset_top = 0; + for (int n = 0; n < bottom[i]->shape(0); ++n) { + if (next_to_backward_offset >= indices_to_forward_.size()) { + // we already visited all items that were been forwarded, so + // just set to zero remaining ones + data_offset_bottom = n * dim; + caffe_gpu_set(dim, Dtype(0), + bottom[i]->mutable_gpu_diff() + data_offset_bottom); + } else { + batch_offset = indices_to_forward_[next_to_backward_offset]; + data_offset_bottom = n * dim; + if (n != batch_offset) { // this data was not been forwarded + caffe_gpu_set(dim, Dtype(0), + bottom[i]->mutable_gpu_diff() + data_offset_bottom); + } else { // this data was been forwarded + data_offset_top = next_to_backward_offset * dim; + ++next_to_backward_offset; // point to next forwarded item index + caffe_copy(dim, top[i]->mutable_gpu_diff() + data_offset_top, + bottom[i]->mutable_gpu_diff() + data_offset_bottom); + } + } + } + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(FilterLayer); + +} // namespace caffe diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp new file mode 100755 index 0000000..f7e5c9c --- /dev/null +++ b/src/caffe/layers/flatten_layer.cpp @@ -0,0 +1,44 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void FlattenLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + const int start_axis = bottom[0]->CanonicalAxisIndex( + this->layer_param_.flatten_param().axis()); + const int end_axis = bottom[0]->CanonicalAxisIndex( + this->layer_param_.flatten_param().end_axis()); + vector top_shape; + for (int i = 0; i < start_axis; ++i) { + top_shape.push_back(bottom[0]->shape(i)); + } + const int flattened_dim = bottom[0]->count(start_axis, end_axis + 1); + top_shape.push_back(flattened_dim); + for (int i = end_axis + 1; i < bottom[0]->num_axes(); ++i) { + top_shape.push_back(bottom[0]->shape(i)); + } + top[0]->Reshape(top_shape); + CHECK_EQ(top[0]->count(), bottom[0]->count()); +} + +template +void FlattenLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + top[0]->ShareData(*bottom[0]); +} + +template +void FlattenLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + bottom[0]->ShareDiff(*top[0]); +} + +INSTANTIATE_CLASS(FlattenLayer); 
+REGISTER_LAYER_CLASS(Flatten); + +} // namespace caffe diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp new file mode 100755 index 0000000..8ced510 --- /dev/null +++ b/src/caffe/layers/hdf5_data_layer.cpp @@ -0,0 +1,167 @@ +/* +TODO: +- load file in a separate thread ("prefetch") +- can be smarter about the memcpy call instead of doing it row-by-row + :: use util functions caffe_copy, and Blob->offset() + :: don't forget to update hdf5_daa_layer.cu accordingly +- add ability to shuffle filenames if flag is set +*/ +#include // NOLINT(readability/streams) +#include +#include + +#include "hdf5.h" +#include "hdf5_hl.h" +#include "stdint.h" + +#include "caffe/data_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/hdf5.hpp" + +namespace caffe { + +template +HDF5DataLayer::~HDF5DataLayer() { } + +// Load data and label from HDF5 filename into the class property blobs. +template +void HDF5DataLayer::LoadHDF5FileData(const char* filename) { + DLOG(INFO) << "Loading HDF5 file: " << filename; + hid_t file_id = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT); + if (file_id < 0) { + LOG(FATAL) << "Failed opening HDF5 file: " << filename; + } + + int top_size = this->layer_param_.top_size(); + hdf_blobs_.resize(top_size); + + const int MIN_DATA_DIM = 1; + const int MAX_DATA_DIM = INT_MAX; + + for (int i = 0; i < top_size; ++i) { + hdf_blobs_[i] = shared_ptr >(new Blob()); + hdf5_load_nd_dataset(file_id, this->layer_param_.top(i).c_str(), + MIN_DATA_DIM, MAX_DATA_DIM, hdf_blobs_[i].get()); + } + + herr_t status = H5Fclose(file_id); + CHECK_GE(status, 0) << "Failed to close HDF5 file: " << filename; + + // MinTopBlobs==1 guarantees at least one top blob + CHECK_GE(hdf_blobs_[0]->num_axes(), 1) << "Input must have at least 1 axis."; + const int num = hdf_blobs_[0]->shape(0); + for (int i = 1; i < top_size; ++i) { + CHECK_EQ(hdf_blobs_[i]->shape(0), num); + } + // Default to identity permutation. + data_permutation_.clear(); + data_permutation_.resize(hdf_blobs_[0]->shape(0)); + for (int i = 0; i < hdf_blobs_[0]->shape(0); i++) + data_permutation_[i] = i; + + // Shuffle if needed. + if (this->layer_param_.hdf5_data_param().shuffle()) { + std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); + DLOG(INFO) << "Successully loaded " << hdf_blobs_[0]->shape(0) + << " rows (shuffled)"; + } else { + DLOG(INFO) << "Successully loaded " << hdf_blobs_[0]->shape(0) << " rows"; + } +} + +template +void HDF5DataLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + // Refuse transformation parameters since HDF5 is totally generic. + CHECK(!this->layer_param_.has_transform_param()) << + this->type() << " does not transform data."; + // Read the source to parse the filenames. + const string& source = this->layer_param_.hdf5_data_param().source(); + LOG(INFO) << "Loading list of HDF5 filenames from: " << source; + hdf_filenames_.clear(); + std::ifstream source_file(source.c_str()); + if (source_file.is_open()) { + std::string line; + while (source_file >> line) { + hdf_filenames_.push_back(line); + } + } else { + LOG(FATAL) << "Failed to open source file: " << source; + } + source_file.close(); + num_files_ = hdf_filenames_.size(); + current_file_ = 0; + LOG(INFO) << "Number of HDF5 files: " << num_files_; + CHECK_GE(num_files_, 1) << "Must have at least 1 HDF5 filename listed in " + << source; + + file_permutation_.clear(); + file_permutation_.resize(num_files_); + // Default to identity permutation. 
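+  // (i.e. HDF5 files are visited in the order they appear in the source list;
+  // the shuffle below re-orders them when hdf5_data_param.shuffle is set.)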
+ for (int i = 0; i < num_files_; i++) { + file_permutation_[i] = i; + } + + // Shuffle if needed. + if (this->layer_param_.hdf5_data_param().shuffle()) { + std::random_shuffle(file_permutation_.begin(), file_permutation_.end()); + } + + // Load the first HDF5 file and initialize the line counter. + LoadHDF5FileData(hdf_filenames_[file_permutation_[current_file_]].c_str()); + current_row_ = 0; + + // Reshape blobs. + const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); + const int top_size = this->layer_param_.top_size(); + vector top_shape; + for (int i = 0; i < top_size; ++i) { + top_shape.resize(hdf_blobs_[i]->num_axes()); + top_shape[0] = batch_size; + for (int j = 1; j < top_shape.size(); ++j) { + top_shape[j] = hdf_blobs_[i]->shape(j); + } + top[i]->Reshape(top_shape); + } +} + +template +void HDF5DataLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); + for (int i = 0; i < batch_size; ++i, ++current_row_) { + if (current_row_ == hdf_blobs_[0]->shape(0)) { + if (num_files_ > 1) { + ++current_file_; + if (current_file_ == num_files_) { + current_file_ = 0; + if (this->layer_param_.hdf5_data_param().shuffle()) { + std::random_shuffle(file_permutation_.begin(), + file_permutation_.end()); + } + DLOG(INFO) << "Looping around to first file."; + } + LoadHDF5FileData( + hdf_filenames_[file_permutation_[current_file_]].c_str()); + } + current_row_ = 0; + if (this->layer_param_.hdf5_data_param().shuffle()) + std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); + } + for (int j = 0; j < this->layer_param_.top_size(); ++j) { + int data_dim = top[j]->count() / top[j]->shape(0); + caffe_copy(data_dim, + &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_] + * data_dim], &top[j]->mutable_cpu_data()[i * data_dim]); + } + } +} + +#ifdef CPU_ONLY +STUB_GPU_FORWARD(HDF5DataLayer, Forward); +#endif + +INSTANTIATE_CLASS(HDF5DataLayer); +REGISTER_LAYER_CLASS(HDF5Data); + +} // namespace caffe diff --git a/src/caffe/layers/hdf5_data_layer.cu b/src/caffe/layers/hdf5_data_layer.cu new file mode 100755 index 0000000..5e3e4ce --- /dev/null +++ b/src/caffe/layers/hdf5_data_layer.cu @@ -0,0 +1,53 @@ +/* +TODO: +- only load parts of the file, in accordance with a prototxt param "max_mem" +*/ + +#include +#include +#include + +#include "hdf5.h" +#include "hdf5_hl.h" + +#include "caffe/data_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" + +namespace caffe { + +template +void HDF5DataLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); + for (int i = 0; i < batch_size; ++i, ++current_row_) { + if (current_row_ == hdf_blobs_[0]->shape(0)) { + if (num_files_ > 1) { + current_file_ += 1; + if (current_file_ == num_files_) { + current_file_ = 0; + if (this->layer_param_.hdf5_data_param().shuffle()) { + std::random_shuffle(file_permutation_.begin(), + file_permutation_.end()); + } + DLOG(INFO) << "Looping around to first file."; + } + LoadHDF5FileData( + hdf_filenames_[file_permutation_[current_file_]].c_str()); + } + current_row_ = 0; + if (this->layer_param_.hdf5_data_param().shuffle()) + std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); + } + for (int j = 0; j < this->layer_param_.top_size(); ++j) { + int data_dim = top[j]->count() / top[j]->shape(0); + caffe_copy(data_dim, + &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_] + * 
data_dim], &top[j]->mutable_gpu_data()[i * data_dim]); + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(HDF5DataLayer); + +} // namespace caffe diff --git a/src/caffe/layers/hdf5_output_layer.cpp b/src/caffe/layers/hdf5_output_layer.cpp new file mode 100755 index 0000000..56788c2 --- /dev/null +++ b/src/caffe/layers/hdf5_output_layer.cpp @@ -0,0 +1,77 @@ +#include + +#include "hdf5.h" +#include "hdf5_hl.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/hdf5.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void HDF5OutputLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + file_name_ = this->layer_param_.hdf5_output_param().file_name(); + file_id_ = H5Fcreate(file_name_.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, + H5P_DEFAULT); + CHECK_GE(file_id_, 0) << "Failed to open HDF5 file" << file_name_; + file_opened_ = true; +} + +template +HDF5OutputLayer::~HDF5OutputLayer() { + if (file_opened_) { + herr_t status = H5Fclose(file_id_); + CHECK_GE(status, 0) << "Failed to close HDF5 file " << file_name_; + } +} + +template +void HDF5OutputLayer::SaveBlobs() { + // TODO: no limit on the number of blobs + LOG(INFO) << "Saving HDF5 file " << file_name_; + CHECK_EQ(data_blob_.num(), label_blob_.num()) << + "data blob and label blob must have the same batch size"; + hdf5_save_nd_dataset(file_id_, HDF5_DATA_DATASET_NAME, data_blob_); + hdf5_save_nd_dataset(file_id_, HDF5_DATA_LABEL_NAME, label_blob_); + LOG(INFO) << "Successfully saved " << data_blob_.num() << " rows"; +} + +template +void HDF5OutputLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + CHECK_GE(bottom.size(), 2); + CHECK_EQ(bottom[0]->num(), bottom[1]->num()); + data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), + bottom[1]->height(), bottom[1]->width()); + const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); + const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); + + for (int i = 0; i < bottom[0]->num(); ++i) { + caffe_copy(data_datum_dim, &bottom[0]->cpu_data()[i * data_datum_dim], + &data_blob_.mutable_cpu_data()[i * data_datum_dim]); + caffe_copy(label_datum_dim, &bottom[1]->cpu_data()[i * label_datum_dim], + &label_blob_.mutable_cpu_data()[i * label_datum_dim]); + } + SaveBlobs(); +} + +template +void HDF5OutputLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + return; +} + +#ifdef CPU_ONLY +STUB_GPU(HDF5OutputLayer); +#endif + +INSTANTIATE_CLASS(HDF5OutputLayer); +REGISTER_LAYER_CLASS(HDF5Output); + +} // namespace caffe diff --git a/src/caffe/layers/hdf5_output_layer.cu b/src/caffe/layers/hdf5_output_layer.cu new file mode 100755 index 0000000..eb6d0e4 --- /dev/null +++ b/src/caffe/layers/hdf5_output_layer.cu @@ -0,0 +1,42 @@ +#include + +#include "hdf5.h" +#include "hdf5_hl.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void HDF5OutputLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + CHECK_GE(bottom.size(), 2); + CHECK_EQ(bottom[0]->num(), bottom[1]->num()); + data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), + bottom[1]->height(), bottom[1]->width()); + const int data_datum_dim = 
bottom[0]->count() / bottom[0]->num(); + const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); + + for (int i = 0; i < bottom[0]->num(); ++i) { + caffe_copy(data_datum_dim, &bottom[0]->gpu_data()[i * data_datum_dim], + &data_blob_.mutable_cpu_data()[i * data_datum_dim]); + caffe_copy(label_datum_dim, &bottom[1]->gpu_data()[i * label_datum_dim], + &label_blob_.mutable_cpu_data()[i * label_datum_dim]); + } + SaveBlobs(); +} + +template +void HDF5OutputLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + return; +} + +INSTANTIATE_LAYER_GPU_FUNCS(HDF5OutputLayer); + +} // namespace caffe diff --git a/src/caffe/layers/hinge_loss_layer.cpp b/src/caffe/layers/hinge_loss_layer.cpp new file mode 100755 index 0000000..a2fb2a1 --- /dev/null +++ b/src/caffe/layers/hinge_loss_layer.cpp @@ -0,0 +1,82 @@ +#include +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void HingeLossLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const Dtype* label = bottom[1]->cpu_data(); + int num = bottom[0]->num(); + int count = bottom[0]->count(); + int dim = count / num; + + caffe_copy(count, bottom_data, bottom_diff); + for (int i = 0; i < num; ++i) { + bottom_diff[i * dim + static_cast(label[i])] *= -1; + } + for (int i = 0; i < num; ++i) { + for (int j = 0; j < dim; ++j) { + bottom_diff[i * dim + j] = std::max( + Dtype(0), 1 + bottom_diff[i * dim + j]); + } + } + Dtype* loss = top[0]->mutable_cpu_data(); + switch (this->layer_param_.hinge_loss_param().norm()) { + case HingeLossParameter_Norm_L1: + loss[0] = caffe_cpu_asum(count, bottom_diff) / num; + break; + case HingeLossParameter_Norm_L2: + loss[0] = caffe_cpu_dot(count, bottom_diff, bottom_diff) / num; + break; + default: + LOG(FATAL) << "Unknown Norm"; + } +} + +template +void HingeLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (propagate_down[1]) { + LOG(FATAL) << this->type() + << " Layer cannot backpropagate to label inputs."; + } + if (propagate_down[0]) { + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const Dtype* label = bottom[1]->cpu_data(); + int num = bottom[0]->num(); + int count = bottom[0]->count(); + int dim = count / num; + + for (int i = 0; i < num; ++i) { + bottom_diff[i * dim + static_cast(label[i])] *= -1; + } + + const Dtype loss_weight = top[0]->cpu_diff()[0]; + switch (this->layer_param_.hinge_loss_param().norm()) { + case HingeLossParameter_Norm_L1: + caffe_cpu_sign(count, bottom_diff, bottom_diff); + caffe_scal(count, loss_weight / num, bottom_diff); + break; + case HingeLossParameter_Norm_L2: + caffe_scal(count, loss_weight * 2 / num, bottom_diff); + break; + default: + LOG(FATAL) << "Unknown Norm"; + } + } +} + +INSTANTIATE_CLASS(HingeLossLayer); +REGISTER_LAYER_CLASS(HingeLoss); + +} // namespace caffe diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp new file mode 100755 index 0000000..1c80271 --- /dev/null +++ b/src/caffe/layers/im2col_layer.cpp @@ -0,0 +1,95 @@ +#include + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void Im2colLayer::LayerSetUp(const vector*>& bottom, + const 
vector*>& top) { + ConvolutionParameter conv_param = this->layer_param_.convolution_param(); + CHECK(!conv_param.has_kernel_size() != + !(conv_param.has_kernel_h() && conv_param.has_kernel_w())) + << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; + CHECK(conv_param.has_kernel_size() || + (conv_param.has_kernel_h() && conv_param.has_kernel_w())) + << "For non-square filters both kernel_h and kernel_w are required."; + CHECK((!conv_param.has_pad() && conv_param.has_pad_h() + && conv_param.has_pad_w()) + || (!conv_param.has_pad_h() && !conv_param.has_pad_w())) + << "pad is pad OR pad_h and pad_w are required."; + CHECK((!conv_param.has_stride() && conv_param.has_stride_h() + && conv_param.has_stride_w()) + || (!conv_param.has_stride_h() && !conv_param.has_stride_w())) + << "Stride is stride OR stride_h and stride_w are required."; + if (conv_param.has_kernel_size()) { + kernel_h_ = kernel_w_ = conv_param.kernel_size(); + } else { + kernel_h_ = conv_param.kernel_h(); + kernel_w_ = conv_param.kernel_w(); + } + CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero."; + CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; + if (!conv_param.has_pad_h()) { + pad_h_ = pad_w_ = conv_param.pad(); + } else { + pad_h_ = conv_param.pad_h(); + pad_w_ = conv_param.pad_w(); + } + if (!conv_param.has_stride_h()) { + stride_h_ = stride_w_ = conv_param.stride(); + } else { + stride_h_ = conv_param.stride_h(); + stride_w_ = conv_param.stride_w(); + } +} + +template +void Im2colLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + top[0]->Reshape( + bottom[0]->num(), channels_ * kernel_h_ * kernel_w_, + (height_ + 2 * pad_h_ - kernel_h_) / stride_h_ + 1, + (width_ + 2 * pad_w_ - kernel_w_) / stride_w_ + 1); +} + +template +void Im2colLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + for (int n = 0; n < bottom[0]->num(); ++n) { + im2col_cpu(bottom_data + bottom[0]->offset(n), channels_, height_, + width_, kernel_h_, kernel_w_, pad_h_, pad_w_, + stride_h_, stride_w_, top_data + top[0]->offset(n)); + } +} + +template +void Im2colLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + for (int n = 0; n < top[0]->num(); ++n) { + col2im_cpu(top_diff + top[0]->offset(n), channels_, height_, width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, + stride_h_, stride_w_, bottom_diff + bottom[0]->offset(n)); + } +} + +#ifdef CPU_ONLY +STUB_GPU(Im2colLayer); +#endif + +INSTANTIATE_CLASS(Im2colLayer); +REGISTER_LAYER_CLASS(Im2col); + +} // namespace caffe diff --git a/src/caffe/layers/im2col_layer.cu b/src/caffe/layers/im2col_layer.cu new file mode 100755 index 0000000..9c338b1 --- /dev/null +++ b/src/caffe/layers/im2col_layer.cu @@ -0,0 +1,37 @@ +#include + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void Im2colLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + for (int n = 
0; n < bottom[0]->num(); ++n) { + im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_, + width_, kernel_h_, kernel_w_, pad_h_, pad_w_, + stride_h_, stride_w_, top_data + top[0]->offset(n)); + } +} + +template +void Im2colLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + for (int n = 0; n < top[0]->num(); ++n) { + col2im_gpu(top_diff + top[0]->offset(n), channels_, height_, width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, + stride_h_, stride_w_, bottom_diff + bottom[0]->offset(n)); + } +} + + +INSTANTIATE_LAYER_GPU_FUNCS(Im2colLayer); + +} // namespace caffe diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp new file mode 100755 index 0000000..223ba3a --- /dev/null +++ b/src/caffe/layers/image_data_layer.cpp @@ -0,0 +1,166 @@ +#include + +#include // NOLINT(readability/streams) +#include // NOLINT(readability/streams) +#include +#include +#include + +#include "caffe/data_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/benchmark.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/rng.hpp" + +namespace caffe { + +template +ImageDataLayer::~ImageDataLayer() { + this->StopInternalThread(); +} + +template +void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, + const vector*>& top) { + const int new_height = this->layer_param_.image_data_param().new_height(); + const int new_width = this->layer_param_.image_data_param().new_width(); + const bool is_color = this->layer_param_.image_data_param().is_color(); + string root_folder = this->layer_param_.image_data_param().root_folder(); + + CHECK((new_height == 0 && new_width == 0) || + (new_height > 0 && new_width > 0)) << "Current implementation requires " + "new_height and new_width to be set at the same time."; + // Read the file with filenames and labels + const string& source = this->layer_param_.image_data_param().source(); + LOG(INFO) << "Opening file " << source; + std::ifstream infile(source.c_str()); + string filename; + int label; + while (infile >> filename >> label) { + lines_.push_back(std::make_pair(filename, label)); + } + + if (this->layer_param_.image_data_param().shuffle()) { + // randomly shuffle data + LOG(INFO) << "Shuffling data"; + const unsigned int prefetch_rng_seed = caffe_rng_rand(); + prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed)); + ShuffleImages(); + } + LOG(INFO) << "A total of " << lines_.size() << " images."; + + lines_id_ = 0; + // Check if we would need to randomly skip a few data points + if (this->layer_param_.image_data_param().rand_skip()) { + unsigned int skip = caffe_rng_rand() % + this->layer_param_.image_data_param().rand_skip(); + LOG(INFO) << "Skipping first " << skip << " data points."; + CHECK_GT(lines_.size(), skip) << "Not enough points to skip"; + lines_id_ = skip; + } + // Read an image, and use it to initialize the top blob. + cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, + new_height, new_width, is_color); + CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first; + // Use data_transformer to infer the expected blob shape from a cv_image. + vector top_shape = this->data_transformer_->InferBlobShape(cv_img); + this->transformed_data_.Reshape(top_shape); + // Reshape prefetch_data and top[0] according to the batch_size. 
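+  // (All PREFETCH_COUNT buffers get the same shape so the background prefetch
+  // thread, which runs load_batch() below, can fill any of them.)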
+ const int batch_size = this->layer_param_.image_data_param().batch_size(); + CHECK_GT(batch_size, 0) << "Positive batch size required"; + top_shape[0] = batch_size; + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].data_.Reshape(top_shape); + } + top[0]->Reshape(top_shape); + + LOG(INFO) << "output data size: " << top[0]->num() << "," + << top[0]->channels() << "," << top[0]->height() << "," + << top[0]->width(); + // label + vector label_shape(1, batch_size); + top[1]->Reshape(label_shape); + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].label_.Reshape(label_shape); + } +} + +template +void ImageDataLayer::ShuffleImages() { + caffe::rng_t* prefetch_rng = + static_cast(prefetch_rng_->generator()); + shuffle(lines_.begin(), lines_.end(), prefetch_rng); +} + +// This function is called on prefetch thread +template +void ImageDataLayer::load_batch(Batch* batch) { + CPUTimer batch_timer; + batch_timer.Start(); + double read_time = 0; + double trans_time = 0; + CPUTimer timer; + CHECK(batch->data_.count()); + CHECK(this->transformed_data_.count()); + ImageDataParameter image_data_param = this->layer_param_.image_data_param(); + const int batch_size = image_data_param.batch_size(); + const int new_height = image_data_param.new_height(); + const int new_width = image_data_param.new_width(); + const bool is_color = image_data_param.is_color(); + string root_folder = image_data_param.root_folder(); + + // Reshape according to the first image of each batch + // on single input batches allows for inputs of varying dimension. + cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, + new_height, new_width, is_color); + CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first; + // Use data_transformer to infer the expected blob shape from a cv_img. + vector top_shape = this->data_transformer_->InferBlobShape(cv_img); + this->transformed_data_.Reshape(top_shape); + // Reshape batch according to the batch_size. + top_shape[0] = batch_size; + batch->data_.Reshape(top_shape); + + Dtype* prefetch_data = batch->data_.mutable_cpu_data(); + Dtype* prefetch_label = batch->label_.mutable_cpu_data(); + + // datum scales + const int lines_size = lines_.size(); + for (int item_id = 0; item_id < batch_size; ++item_id) { + // get a blob + timer.Start(); + CHECK_GT(lines_size, lines_id_); + cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, + new_height, new_width, is_color); + CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first; + read_time += timer.MicroSeconds(); + timer.Start(); + // Apply transformations (mirror, crop...) to the image + int offset = batch->data_.offset(item_id); + this->transformed_data_.set_cpu_data(prefetch_data + offset); + this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); + trans_time += timer.MicroSeconds(); + + prefetch_label[item_id] = lines_[lines_id_].second; + // go to the next iter + lines_id_++; + if (lines_id_ >= lines_size) { + // We have reached the end. Restart from the first. 
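+      // (Reaching the end of lines_ completes one pass over the image list;
+      // with shuffle enabled, ShuffleImages() below re-orders it for the next
+      // pass.)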
+ DLOG(INFO) << "Restarting data prefetching from start."; + lines_id_ = 0; + if (this->layer_param_.image_data_param().shuffle()) { + ShuffleImages(); + } + } + } + batch_timer.Stop(); + DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; + DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; + DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms."; +} + +INSTANTIATE_CLASS(ImageDataLayer); +REGISTER_LAYER_CLASS(ImageData); + +} // namespace caffe diff --git a/src/caffe/layers/infogain_loss_layer.cpp b/src/caffe/layers/infogain_loss_layer.cpp new file mode 100755 index 0000000..a1e0b40 --- /dev/null +++ b/src/caffe/layers/infogain_loss_layer.cpp @@ -0,0 +1,110 @@ +#include +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void InfogainLossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + LossLayer::LayerSetUp(bottom, top); + if (bottom.size() < 3) { + CHECK(this->layer_param_.infogain_loss_param().has_source()) + << "Infogain matrix source must be specified."; + BlobProto blob_proto; + ReadProtoFromBinaryFile( + this->layer_param_.infogain_loss_param().source(), &blob_proto); + infogain_.FromProto(blob_proto); + } +} + +template +void InfogainLossLayer::Reshape( + const vector*>& bottom, const vector*>& top) { + LossLayer::Reshape(bottom, top); + Blob* infogain = NULL; + if (bottom.size() < 3) { + infogain = &infogain_; + } else { + infogain = bottom[2]; + } + CHECK_EQ(bottom[1]->channels(), 1); + CHECK_EQ(bottom[1]->height(), 1); + CHECK_EQ(bottom[1]->width(), 1); + const int num = bottom[0]->num(); + const int dim = bottom[0]->count() / num; + CHECK_EQ(infogain->num(), 1); + CHECK_EQ(infogain->channels(), 1); + CHECK_EQ(infogain->height(), dim); + CHECK_EQ(infogain->width(), dim); +} + + +template +void InfogainLossLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + const Dtype* bottom_label = bottom[1]->cpu_data(); + const Dtype* infogain_mat = NULL; + if (bottom.size() < 3) { + infogain_mat = infogain_.cpu_data(); + } else { + infogain_mat = bottom[2]->cpu_data(); + } + int num = bottom[0]->num(); + int dim = bottom[0]->count() / bottom[0]->num(); + Dtype loss = 0; + for (int i = 0; i < num; ++i) { + int label = static_cast(bottom_label[i]); + for (int j = 0; j < dim; ++j) { + Dtype prob = std::max(bottom_data[i * dim + j], Dtype(kLOG_THRESHOLD)); + loss -= infogain_mat[label * dim + j] * log(prob); + } + } + top[0]->mutable_cpu_data()[0] = loss / num; +} + +template +void InfogainLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[1]) { + LOG(FATAL) << this->type() + << " Layer cannot backpropagate to label inputs."; + } + if (propagate_down.size() > 2 && propagate_down[2]) { + LOG(FATAL) << this->type() + << " Layer cannot backpropagate to infogain inputs."; + } + if (propagate_down[0]) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + const Dtype* bottom_label = bottom[1]->cpu_data(); + const Dtype* infogain_mat = NULL; + if (bottom.size() < 3) { + infogain_mat = infogain_.cpu_data(); + } else { + infogain_mat = bottom[2]->cpu_data(); + } + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + int num = bottom[0]->num(); + int dim = bottom[0]->count() / bottom[0]->num(); + const Dtype scale = - top[0]->cpu_diff()[0] / num; + for 
(int i = 0; i < num; ++i) { + const int label = static_cast(bottom_label[i]); + for (int j = 0; j < dim; ++j) { + Dtype prob = std::max(bottom_data[i * dim + j], Dtype(kLOG_THRESHOLD)); + bottom_diff[i * dim + j] = scale * infogain_mat[label * dim + j] / prob; + } + } + } +} + +INSTANTIATE_CLASS(InfogainLossLayer); +REGISTER_LAYER_CLASS(InfogainLoss); +} // namespace caffe diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp new file mode 100755 index 0000000..83c3235 --- /dev/null +++ b/src/caffe/layers/inner_product_layer.cpp @@ -0,0 +1,129 @@ +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void InnerProductLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + const int num_output = this->layer_param_.inner_product_param().num_output(); + bias_term_ = this->layer_param_.inner_product_param().bias_term(); + N_ = num_output; + const int axis = bottom[0]->CanonicalAxisIndex( + this->layer_param_.inner_product_param().axis()); + // Dimensions starting from "axis" are "flattened" into a single + // length K_ vector. For example, if bottom[0]'s shape is (N, C, H, W), + // and axis == 1, N inner products with dimension CHW are performed. + K_ = bottom[0]->count(axis); + // Check if we need to set up the weights + if (this->blobs_.size() > 0) { + LOG(INFO) << "Skipping parameter initialization"; + } else { + if (bias_term_) { + this->blobs_.resize(2); + } else { + this->blobs_.resize(1); + } + // Intialize the weight + vector weight_shape(2); + weight_shape[0] = N_; + weight_shape[1] = K_; + this->blobs_[0].reset(new Blob(weight_shape)); + // fill the weights + shared_ptr > weight_filler(GetFiller( + this->layer_param_.inner_product_param().weight_filler())); + weight_filler->Fill(this->blobs_[0].get()); + // If necessary, intiialize and fill the bias term + if (bias_term_) { + vector bias_shape(1, N_); + this->blobs_[1].reset(new Blob(bias_shape)); + shared_ptr > bias_filler(GetFiller( + this->layer_param_.inner_product_param().bias_filler())); + bias_filler->Fill(this->blobs_[1].get()); + } + } // parameter initialization + this->param_propagate_down_.resize(this->blobs_.size(), true); +} + +template +void InnerProductLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + // Figure out the dimensions + const int axis = bottom[0]->CanonicalAxisIndex( + this->layer_param_.inner_product_param().axis()); + const int new_K = bottom[0]->count(axis); + CHECK_EQ(K_, new_K) + << "Input size incompatible with inner product parameters."; + // The first "axis" dimensions are independent inner products; the total + // number of these is M_, the product over these dimensions. + M_ = bottom[0]->count(0, axis); + // The top shape will be the bottom shape with the flattened axes dropped, + // and replaced by a single axis with dimension num_output (N_). 
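+  // (For example, with axis == 1 a bottom of shape (N, C, H, W) produces a
+  // top of shape (N, N_).)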
+ vector top_shape = bottom[0]->shape(); + top_shape.resize(axis + 1); + top_shape[axis] = N_; + top[0]->Reshape(top_shape); + // Set up the bias multiplier + if (bias_term_) { + vector bias_shape(1, M_); + bias_multiplier_.Reshape(bias_shape); + caffe_set(M_, Dtype(1), bias_multiplier_.mutable_cpu_data()); + } +} + +template +void InnerProductLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + const Dtype* weight = this->blobs_[0]->cpu_data(); + caffe_cpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., + bottom_data, weight, (Dtype)0., top_data); + if (bias_term_) { + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., + bias_multiplier_.cpu_data(), + this->blobs_[1]->cpu_data(), (Dtype)1., top_data); + } +} + +template +void InnerProductLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (this->param_propagate_down_[0]) { + const Dtype* top_diff = top[0]->cpu_diff(); + const Dtype* bottom_data = bottom[0]->cpu_data(); + // Gradient with respect to weight + caffe_cpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., + top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_cpu_diff()); + } + if (bias_term_ && this->param_propagate_down_[1]) { + const Dtype* top_diff = top[0]->cpu_diff(); + // Gradient with respect to bias + caffe_cpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, + bias_multiplier_.cpu_data(), (Dtype)1., + this->blobs_[1]->mutable_cpu_diff()); + } + if (propagate_down[0]) { + const Dtype* top_diff = top[0]->cpu_diff(); + // Gradient with respect to bottom data + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., + top_diff, this->blobs_[0]->cpu_data(), (Dtype)0., + bottom[0]->mutable_cpu_diff()); + } +} + +#ifdef CPU_ONLY +STUB_GPU(InnerProductLayer); +#endif + +INSTANTIATE_CLASS(InnerProductLayer); +REGISTER_LAYER_CLASS(InnerProduct); + +} // namespace caffe diff --git a/src/caffe/layers/inner_product_layer.cu b/src/caffe/layers/inner_product_layer.cu new file mode 100755 index 0000000..c0ebd2c --- /dev/null +++ b/src/caffe/layers/inner_product_layer.cu @@ -0,0 +1,63 @@ +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void InnerProductLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + const Dtype* weight = this->blobs_[0]->gpu_data(); + if (M_ == 1) { + caffe_gpu_gemv(CblasNoTrans, N_, K_, (Dtype)1., + weight, bottom_data, (Dtype)0., top_data); + if (bias_term_) + caffe_gpu_axpy(N_, bias_multiplier_.cpu_data()[0], + this->blobs_[1]->gpu_data(), top_data); + } else { + caffe_gpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., + bottom_data, weight, (Dtype)0., top_data); + if (bias_term_) + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., + bias_multiplier_.gpu_data(), + this->blobs_[1]->gpu_data(), (Dtype)1., top_data); + } +} + +template +void InnerProductLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (this->param_propagate_down_[0]) { + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + // Gradient with respect to weight + 
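+    // (weight_diff += top_diff^T * bottom_data, an N_ x K_ matrix; beta = 1
+    // accumulates the product into the existing diff rather than overwriting
+    // it.)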
caffe_gpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., + top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); + } + if (bias_term_ && this->param_propagate_down_[1]) { + const Dtype* top_diff = top[0]->gpu_diff(); + // Gradient with respect to bias + caffe_gpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, + bias_multiplier_.gpu_data(), (Dtype)1., + this->blobs_[1]->mutable_gpu_diff()); + } + if (propagate_down[0]) { + const Dtype* top_diff = top[0]->gpu_diff(); + // Gradient with respect to bottom data + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., + top_diff, this->blobs_[0]->gpu_data(), (Dtype)0., + bottom[0]->mutable_gpu_diff()); + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer); + +} // namespace caffe diff --git a/src/caffe/layers/loss_layer.cpp b/src/caffe/layers/loss_layer.cpp new file mode 100755 index 0000000..3496a5c --- /dev/null +++ b/src/caffe/layers/loss_layer.cpp @@ -0,0 +1,33 @@ +#include +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void LossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + // LossLayers have a non-zero (1) loss by default. + if (this->layer_param_.loss_weight_size() == 0) { + this->layer_param_.add_loss_weight(Dtype(1)); + } +} + +template +void LossLayer::Reshape( + const vector*>& bottom, const vector*>& top) { + CHECK_EQ(bottom[0]->num(), bottom[1]->num()) + << "The data and label should have the same number."; + vector loss_shape(0); // Loss layers output a scalar; 0 axes. + top[0]->Reshape(loss_shape); +} + +INSTANTIATE_CLASS(LossLayer); + +} // namespace caffe diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp new file mode 100755 index 0000000..36c1ace --- /dev/null +++ b/src/caffe/layers/lrn_layer.cpp @@ -0,0 +1,259 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void LRNLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + size_ = this->layer_param_.lrn_param().local_size(); + CHECK_EQ(size_ % 2, 1) << "LRN only supports odd values for local_size"; + pre_pad_ = (size_ - 1) / 2; + alpha_ = this->layer_param_.lrn_param().alpha(); + beta_ = this->layer_param_.lrn_param().beta(); + k_ = this->layer_param_.lrn_param().k(); + if (this->layer_param_.lrn_param().norm_region() == + LRNParameter_NormRegion_WITHIN_CHANNEL) { + // Set up split_layer_ to use inputs in the numerator and denominator. + split_top_vec_.clear(); + split_top_vec_.push_back(&product_input_); + split_top_vec_.push_back(&square_input_); + LayerParameter split_param; + split_layer_.reset(new SplitLayer(split_param)); + split_layer_->SetUp(bottom, split_top_vec_); + // Set up square_layer_ to square the inputs. + square_bottom_vec_.clear(); + square_top_vec_.clear(); + square_bottom_vec_.push_back(&square_input_); + square_top_vec_.push_back(&square_output_); + LayerParameter square_param; + square_param.mutable_power_param()->set_power(Dtype(2)); + square_layer_.reset(new PowerLayer(square_param)); + square_layer_->SetUp(square_bottom_vec_, square_top_vec_); + // Set up pool_layer_ to sum over square neighborhoods of the input. 
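+    // (Strictly, AVE pooling is used, so pool_output_ holds the neighborhood
+    // sum divided by size_^2; the power layer's scale of alpha_ then yields
+    // the alpha_/N^2 factor described below.)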
+ pool_top_vec_.clear(); + pool_top_vec_.push_back(&pool_output_); + LayerParameter pool_param; + pool_param.mutable_pooling_param()->set_pool( + PoolingParameter_PoolMethod_AVE); + pool_param.mutable_pooling_param()->set_pad(pre_pad_); + pool_param.mutable_pooling_param()->set_kernel_size(size_); + pool_layer_.reset(new PoolingLayer(pool_param)); + pool_layer_->SetUp(square_top_vec_, pool_top_vec_); + // Set up power_layer_ to compute (1 + alpha_/N^2 s)^-beta_, where s is + // the sum of a squared neighborhood (the output of pool_layer_). + power_top_vec_.clear(); + power_top_vec_.push_back(&power_output_); + LayerParameter power_param; + power_param.mutable_power_param()->set_power(-beta_); + power_param.mutable_power_param()->set_scale(alpha_); + power_param.mutable_power_param()->set_shift(Dtype(1)); + power_layer_.reset(new PowerLayer(power_param)); + power_layer_->SetUp(pool_top_vec_, power_top_vec_); + // Set up a product_layer_ to compute outputs by multiplying inputs by the + // inverse demoninator computed by the power layer. + product_bottom_vec_.clear(); + product_bottom_vec_.push_back(&product_input_); + product_bottom_vec_.push_back(&power_output_); + LayerParameter product_param; + EltwiseParameter* eltwise_param = product_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); + product_layer_.reset(new EltwiseLayer(product_param)); + product_layer_->SetUp(product_bottom_vec_, top); + } +} + +template +void LRNLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; + num_ = bottom[0]->num(); + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + switch (this->layer_param_.lrn_param().norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + top[0]->Reshape(num_, channels_, height_, width_); + scale_.Reshape(num_, channels_, height_, width_); + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + split_layer_->Reshape(bottom, split_top_vec_); + square_layer_->Reshape(square_bottom_vec_, square_top_vec_); + pool_layer_->Reshape(square_top_vec_, pool_top_vec_); + power_layer_->Reshape(pool_top_vec_, power_top_vec_); + product_layer_->Reshape(product_bottom_vec_, top); + break; + } +} + +template +void LRNLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + switch (this->layer_param_.lrn_param().norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + CrossChannelForward_cpu(bottom, top); + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + WithinChannelForward(bottom, top); + break; + default: + LOG(FATAL) << "Unknown normalization region."; + } +} + +template +void LRNLayer::CrossChannelForward_cpu( + const vector*>& bottom, const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + Dtype* scale_data = scale_.mutable_cpu_data(); + // start with the constant value + for (int i = 0; i < scale_.count(); ++i) { + scale_data[i] = k_; + } + Blob padded_square(1, channels_ + size_ - 1, height_, width_); + Dtype* padded_square_data = padded_square.mutable_cpu_data(); + caffe_set(padded_square.count(), Dtype(0), padded_square_data); + Dtype alpha_over_size = alpha_ / size_; + // go through the images + for (int n = 0; n < num_; ++n) { + // compute the padded square + caffe_sqr(channels_ * height_ * width_, + bottom_data + bottom[0]->offset(n), + 
padded_square_data + padded_square.offset(0, pre_pad_)); + // Create the first channel scale + for (int c = 0; c < size_; ++c) { + caffe_axpy(height_ * width_, alpha_over_size, + padded_square_data + padded_square.offset(0, c), + scale_data + scale_.offset(n, 0)); + } + for (int c = 1; c < channels_; ++c) { + // copy previous scale + caffe_copy(height_ * width_, + scale_data + scale_.offset(n, c - 1), + scale_data + scale_.offset(n, c)); + // add head + caffe_axpy(height_ * width_, alpha_over_size, + padded_square_data + padded_square.offset(0, c + size_ - 1), + scale_data + scale_.offset(n, c)); + // subtract tail + caffe_axpy(height_ * width_, -alpha_over_size, + padded_square_data + padded_square.offset(0, c - 1), + scale_data + scale_.offset(n, c)); + } + } + + // In the end, compute output + caffe_powx(scale_.count(), scale_data, -beta_, top_data); + caffe_mul(scale_.count(), top_data, bottom_data, top_data); +} + +template +void LRNLayer::WithinChannelForward( + const vector*>& bottom, const vector*>& top) { + split_layer_->Forward(bottom, split_top_vec_); + square_layer_->Forward(square_bottom_vec_, square_top_vec_); + pool_layer_->Forward(square_top_vec_, pool_top_vec_); + power_layer_->Forward(pool_top_vec_, power_top_vec_); + product_layer_->Forward(product_bottom_vec_, top); +} + +template +void LRNLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + switch (this->layer_param_.lrn_param().norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + CrossChannelBackward_cpu(top, propagate_down, bottom); + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + WithinChannelBackward(top, propagate_down, bottom); + break; + default: + LOG(FATAL) << "Unknown normalization region."; + } +} + +template +void LRNLayer::CrossChannelBackward_cpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { + const Dtype* top_diff = top[0]->cpu_diff(); + const Dtype* top_data = top[0]->cpu_data(); + const Dtype* bottom_data = bottom[0]->cpu_data(); + const Dtype* scale_data = scale_.cpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + Blob padded_ratio(1, channels_ + size_ - 1, height_, width_); + Blob accum_ratio(1, 1, height_, width_); + Dtype* padded_ratio_data = padded_ratio.mutable_cpu_data(); + Dtype* accum_ratio_data = accum_ratio.mutable_cpu_data(); + // We hack a little bit by using the diff() to store an additional result + Dtype* accum_ratio_times_bottom = accum_ratio.mutable_cpu_diff(); + caffe_set(padded_ratio.count(), Dtype(0), padded_ratio_data); + Dtype cache_ratio_value = 2. 
* alpha_ * beta_ / size_; + + caffe_powx(scale_.count(), scale_data, -beta_, bottom_diff); + caffe_mul(scale_.count(), top_diff, bottom_diff, bottom_diff); + + // go through individual data + int inverse_pre_pad = size_ - (size_ + 1) / 2; + for (int n = 0; n < num_; ++n) { + int block_offset = scale_.offset(n); + // first, compute diff_i * y_i / s_i + caffe_mul(channels_ * height_ * width_, + top_diff + block_offset, top_data + block_offset, + padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad)); + caffe_div(channels_ * height_ * width_, + padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad), + scale_data + block_offset, + padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad)); + // Now, compute the accumulated ratios and the bottom diff + caffe_set(accum_ratio.count(), Dtype(0), accum_ratio_data); + for (int c = 0; c < size_ - 1; ++c) { + caffe_axpy(height_ * width_, 1., + padded_ratio_data + padded_ratio.offset(0, c), accum_ratio_data); + } + for (int c = 0; c < channels_; ++c) { + caffe_axpy(height_ * width_, 1., + padded_ratio_data + padded_ratio.offset(0, c + size_ - 1), + accum_ratio_data); + // compute bottom diff + caffe_mul(height_ * width_, + bottom_data + top[0]->offset(n, c), + accum_ratio_data, accum_ratio_times_bottom); + caffe_axpy(height_ * width_, -cache_ratio_value, + accum_ratio_times_bottom, bottom_diff + top[0]->offset(n, c)); + caffe_axpy(height_ * width_, -1., + padded_ratio_data + padded_ratio.offset(0, c), accum_ratio_data); + } + } +} + +template +void LRNLayer::WithinChannelBackward( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + vector product_propagate_down(2, true); + product_layer_->Backward(top, product_propagate_down, product_bottom_vec_); + power_layer_->Backward(power_top_vec_, propagate_down, pool_top_vec_); + pool_layer_->Backward(pool_top_vec_, propagate_down, square_top_vec_); + square_layer_->Backward(square_top_vec_, propagate_down, + square_bottom_vec_); + split_layer_->Backward(split_top_vec_, propagate_down, bottom); + } +} + +#ifdef CPU_ONLY +STUB_GPU(LRNLayer); +STUB_GPU_FORWARD(LRNLayer, CrossChannelForward); +STUB_GPU_BACKWARD(LRNLayer, CrossChannelBackward); +#endif + +INSTANTIATE_CLASS(LRNLayer); +REGISTER_LAYER_CLASS(LRN); + +} // namespace caffe diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu new file mode 100755 index 0000000..001b3c3 --- /dev/null +++ b/src/caffe/layers/lrn_layer.cu @@ -0,0 +1,203 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +__global__ void LRNFillScale(const int nthreads, const Dtype* const in, + const int num, const int channels, const int height, + const int width, const int size, const Dtype alpha_over_size, + const Dtype k, Dtype* const scale) { + CUDA_KERNEL_LOOP(index, nthreads) { + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + const Dtype* const in_off = in + offset; + Dtype* const scale_off = scale + offset; + int head = 0; + const int pre_pad = (size - 1) / 2; + const int post_pad = size - pre_pad - 1; + Dtype accum_scale = 0; + // fill the scale at [n, :, h, w] + // accumulate values + while (head < post_pad && head < channels) { + accum_scale += in_off[head * step] * in_off[head * step]; + ++head; + 
} + // both add and subtract + while (head < channels) { + accum_scale += in_off[head * step] * in_off[head * step]; + if (head - size >= 0) { + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; + } + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } + // subtract only + while (head < channels + post_pad) { + if (head - size >= 0) { + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; + } + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } + } +} + + +template +void LRNLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + switch (this->layer_param_.lrn_param().norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + CrossChannelForward_gpu(bottom, top); + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + WithinChannelForward(bottom, top); + break; + default: + LOG(FATAL) << "Unknown normalization region."; + } +} + +// TODO: check if it would be faster to just put it into the previous kernel. +template +__global__ void LRNComputeOutput(const int nthreads, const Dtype* const in, + const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { + CUDA_KERNEL_LOOP(index, nthreads) { + out[index] = in[index] * pow(scale[index], negative_beta); + } +} + +template +void LRNLayer::CrossChannelForward_gpu( + const vector*>& bottom, const vector*>& top) { + // First, compute scale + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + Dtype* scale_data = scale_.mutable_gpu_data(); + // We will launch one kernel for each pixel location, and have the kernel + // go through all the channels. + int n_threads = num_ * height_ * width_; + // NOLINT_NEXT_LINE(whitespace/operators) + LRNFillScale<<>>( + n_threads, bottom_data, num_, channels_, height_, width_, size_, + alpha_ / size_, k_, scale_data); + CUDA_POST_KERNEL_CHECK; + n_threads = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + LRNComputeOutput<<>>( + n_threads, bottom_data, scale_data, -beta_, top_data); + CUDA_POST_KERNEL_CHECK; +} +template void LRNLayer::CrossChannelForward_gpu( + const vector*>& bottom, const vector*>& top); +template void LRNLayer::CrossChannelForward_gpu( + const vector*>& bottom, const vector*>& top); + + +template +void LRNLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + switch (this->layer_param_.lrn_param().norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + CrossChannelBackward_gpu(top, propagate_down, bottom); + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + WithinChannelBackward(top, propagate_down, bottom); + break; + default: + LOG(FATAL) << "Unknown normalization region."; + } +} + +template +__global__ void LRNComputeDiff(const int nthreads, + const Dtype* const bottom_data, const Dtype* const top_data, + const Dtype* const scale, const Dtype* const top_diff, + const int num, const int channels, const int height, + const int width, const int size, const Dtype negative_beta, + const Dtype cache_ratio, Dtype* const bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + const Dtype* const bottom_off = bottom_data + offset; + const Dtype* const top_off = top_data + offset; + 
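+    // bottom_off, top_off, and the scale/diff pointers below all index the
+    // same (n, :, h, w) column; the loop then keeps a sliding-window sum of
+    // top_diff * top_data / scale across channels.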
const Dtype* const scale_off = scale + offset; + const Dtype* const top_diff_off = top_diff + offset; + Dtype* const bottom_diff_off = bottom_diff + offset; + int head = 0; + const int pre_pad = size - (size + 1) / 2; + const int post_pad = size - pre_pad - 1; + Dtype accum_ratio = 0; + // accumulate values + while (head < post_pad && head < channels) { + accum_ratio += top_diff_off[head * step] * top_off[head * step] / + scale_off[head * step]; + ++head; + } + // both add and subtract + while (head < channels) { + accum_ratio += top_diff_off[head * step] * top_off[head * step] / + scale_off[head * step]; + if (head - size >= 0) { + accum_ratio -= top_diff_off[(head - size) * step] * + top_off[(head - size) * step] / scale_off[(head - size) * step]; + } + bottom_diff_off[(head - post_pad) * step] = + top_diff_off[(head - post_pad) * step] + * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + ++head; + } + // subtract only + while (head < channels + post_pad) { + if (head - size >= 0) { + accum_ratio -= top_diff_off[(head - size) * step] * + top_off[(head - size) * step] / scale_off[(head - size) * step]; + } + bottom_diff_off[(head - post_pad) * step] = + top_diff_off[(head - post_pad) * step] + * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + ++head; + } + } +} + +template +void LRNLayer::CrossChannelBackward_gpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { + int n_threads = num_ * height_ * width_; + // NOLINT_NEXT_LINE(whitespace/operators) + LRNComputeDiff<<>>( + n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(), + scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_, + size_, -beta_, Dtype(2. 
* alpha_ * beta_ / size_), + bottom[0]->mutable_gpu_diff()); +} +template void LRNLayer::CrossChannelBackward_gpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom); +template void LRNLayer::CrossChannelBackward_gpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom); + + + +INSTANTIATE_LAYER_GPU_FUNCS(LRNLayer); + +} // namespace caffe diff --git a/src/caffe/layers/memory_data_layer.cpp b/src/caffe/layers/memory_data_layer.cpp new file mode 100755 index 0000000..42de419 --- /dev/null +++ b/src/caffe/layers/memory_data_layer.cpp @@ -0,0 +1,121 @@ +#include + +#include + +#include "caffe/data_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" + +namespace caffe { + +template +void MemoryDataLayer::DataLayerSetUp(const vector*>& bottom, + const vector*>& top) { + batch_size_ = this->layer_param_.memory_data_param().batch_size(); + channels_ = this->layer_param_.memory_data_param().channels(); + height_ = this->layer_param_.memory_data_param().height(); + width_ = this->layer_param_.memory_data_param().width(); + size_ = channels_ * height_ * width_; + CHECK_GT(batch_size_ * size_, 0) << + "batch_size, channels, height, and width must be specified and" + " positive in memory_data_param"; + vector label_shape(1, batch_size_); + top[0]->Reshape(batch_size_, channels_, height_, width_); + top[1]->Reshape(label_shape); + added_data_.Reshape(batch_size_, channels_, height_, width_); + added_label_.Reshape(label_shape); + data_ = NULL; + labels_ = NULL; + added_data_.cpu_data(); + added_label_.cpu_data(); +} + +template +void MemoryDataLayer::AddDatumVector(const vector& datum_vector) { + CHECK(!has_new_data_) << + "Can't add data until current data has been consumed."; + size_t num = datum_vector.size(); + CHECK_GT(num, 0) << "There is no datum to add."; + CHECK_EQ(num % batch_size_, 0) << + "The added data must be a multiple of the batch size."; + added_data_.Reshape(num, channels_, height_, width_); + added_label_.Reshape(num, 1, 1, 1); + // Apply data transformations (mirror, scale, crop...) + this->data_transformer_->Transform(datum_vector, &added_data_); + // Copy Labels + Dtype* top_label = added_label_.mutable_cpu_data(); + for (int item_id = 0; item_id < num; ++item_id) { + top_label[item_id] = datum_vector[item_id].label(); + } + // num_images == batch_size_ + Dtype* top_data = added_data_.mutable_cpu_data(); + Reset(top_data, top_label, num); + has_new_data_ = true; +} + +template +void MemoryDataLayer::AddMatVector(const vector& mat_vector, + const vector& labels) { + size_t num = mat_vector.size(); + CHECK(!has_new_data_) << + "Can't add mat until current data has been consumed."; + CHECK_GT(num, 0) << "There is no mat to add"; + CHECK_EQ(num % batch_size_, 0) << + "The added data must be a multiple of the batch size."; + added_data_.Reshape(num, channels_, height_, width_); + added_label_.Reshape(num, 1, 1, 1); + // Apply data transformations (mirror, scale, crop...) 
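+  // The transformer writes the processed images straight into added_data_,
+  // which was reshaped above to hold all `num` images.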
+ this->data_transformer_->Transform(mat_vector, &added_data_); + // Copy Labels + Dtype* top_label = added_label_.mutable_cpu_data(); + for (int item_id = 0; item_id < num; ++item_id) { + top_label[item_id] = labels[item_id]; + } + // num_images == batch_size_ + Dtype* top_data = added_data_.mutable_cpu_data(); + Reset(top_data, top_label, num); + has_new_data_ = true; +} + +template +void MemoryDataLayer::Reset(Dtype* data, Dtype* labels, int n) { + CHECK(data); + CHECK(labels); + CHECK_EQ(n % batch_size_, 0) << "n must be a multiple of batch size"; + // Warn with transformation parameters since a memory array is meant to + // be generic and no transformations are done with Reset(). + if (this->layer_param_.has_transform_param()) { + LOG(WARNING) << this->type() << " does not transform array data on Reset()"; + } + data_ = data; + labels_ = labels; + n_ = n; + pos_ = 0; +} + +template +void MemoryDataLayer::set_batch_size(int new_size) { + CHECK(!has_new_data_) << + "Can't change batch_size until current data has been consumed."; + batch_size_ = new_size; + added_data_.Reshape(batch_size_, channels_, height_, width_); + added_label_.Reshape(batch_size_, 1, 1, 1); +} + +template +void MemoryDataLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + CHECK(data_) << "MemoryDataLayer needs to be initalized by calling Reset"; + top[0]->Reshape(batch_size_, channels_, height_, width_); + top[1]->Reshape(batch_size_, 1, 1, 1); + top[0]->set_cpu_data(data_ + pos_ * size_); + top[1]->set_cpu_data(labels_ + pos_); + pos_ = (pos_ + batch_size_) % n_; + if (pos_ == 0) + has_new_data_ = false; +} + +INSTANTIATE_CLASS(MemoryDataLayer); +REGISTER_LAYER_CLASS(MemoryData); + +} // namespace caffe diff --git a/src/caffe/layers/multinomial_logistic_loss_layer.cpp b/src/caffe/layers/multinomial_logistic_loss_layer.cpp new file mode 100755 index 0000000..4267a59 --- /dev/null +++ b/src/caffe/layers/multinomial_logistic_loss_layer.cpp @@ -0,0 +1,67 @@ +#include +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void MultinomialLogisticLossLayer::Reshape( + const vector*>& bottom, const vector*>& top) { + LossLayer::Reshape(bottom, top); + CHECK_EQ(bottom[1]->channels(), 1); + CHECK_EQ(bottom[1]->height(), 1); + CHECK_EQ(bottom[1]->width(), 1); +} + +template +void MultinomialLogisticLossLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + const Dtype* bottom_label = bottom[1]->cpu_data(); + int num = bottom[0]->num(); + int dim = bottom[0]->count() / bottom[0]->num(); + Dtype loss = 0; + for (int i = 0; i < num; ++i) { + int label = static_cast(bottom_label[i]); + Dtype prob = std::max( + bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD)); + loss -= log(prob); + } + top[0]->mutable_cpu_data()[0] = loss / num; +} + +template +void MultinomialLogisticLossLayer::Backward_cpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[1]) { + LOG(FATAL) << this->type() + << " Layer cannot backpropagate to label inputs."; + } + if (propagate_down[0]) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + const Dtype* bottom_label = bottom[1]->cpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + int num = bottom[0]->num(); + int dim = bottom[0]->count() / bottom[0]->num(); + caffe_set(bottom[0]->count(), Dtype(0), 
bottom_diff); + const Dtype scale = - top[0]->cpu_diff()[0] / num; + for (int i = 0; i < num; ++i) { + int label = static_cast(bottom_label[i]); + Dtype prob = std::max( + bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD)); + bottom_diff[i * dim + label] = scale / prob; + } + } +} + +INSTANTIATE_CLASS(MultinomialLogisticLossLayer); +REGISTER_LAYER_CLASS(MultinomialLogisticLoss); + +} // namespace caffe diff --git a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp new file mode 100755 index 0000000..3e79bdd --- /dev/null +++ b/src/caffe/layers/mvn_layer.cpp @@ -0,0 +1,145 @@ +#include +#include + +#include "caffe/common_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void MVNLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + mean_.Reshape(bottom[0]->num(), bottom[0]->channels(), + 1, 1); + variance_.Reshape(bottom[0]->num(), bottom[0]->channels(), + 1, 1); + temp_.Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + sum_multiplier_.Reshape(1, 1, + bottom[0]->height(), bottom[0]->width()); + Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); + caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data); + eps_ = this->layer_param_.mvn_param().eps(); +} + +template +void MVNLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + int num; + if (this->layer_param_.mvn_param().across_channels()) + num = bottom[0]->num(); + else + num = bottom[0]->num() * bottom[0]->channels(); + + int dim = bottom[0]->count() / num; + + if (this->layer_param_.mvn_param().normalize_variance()) { + // put the squares of bottom into temp_ + caffe_powx(bottom[0]->count(), bottom_data, Dtype(2), + temp_.mutable_cpu_data()); + + // computes variance using var(X) = E(X^2) - (EX)^2 + caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, + sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); // EX + caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, temp_.cpu_data(), + sum_multiplier_.cpu_data(), 0., + variance_.mutable_cpu_data()); // E(X^2) + caffe_powx(mean_.count(), mean_.cpu_data(), Dtype(2), + temp_.mutable_cpu_data()); // (EX)^2 + caffe_sub(mean_.count(), variance_.cpu_data(), temp_.cpu_data(), + variance_.mutable_cpu_data()); // variance + + // do mean and variance normalization + // subtract mean + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., + mean_.cpu_data(), sum_multiplier_.cpu_data(), 0., + temp_.mutable_cpu_data()); + + caffe_add(temp_.count(), bottom_data, temp_.cpu_data(), top_data); + + // normalize variance + caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), + variance_.mutable_cpu_data()); + + caffe_add_scalar(variance_.count(), eps_, variance_.mutable_cpu_data()); + + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., + variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., + temp_.mutable_cpu_data()); + + caffe_div(temp_.count(), top_data, temp_.cpu_data(), top_data); + } else { + caffe_cpu_gemv(CblasNoTrans, num, dim, 1. 
/ dim, bottom_data, + sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); // EX + + // subtract mean + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., + mean_.cpu_data(), sum_multiplier_.cpu_data(), 0., + temp_.mutable_cpu_data()); + + caffe_add(temp_.count(), bottom_data, temp_.cpu_data(), top_data); + } +} + +template +void MVNLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + const Dtype* top_diff = top[0]->cpu_diff(); + const Dtype* top_data = top[0]->cpu_data(); + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + + int num; + if (this->layer_param_.mvn_param().across_channels()) + num = bottom[0]->num(); + else + num = bottom[0]->num() * bottom[0]->channels(); + + int dim = bottom[0]->count() / num; + + if (this->layer_param_.mvn_param().normalize_variance()) { + caffe_mul(temp_.count(), top_data, top_diff, bottom_diff); + caffe_cpu_gemv(CblasNoTrans, num, dim, 1., bottom_diff, + sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., + mean_.cpu_data(), sum_multiplier_.cpu_data(), 0., + bottom_diff); + caffe_mul(temp_.count(), top_data, bottom_diff, bottom_diff); + + caffe_cpu_gemv(CblasNoTrans, num, dim, 1., top_diff, + sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., + mean_.cpu_data(), sum_multiplier_.cpu_data(), 1., + bottom_diff); + + caffe_cpu_axpby(temp_.count(), Dtype(1), top_diff, Dtype(-1. / dim), + bottom_diff); + + // put the squares of bottom into temp_ + caffe_powx(temp_.count(), bottom_data, Dtype(2), + temp_.mutable_cpu_data()); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., + variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., + temp_.mutable_cpu_data()); + + caffe_div(temp_.count(), bottom_diff, temp_.cpu_data(), bottom_diff); + } else { + caffe_copy(temp_.count(), top_diff, bottom_diff); + } +} + + +#ifdef CPU_ONLY +STUB_GPU(MVNLayer); +#endif + +INSTANTIATE_CLASS(MVNLayer); +REGISTER_LAYER_CLASS(MVN); + +} // namespace caffe diff --git a/src/caffe/layers/mvn_layer.cu b/src/caffe/layers/mvn_layer.cu new file mode 100755 index 0000000..3888a0c --- /dev/null +++ b/src/caffe/layers/mvn_layer.cu @@ -0,0 +1,124 @@ +#include +#include + +#include "caffe/common_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void MVNLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + int num; + if (this->layer_param_.mvn_param().across_channels()) + num = bottom[0]->num(); + else + num = bottom[0]->num() * bottom[0]->channels(); + + int dim = bottom[0]->count() / num; + + if (this->layer_param_.mvn_param().normalize_variance()) { + // put the squares of bottom into temp_ + caffe_gpu_powx(bottom[0]->count(), bottom_data, Dtype(2), + temp_.mutable_gpu_data()); + + // computes variance using var(X) = E(X^2) - (EX)^2 + caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, + sum_multiplier_.gpu_data(), 0., mean_.mutable_gpu_data()); // EX + caffe_gpu_gemv(CblasNoTrans, num, dim, 1. 
/ dim, temp_.gpu_data(), + sum_multiplier_.gpu_data(), 0., + variance_.mutable_gpu_data()); // E(X^2) + caffe_gpu_powx(mean_.count(), mean_.gpu_data(), Dtype(2), + temp_.mutable_gpu_data()); // (EX)^2 + caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(), + variance_.mutable_gpu_data()); // variance + + // do mean and variance normalization + // subtract mean + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., + mean_.gpu_data(), sum_multiplier_.gpu_data(), 0., + temp_.mutable_gpu_data()); + + caffe_gpu_add(temp_.count(), bottom_data, temp_.gpu_data(), top_data); + + // normalize variance + caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), + variance_.mutable_gpu_data()); + + caffe_gpu_add_scalar(variance_.count(), eps_, variance_.mutable_gpu_data()); + + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., + variance_.gpu_data(), sum_multiplier_.gpu_data(), 0., + temp_.mutable_gpu_data()); + + caffe_gpu_div(temp_.count(), top_data, temp_.gpu_data(), top_data); + } else { + caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, + sum_multiplier_.gpu_data(), 0., mean_.mutable_gpu_data()); // EX + + // subtract mean + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., + mean_.gpu_data(), sum_multiplier_.gpu_data(), 0., + temp_.mutable_gpu_data()); + + caffe_gpu_add(temp_.count(), bottom_data, temp_.gpu_data(), top_data); + } +} + +template +void MVNLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* top_data = top[0]->gpu_data(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + + int num; + if (this->layer_param_.mvn_param().across_channels()) + num = bottom[0]->num(); + else + num = bottom[0]->num() * bottom[0]->channels(); + + int dim = bottom[0]->count() / num; + + if (this->layer_param_.mvn_param().normalize_variance()) { + caffe_gpu_mul(temp_.count(), top_data, top_diff, bottom_diff); + caffe_gpu_gemv(CblasNoTrans, num, dim, 1., bottom_diff, + sum_multiplier_.gpu_data(), 0., mean_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., + mean_.gpu_data(), sum_multiplier_.gpu_data(), 0., + bottom_diff); + caffe_gpu_mul(temp_.count(), top_data, bottom_diff, bottom_diff); + + caffe_gpu_gemv(CblasNoTrans, num, dim, 1., top_diff, + sum_multiplier_.gpu_data(), 0., mean_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., + mean_.gpu_data(), sum_multiplier_.gpu_data(), 1., + bottom_diff); + + caffe_gpu_axpby(temp_.count(), Dtype(1), top_diff, Dtype(-1. 
/ dim), + bottom_diff); + + // put the squares of bottom into temp_ + caffe_gpu_powx(temp_.count(), bottom_data, Dtype(2), + temp_.mutable_gpu_data()); + + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., + variance_.gpu_data(), sum_multiplier_.gpu_data(), 0., + temp_.mutable_gpu_data()); + + caffe_gpu_div(temp_.count(), bottom_diff, temp_.gpu_data(), bottom_diff); + } else { + caffe_copy(temp_.count(), top_diff, bottom_diff); + } +} + + +INSTANTIATE_LAYER_GPU_FUNCS(MVNLayer); + + +} // namespace caffe diff --git a/src/caffe/layers/neuron_layer.cpp b/src/caffe/layers/neuron_layer.cpp new file mode 100755 index 0000000..ba67b43 --- /dev/null +++ b/src/caffe/layers/neuron_layer.cpp @@ -0,0 +1,16 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void NeuronLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + top[0]->ReshapeLike(*bottom[0]); +} + +INSTANTIATE_CLASS(NeuronLayer); + +} // namespace caffe diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp new file mode 100755 index 0000000..c8d4149 --- /dev/null +++ b/src/caffe/layers/pooling_layer.cpp @@ -0,0 +1,319 @@ +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +using std::min; +using std::max; + +template +void PoolingLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + PoolingParameter pool_param = this->layer_param_.pooling_param(); + if (pool_param.global_pooling()) { + CHECK(!(pool_param.has_kernel_size() || + pool_param.has_kernel_h() || pool_param.has_kernel_w())) + << "With Global_pooling: true Filter size cannot specified"; + } else { + CHECK(!pool_param.has_kernel_size() != + !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) + << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; + CHECK(pool_param.has_kernel_size() || + (pool_param.has_kernel_h() && pool_param.has_kernel_w())) + << "For non-square filters both kernel_h and kernel_w are required."; + } + CHECK((!pool_param.has_pad() && pool_param.has_pad_h() + && pool_param.has_pad_w()) + || (!pool_param.has_pad_h() && !pool_param.has_pad_w())) + << "pad is pad OR pad_h and pad_w are required."; + CHECK((!pool_param.has_stride() && pool_param.has_stride_h() + && pool_param.has_stride_w()) + || (!pool_param.has_stride_h() && !pool_param.has_stride_w())) + << "Stride is stride OR stride_h and stride_w are required."; + global_pooling_ = pool_param.global_pooling(); + if (global_pooling_) { + kernel_h_ = bottom[0]->height(); + kernel_w_ = bottom[0]->width(); + } else { + if (pool_param.has_kernel_size()) { + kernel_h_ = kernel_w_ = pool_param.kernel_size(); + } else { + kernel_h_ = pool_param.kernel_h(); + kernel_w_ = pool_param.kernel_w(); + } + } + CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero."; + CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; + if (!pool_param.has_pad_h()) { + pad_h_ = pad_w_ = pool_param.pad(); + } else { + pad_h_ = pool_param.pad_h(); + pad_w_ = pool_param.pad_w(); + } + if (!pool_param.has_stride_h()) { + stride_h_ = stride_w_ = pool_param.stride(); + } else { + stride_h_ = pool_param.stride_h(); + stride_w_ = pool_param.stride_w(); + } + if (global_pooling_) { + CHECK(pad_h_ == 0 && pad_w_ == 0 && stride_h_ == 1 && stride_w_ == 1) + << "With Global_pooling: true; only pad = 0 and stride = 1"; + } + if 
(pad_h_ != 0 || pad_w_ != 0) { + CHECK(this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_AVE + || this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) + << "Padding implemented only for average and max pooling."; + CHECK_LT(pad_h_, kernel_h_); + CHECK_LT(pad_w_, kernel_w_); + } +} + +template +void PoolingLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + if (global_pooling_) { + kernel_h_ = bottom[0]->height(); + kernel_w_ = bottom[0]->width(); + } + pooled_height_ = static_cast(ceil(static_cast( + height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1; + pooled_width_ = static_cast(ceil(static_cast( + width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1; + if (pad_h_ || pad_w_) { + // If we have padding, ensure that the last pooling starts strictly + // inside the image (instead of at the padding); otherwise clip the last. + if ((pooled_height_ - 1) * stride_h_ >= height_ + pad_h_) { + --pooled_height_; + } + if ((pooled_width_ - 1) * stride_w_ >= width_ + pad_w_) { + --pooled_width_; + } + CHECK_LT((pooled_height_ - 1) * stride_h_, height_ + pad_h_); + CHECK_LT((pooled_width_ - 1) * stride_w_, width_ + pad_w_); + } + top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, + pooled_width_); + if (top.size() > 1) { + top[1]->ReshapeLike(*top[0]); + } + // If max pooling, we will initialize the vector index part. + if (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_MAX && top.size() == 1) { + max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, + pooled_width_); + } + // If stochastic pooling, we will initialize the random index part. + if (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_STOCHASTIC) { + rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, + pooled_width_); + } +} + +// TODO(Yangqing): Is there a faster way to do pooling in the channel-first +// case? +template +void PoolingLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + const int top_count = top[0]->count(); + // We'll output the mask to top[1] if it's of size >1. + const bool use_top_mask = top.size() > 1; + int* mask = NULL; // suppress warnings about uninitalized variables + Dtype* top_mask = NULL; + // Different pooling methods. We explicitly do the switch outside the for + // loop to save time, although this results in more code. 
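+  // MAX pooling records the index of the winning input per output element
+  // (in max_idx_ or top[1]) so the backward pass can route gradients; AVE
+  // pooling divides each window sum by the window area computed before the
+  // window is clipped to the image border.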
+ switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + // Initialize + if (use_top_mask) { + top_mask = top[1]->mutable_cpu_data(); + caffe_set(top_count, Dtype(-1), top_mask); + } else { + mask = max_idx_.mutable_cpu_data(); + caffe_set(top_count, -1, mask); + } + caffe_set(top_count, Dtype(-FLT_MAX), top_data); + // The main loop + for (int n = 0; n < bottom[0]->num(); ++n) { + for (int c = 0; c < channels_; ++c) { + for (int ph = 0; ph < pooled_height_; ++ph) { + for (int pw = 0; pw < pooled_width_; ++pw) { + int hstart = ph * stride_h_ - pad_h_; + int wstart = pw * stride_w_ - pad_w_; + int hend = min(hstart + kernel_h_, height_); + int wend = min(wstart + kernel_w_, width_); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + const int pool_index = ph * pooled_width_ + pw; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + const int index = h * width_ + w; + if (bottom_data[index] > top_data[pool_index]) { + top_data[pool_index] = bottom_data[index]; + if (use_top_mask) { + top_mask[pool_index] = static_cast(index); + } else { + mask[pool_index] = index; + } + } + } + } + } + } + // compute offset + bottom_data += bottom[0]->offset(0, 1); + top_data += top[0]->offset(0, 1); + if (use_top_mask) { + top_mask += top[0]->offset(0, 1); + } else { + mask += top[0]->offset(0, 1); + } + } + } + break; + case PoolingParameter_PoolMethod_AVE: + for (int i = 0; i < top_count; ++i) { + top_data[i] = 0; + } + // The main loop + for (int n = 0; n < bottom[0]->num(); ++n) { + for (int c = 0; c < channels_; ++c) { + for (int ph = 0; ph < pooled_height_; ++ph) { + for (int pw = 0; pw < pooled_width_; ++pw) { + int hstart = ph * stride_h_ - pad_h_; + int wstart = pw * stride_w_ - pad_w_; + int hend = min(hstart + kernel_h_, height_ + pad_h_); + int wend = min(wstart + kernel_w_, width_ + pad_w_); + int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height_); + wend = min(wend, width_); + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + top_data[ph * pooled_width_ + pw] += + bottom_data[h * width_ + w]; + } + } + top_data[ph * pooled_width_ + pw] /= pool_size; + } + } + // compute offset + bottom_data += bottom[0]->offset(0, 1); + top_data += top[0]->offset(0, 1); + } + } + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL) << "Unknown pooling method."; + } +} + +template +void PoolingLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (!propagate_down[0]) { + return; + } + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + // Different pooling methods. We explicitly do the switch outside the for + // loop to save time, although this results in more codes. + caffe_set(bottom[0]->count(), Dtype(0), bottom_diff); + // We'll output the mask to top[1] if it's of size >1. 
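+  // In MAX mode each top gradient is routed back to the single bottom element
+  // that won the forward pass (looked up via the stored index); in AVE mode it
+  // is spread evenly as top_diff / pool_size over the window.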
+ const bool use_top_mask = top.size() > 1; + const int* mask = NULL; // suppress warnings about uninitialized variables + const Dtype* top_mask = NULL; + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + // The main loop + if (use_top_mask) { + top_mask = top[1]->cpu_data(); + } else { + mask = max_idx_.cpu_data(); + } + for (int n = 0; n < top[0]->num(); ++n) { + for (int c = 0; c < channels_; ++c) { + for (int ph = 0; ph < pooled_height_; ++ph) { + for (int pw = 0; pw < pooled_width_; ++pw) { + const int index = ph * pooled_width_ + pw; + const int bottom_index = + use_top_mask ? top_mask[index] : mask[index]; + bottom_diff[bottom_index] += top_diff[index]; + } + } + bottom_diff += bottom[0]->offset(0, 1); + top_diff += top[0]->offset(0, 1); + if (use_top_mask) { + top_mask += top[0]->offset(0, 1); + } else { + mask += top[0]->offset(0, 1); + } + } + } + break; + case PoolingParameter_PoolMethod_AVE: + // The main loop + for (int n = 0; n < top[0]->num(); ++n) { + for (int c = 0; c < channels_; ++c) { + for (int ph = 0; ph < pooled_height_; ++ph) { + for (int pw = 0; pw < pooled_width_; ++pw) { + int hstart = ph * stride_h_ - pad_h_; + int wstart = pw * stride_w_ - pad_w_; + int hend = min(hstart + kernel_h_, height_ + pad_h_); + int wend = min(wstart + kernel_w_, width_ + pad_w_); + int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height_); + wend = min(wend, width_); + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + bottom_diff[h * width_ + w] += + top_diff[ph * pooled_width_ + pw] / pool_size; + } + } + } + } + // offset + bottom_diff += bottom[0]->offset(0, 1); + top_diff += top[0]->offset(0, 1); + } + } + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL) << "Unknown pooling method."; + } +} + + +#ifdef CPU_ONLY +STUB_GPU(PoolingLayer); +#endif + +INSTANTIATE_CLASS(PoolingLayer); + +} // namespace caffe diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu new file mode 100755 index 0000000..ca4b13f --- /dev/null +++ b/src/caffe/layers/pooling_layer.cu @@ -0,0 +1,387 @@ +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +__global__ void MaxPoolForward(const int nthreads, + const Dtype* const bottom_data, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, const int pad_h, const int pad_w, + Dtype* const top_data, int* mask, Dtype* top_mask) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + const int hend = min(hstart + kernel_h, height); + const int wend = min(wstart + kernel_w, width); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + const Dtype* const bottom_slice = + bottom_data + (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + if (bottom_slice[h * width + w] > 
maxval) { + maxidx = h * width + w; + maxval = bottom_slice[maxidx]; + } + } + } + top_data[index] = maxval; + if (mask) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +template +__global__ void AvePoolForward(const int nthreads, + const Dtype* const bottom_data, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, const int pad_h, const int pad_w, + Dtype* const top_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + kernel_h, height + pad_h); + int wend = min(wstart + kernel_w, width + pad_w); + const int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + const Dtype* const bottom_slice = + bottom_data + (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_slice[h * width + w]; + } + } + top_data[index] = aveval / pool_size; + } +} + +template +__global__ void StoPoolForwardTrain(const int nthreads, + const Dtype* const bottom_data, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, const int stride_h, + const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); + Dtype cumsum = 0.; + const Dtype* const bottom_slice = + bottom_data + (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + } + } + const float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. 
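+    // i.e. sample one location with probability proportional to its
+    // activation: the first element whose running sum crosses `thres` is
+    // kept, and its flat bottom index is stored in rand_idx for backward.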
+ cumsum = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_slice[h * width + w]; + return; + } + } + } + } +} + + +template +__global__ void StoPoolForwardTest(const int nthreads, + const Dtype* const bottom_data, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, const int stride_h, + const int stride_w, Dtype* const top_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + const Dtype* const bottom_slice = + bottom_data + (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } +} + + +template +void PoolingLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + int count = top[0]->count(); + // We'll output the mask to top[1] if it's of size >1. + const bool use_top_mask = top.size() > 1; + int* mask = NULL; + Dtype* top_mask = NULL; + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolForward<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, + mask, top_mask); + break; + case PoolingParameter_PoolMethod_AVE: + // NOLINT_NEXT_LINE(whitespace/operators) + AvePoolForward<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + if (this->phase_ == TRAIN) { + // We need to create the random index as well. 
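+      // rand_idx_ is first filled with uniform draws in [0, 1]; the kernel
+      // uses them as sampling thresholds and then overwrites each entry with
+      // the flat index of the chosen bottom element.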
+ caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), + rand_idx_.mutable_gpu_data()); + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolForwardTrain<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, + rand_idx_.mutable_gpu_data(), top_data); + } else { + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolForwardTest<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, top_data); + } + break; + default: + LOG(FATAL) << "Unknown pooling method."; + } + CUDA_POST_KERNEL_CHECK; +} + + +template +__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, + const int* const mask, const Dtype* const top_mask, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, Dtype* const bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + // find out the local index + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = + (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; + const int phend = min((h + pad_h) / stride_h + 1, pooled_height); + const int pwstart = + (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; + const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); + Dtype gradient = 0; + const int offset = (n * channels + c) * pooled_height * pooled_width; + const Dtype* const top_diff_slice = top_diff + offset; + if (mask) { + const int* const mask_slice = mask + offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; + } + } + } + } else { + const Dtype* const top_mask_slice = top_mask + offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +template +__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, const int pad_w, + Dtype* const bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + // find out the local index + // find out the local offset + const int w = index % width + pad_w; + const int h = (index / width) % height + pad_h; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + const Dtype* const top_diff_slice = + top_diff + (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + // figure out the pooling size + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + kernel_h, height + pad_h); + int wend = min(wstart + kernel_w, width + pad_w); + int pool_size = (hend - hstart) * (wend - wstart); + gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; + } + } + bottom_diff[index] = gradient; + } +} + + +template +__global__ void StoPoolBackward(const int nthreads, + const Dtype* const rand_idx, const Dtype* const top_diff, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, const int stride_h, + const int stride_w, Dtype* const bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + // find out the local index + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + const Dtype* const rand_idx_slice = + rand_idx + (n * channels + c) * pooled_height * pooled_width; + const Dtype* const top_diff_slice = + top_diff + (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + gradient += top_diff_slice[ph * pooled_width + pw] * + (index == static_cast(rand_idx_slice[ph * pooled_width + pw])); + } + } + bottom_diff[index] = gradient; + } +} + + +template +void PoolingLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (!propagate_down[0]) { + return; + } + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); + caffe_gpu_set(count, Dtype(0.), bottom_diff); + // We'll output the mask to top[1] if it's of size >1. 
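+  // Unlike the CPU backward pass, which scatters gradients from each pooled
+  // output, these kernels launch one thread per bottom element and gather
+  // contributions from every window that element can fall into, so no atomic
+  // writes are needed.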
+ const bool use_top_mask = top.size() > 1; + const int* mask = NULL; + const Dtype* top_mask = NULL; + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->gpu_data(); + } else { + mask = max_idx_.gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolBackward<<>>( + count, top_diff, mask, top_mask, top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, + kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, + bottom_diff); + break; + case PoolingParameter_PoolMethod_AVE: + // NOLINT_NEXT_LINE(whitespace/operators) + AvePoolBackward<<>>( + count, top_diff, top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolBackward<<>>( + count, rand_idx_.gpu_data(), top_diff, + top[0]->num(), channels_, height_, width_, pooled_height_, + pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, + bottom_diff); + break; + default: + LOG(FATAL) << "Unknown pooling method."; + } + CUDA_POST_KERNEL_CHECK; +} + + +INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); + + +} // namespace caffe diff --git a/src/caffe/layers/power_layer.cpp b/src/caffe/layers/power_layer.cpp new file mode 100755 index 0000000..4fe34c4 --- /dev/null +++ b/src/caffe/layers/power_layer.cpp @@ -0,0 +1,104 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void PowerLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + NeuronLayer::LayerSetUp(bottom, top); + power_ = this->layer_param_.power_param().power(); + scale_ = this->layer_param_.power_param().scale(); + shift_ = this->layer_param_.power_param().shift(); + diff_scale_ = power_ * scale_; +} + +// Compute y = (shift + scale * x)^power +template +void PowerLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + Dtype* top_data = top[0]->mutable_cpu_data(); + const int count = bottom[0]->count(); + // Special case where we can ignore the input: scale or power is 0. + if (diff_scale_ == Dtype(0)) { + Dtype value = (power_ == 0) ? 
Dtype(1) : pow(shift_, power_); + caffe_set(count, value, top_data); + return; + } + const Dtype* bottom_data = bottom[0]->cpu_data(); + caffe_copy(count, bottom_data, top_data); + if (scale_ != Dtype(1)) { + caffe_scal(count, scale_, top_data); + } + if (shift_ != Dtype(0)) { + caffe_add_scalar(count, shift_, top_data); + } + if (power_ != Dtype(1)) { + caffe_powx(count, top_data, power_, top_data); + } +} + +template +void PowerLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const int count = bottom[0]->count(); + const Dtype* top_diff = top[0]->cpu_diff(); + if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) { + caffe_set(count, diff_scale_, bottom_diff); + } else { + const Dtype* bottom_data = bottom[0]->cpu_data(); + // Compute dy/dx = scale * power * (shift + scale * x)^(power - 1) + // = diff_scale * y / (shift + scale * x) + if (power_ == Dtype(2)) { + // Special case for y = (shift + scale * x)^2 + // -> dy/dx = 2 * scale * (shift + scale * x) + // = diff_scale * shift + diff_scale * scale * x + caffe_cpu_axpby(count, diff_scale_ * scale_, bottom_data, + Dtype(0), bottom_diff); + if (shift_ != Dtype(0)) { + caffe_add_scalar(count, diff_scale_ * shift_, bottom_diff); + } + } else if (shift_ == Dtype(0)) { + // Special case for y = (scale * x)^power + // -> dy/dx = scale * power * (scale * x)^(power - 1) + // = scale * power * (scale * x)^power * (scale * x)^(-1) + // = power * y / x + const Dtype* top_data = top[0]->cpu_data(); + caffe_div(count, top_data, bottom_data, bottom_diff); + caffe_scal(count, power_, bottom_diff); + } else { + caffe_copy(count, bottom_data, bottom_diff); + if (scale_ != Dtype(1)) { + caffe_scal(count, scale_, bottom_diff); + } + if (shift_ != Dtype(0)) { + caffe_add_scalar(count, shift_, bottom_diff); + } + const Dtype* top_data = top[0]->cpu_data(); + caffe_div(count, top_data, bottom_diff, bottom_diff); + if (diff_scale_ != Dtype(1)) { + caffe_scal(count, diff_scale_, bottom_diff); + } + } + } + if (diff_scale_ != Dtype(0)) { + caffe_mul(count, top_diff, bottom_diff, bottom_diff); + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(PowerLayer); +#endif + +INSTANTIATE_CLASS(PowerLayer); +REGISTER_LAYER_CLASS(Power); + +} // namespace caffe diff --git a/src/caffe/layers/power_layer.cu b/src/caffe/layers/power_layer.cu new file mode 100755 index 0000000..90d9440 --- /dev/null +++ b/src/caffe/layers/power_layer.cu @@ -0,0 +1,87 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void PowerLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + Dtype* top_data = top[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + // Special case where we can ignore the input: scale or power is 0. + if (diff_scale_ == Dtype(0)) { + Dtype value = (power_ == 0) ? 
Dtype(1) : pow(shift_, power_); + caffe_gpu_set(count, value, top_data); + return; + } + const Dtype* bottom_data = bottom[0]->gpu_data(); + caffe_copy(count, bottom_data, top_data); + if (scale_ != Dtype(1)) { + caffe_gpu_scal(count, scale_, top_data); + } + if (shift_ != Dtype(0)) { + caffe_gpu_add_scalar(count, shift_, top_data); + } + if (power_ != Dtype(1)) { + caffe_gpu_powx(count, top_data, power_, top_data); + } +} + +template +void PowerLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); + const Dtype* top_diff = top[0]->gpu_diff(); + if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) { + caffe_gpu_set(count, diff_scale_, bottom_diff); + } else { + const Dtype* bottom_data = bottom[0]->gpu_data(); + // Compute dy/dx = scale * power * (shift + scale * x)^(power - 1) + // = diff_scale * y / (shift + scale * x) + if (power_ == Dtype(2)) { + // Special case for y = (shift + scale * x)^2 + // -> dy/dx = 2 * scale * (shift + scale * x) + // = diff_scale * shift + diff_scale * scale * x + caffe_gpu_axpby(count, diff_scale_ * scale_, bottom_data, + Dtype(0), bottom_diff); + if (shift_ != Dtype(0)) { + caffe_gpu_add_scalar(count, diff_scale_ * shift_, bottom_diff); + } + } else if (shift_ == Dtype(0)) { + // Special case for y = (scale * x)^power + // -> dy/dx = scale * power * (scale * x)^(power - 1) + // = scale * power * (scale * x)^power * (scale * x)^(-1) + // = power * y / x + const Dtype* top_data = top[0]->gpu_data(); + caffe_gpu_div(count, top_data, bottom_data, bottom_diff); + caffe_gpu_scal(count, power_, bottom_diff); + } else { + caffe_copy(count, bottom_data, bottom_diff); + if (scale_ != Dtype(1)) { + caffe_gpu_scal(count, scale_, bottom_diff); + } + if (shift_ != Dtype(0)) { + caffe_gpu_add_scalar(count, shift_, bottom_diff); + } + const Dtype* top_data = top[0]->gpu_data(); + caffe_gpu_div(count, top_data, bottom_diff, bottom_diff); + if (diff_scale_ != Dtype(1)) { + caffe_gpu_scal(count, diff_scale_, bottom_diff); + } + } + } + caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff); + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(PowerLayer); + + +} // namespace caffe diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp new file mode 100755 index 0000000..8183175 --- /dev/null +++ b/src/caffe/layers/prelu_layer.cpp @@ -0,0 +1,140 @@ +#include +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void PReLULayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + CHECK_GE(bottom[0]->num_axes(), 2) + << "Number of axes of bottom blob must be >=2."; + PReLUParameter prelu_param = this->layer_param().prelu_param(); + int channels = bottom[0]->channels(); + channel_shared_ = prelu_param.channel_shared(); + if (this->blobs_.size() > 0) { + LOG(INFO) << "Skipping parameter initialization"; + } else { + this->blobs_.resize(1); + if (channel_shared_) { + this->blobs_[0].reset(new Blob(vector(0))); + } else { + this->blobs_[0].reset(new Blob(vector(1, channels))); + } + shared_ptr > filler; + if (prelu_param.has_filler()) { + filler.reset(GetFiller(prelu_param.filler())); + } else { + FillerParameter filler_param; + filler_param.set_type("constant"); + filler_param.set_value(0.25); + filler.reset(GetFiller(filler_param)); + } + filler->Fill(this->blobs_[0].get()); + } + if (channel_shared_) { + 
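+ // A shared slope means a single learned value applied to every channel,
+ // so the parameter blob must hold exactly one element.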
CHECK_EQ(this->blobs_[0]->count(), 1) + << "Negative slope size is inconsistent with prototxt config"; + } else { + CHECK_EQ(this->blobs_[0]->count(), channels) + << "Negative slope size is inconsistent with prototxt config"; + } + + // Propagate gradients to the parameters (as directed by backward pass). + this->param_propagate_down_.resize(this->blobs_.size(), true); + multiplier_.Reshape(vector(1, bottom[0]->count(1))); + backward_buff_.Reshape(vector(1, bottom[0]->count(1))); + caffe_set(multiplier_.count(), Dtype(1), multiplier_.mutable_cpu_data()); +} + +template +void PReLULayer::Reshape(const vector*>& bottom, + const vector*>& top) { + CHECK_GE(bottom[0]->num_axes(), 2) + << "Number of axes of bottom blob must be >=2."; + top[0]->ReshapeLike(*bottom[0]); + if (bottom[0] == top[0]) { + // For in-place computation + bottom_memory_.ReshapeLike(*bottom[0]); + } +} + +template +void PReLULayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + const int count = bottom[0]->count(); + const int dim = bottom[0]->count(2); + const int channels = bottom[0]->channels(); + const Dtype* slope_data = this->blobs_[0]->cpu_data(); + + // For in-place computation + if (bottom[0] == top[0]) { + caffe_copy(count, bottom_data, bottom_memory_.mutable_cpu_data()); + } + + // if channel_shared, channel index in the following computation becomes + // always zero. + const int div_factor = channel_shared_ ? channels : 1; + for (int i = 0; i < count; ++i) { + int c = (i / dim) % channels / div_factor; + top_data[i] = std::max(bottom_data[i], Dtype(0)) + + slope_data[c] * std::min(bottom_data[i], Dtype(0)); + } +} + +template +void PReLULayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + const Dtype* slope_data = this->blobs_[0]->cpu_data(); + const Dtype* top_diff = top[0]->cpu_diff(); + const int count = bottom[0]->count(); + const int dim = bottom[0]->count(2); + const int channels = bottom[0]->channels(); + + // For in-place computation + if (top[0] == bottom[0]) { + bottom_data = bottom_memory_.cpu_data(); + } + + // if channel_shared, channel index in the following computation becomes + // always zero. + const int div_factor = channel_shared_ ? channels : 1; + + // Propagte to param + // Since to write bottom diff will affect top diff if top and bottom blobs + // are identical (in-place computaion), we first compute param backward to + // keep top_diff unchanged. 
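+ // The slope gradient for channel c accumulates top_diff * bottom_data over
+ // all elements of that channel where the input is negative, since the slope
+ // only affects outputs for bottom_data <= 0.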
+ if (this->param_propagate_down_[0]) { + Dtype* slope_diff = this->blobs_[0]->mutable_cpu_diff(); + for (int i = 0; i < count; ++i) { + int c = (i / dim) % channels / div_factor; + slope_diff[c] += top_diff[i] * bottom_data[i] * (bottom_data[i] <= 0); + } + } + // Propagate to bottom + if (propagate_down[0]) { + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + for (int i = 0; i < count; ++i) { + int c = (i / dim) % channels / div_factor; + bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0) + + slope_data[c] * (bottom_data[i] <= 0)); + } + } +} + + +#ifdef CPU_ONLY +STUB_GPU(PReLULayer); +#endif + +INSTANTIATE_CLASS(PReLULayer); +REGISTER_LAYER_CLASS(PReLU); + +} // namespace caffe diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu new file mode 100755 index 0000000..e1f2004 --- /dev/null +++ b/src/caffe/layers/prelu_layer.cu @@ -0,0 +1,128 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +// CUDA kernele for forward +template +__global__ void PReLUForward(const int n, const int channels, const int dim, + const Dtype* in, Dtype* out, const Dtype* slope_data, + const int div_factor) { + CUDA_KERNEL_LOOP(index, n) { + int c = (index / dim) % channels / div_factor; + out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; + } +} + +// CUDA kernel for bottom backward +template +__global__ void PReLUBackward(const int n, const int channels, const int dim, + const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, + const Dtype* slope_data, const int div_factor) { + CUDA_KERNEL_LOOP(index, n) { + int c = (index / dim) % channels / div_factor; + out_diff[index] = in_diff[index] * ((in_data[index] > 0) + + (in_data[index] <= 0) * slope_data[c]); + } +} + +// CUDA kernel for element-wise parameter backward +template +__global__ void PReLUParamBackward(const int n, const Dtype* in_diff, + const Dtype* in_data, Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) { + out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0); + } +} + +template +void PReLULayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + const int dim = bottom[0]->count(2); + const int channels = bottom[0]->channels(); + const Dtype* slope_data = this->blobs_[0]->gpu_data(); + const int div_factor = channel_shared_ ? channels : 1; + + // For in-place computation + if (top[0] == bottom[0]) { + caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data()); + } + + // NOLINT_NEXT_LINE(whitespace/operators) + PReLUForward<<>>( + count, channels, dim, bottom_data, top_data, slope_data, div_factor); + CUDA_POST_KERNEL_CHECK; +} + +template +void PReLULayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + const int count = bottom[0]->count(); + const int dim = bottom[0]->count(2); + const int channels = bottom[0]->channels(); + + // For in-place computation + if (top[0] == bottom[0]) { + bottom_data = bottom_memory_.gpu_data(); + } + + // Propagate to param + // Since to write bottom diff will affect top diff if top and bottom blobs + // are identical (in-place computaion), we first compute param backward to + // keep top_diff unchanged. 
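+ // Strategy: an element-wise kernel fills backward_buff_ with
+ // top_diff * bottom_data * (bottom_data <= 0) for one image at a time, and
+ // the result is reduced with a dot product against the ones multiplier
+ // (shared slope) or with a gemv (one slope per channel).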
+ if (this->param_propagate_down_[0]) { + Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); + int cdim = channels * dim; + Dtype dsum = 0.; + for (int n = 0; n < bottom[0]->num(); ++n) { + // compute element-wise diff + // NOLINT_NEXT_LINE(whitespace/operators) + PReLUParamBackward<<>>( + cdim, top_diff + top[0]->offset(n), + bottom_data + bottom[0]->offset(n), + backward_buff_.mutable_gpu_diff()); + CUDA_POST_KERNEL_CHECK; + if (channel_shared_) { + Dtype d; + caffe_gpu_dot(channels * dim, backward_buff_.gpu_diff(), + multiplier_.gpu_data(), &d); + dsum += d; + } else { + caffe_gpu_gemv(CblasNoTrans, channels, dim, 1., + backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1., + slope_diff); + } + } + if (channel_shared_) { + caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff); + } + } + // Propagate to bottom + if (propagate_down[0]) { + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const Dtype* slope_data = this->blobs_[0]->gpu_data(); + int div_factor = channel_shared_ ? channels : 1; + // NOLINT_NEXT_LINE(whitespace/operators) + PReLUBackward<<>>( + count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data, + div_factor); + CUDA_POST_KERNEL_CHECK; + } +} + + +INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer); + + +} // namespace caffe diff --git a/src/caffe/layers/reduction_layer.cpp b/src/caffe/layers/reduction_layer.cpp new file mode 100755 index 0000000..8ae6329 --- /dev/null +++ b/src/caffe/layers/reduction_layer.cpp @@ -0,0 +1,132 @@ +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void ReductionLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + op_ = this->layer_param_.reduction_param().operation(); +} + +template +void ReductionLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + axis_ = bottom[0]->CanonicalAxisIndex( + this->layer_param_.reduction_param().axis()); + // In the output, we'll keep all axes up to the reduction axis, but + // throw away any after that. + // Note: currently reducing along non-tail axes is not supported; otherwise, + // we'd need to also copy any axes following an "end_axis". 
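+ // Example: a bottom blob of shape (N, C, H, W) reduced along axis 1 yields
+ // a top blob of shape (N), with num_ == N and dim_ == C*H*W, i.e. every
+ // sample collapses to a single scalar.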
+ vector top_shape(bottom[0]->shape().begin(), + bottom[0]->shape().begin() + axis_); + top[0]->Reshape(top_shape); + num_ = bottom[0]->count(0, axis_); + dim_ = bottom[0]->count(axis_); + CHECK_EQ(num_, top[0]->count()); + if (op_ == ReductionParameter_ReductionOp_SUM || + op_ == ReductionParameter_ReductionOp_MEAN) { + vector sum_mult_shape(1, dim_); + sum_multiplier_.Reshape(sum_mult_shape); + caffe_set(dim_, Dtype(1), sum_multiplier_.mutable_cpu_data()); + } + coeff_ = this->layer_param().reduction_param().coeff(); + if (op_ == ReductionParameter_ReductionOp_MEAN) { + coeff_ /= dim_; + } +} + +template +void ReductionLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + const Dtype* mult_data = NULL; + if (sum_multiplier_.count() > 0) { + mult_data = sum_multiplier_.cpu_data(); + } + Dtype* top_data = top[0]->mutable_cpu_data(); + for (int i = 0; i < num_; ++i) { + switch (op_) { + case ReductionParameter_ReductionOp_SUM: + case ReductionParameter_ReductionOp_MEAN: + *top_data = caffe_cpu_dot(dim_, mult_data, bottom_data); + break; + case ReductionParameter_ReductionOp_ASUM: + *top_data = caffe_cpu_asum(dim_, bottom_data); + break; + case ReductionParameter_ReductionOp_SUMSQ: + *top_data = caffe_cpu_dot(dim_, bottom_data, bottom_data); + break; + default: + LOG(FATAL) << "Unknown reduction op: " + << ReductionParameter_ReductionOp_Name(op_); + } + bottom_data += dim_; + ++top_data; + } + if (coeff_ != Dtype(1)) { + // Reset the top_data pointer. + top_data = top[0]->mutable_cpu_data(); + caffe_scal(num_, coeff_, top_data); + } +} + +template +void ReductionLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (!propagate_down[0]) { return; } + // Get bottom_data, if needed. 
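+ // SUM and MEAN have a constant per-element gradient (coeff_, with the 1/dim_
+ // of MEAN already folded into coeff_ at Reshape time), so the input is not
+ // needed; ASUM needs sign(x) and SUMSQ needs 2*x, so those read bottom_data.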
+ const Dtype* bottom_data = NULL; + switch (op_) { + // Operations that don't need bottom_data + case ReductionParameter_ReductionOp_SUM: + case ReductionParameter_ReductionOp_MEAN: + break; + // Operations that need bottom_data + case ReductionParameter_ReductionOp_ASUM: + case ReductionParameter_ReductionOp_SUMSQ: + bottom_data = bottom[0]->cpu_data(); + break; + default: + LOG(FATAL) << "Unknown reduction op: " + << ReductionParameter_ReductionOp_Name(op_); + } + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + for (int i = 0; i < num_; ++i) { + const Dtype bottom_coeff = (*top_diff) * coeff_; + switch (op_) { + case ReductionParameter_ReductionOp_SUM: + case ReductionParameter_ReductionOp_MEAN: + caffe_set(dim_, bottom_coeff, bottom_diff); + break; + case ReductionParameter_ReductionOp_ASUM: + caffe_cpu_sign(dim_, bottom_data, bottom_diff); + caffe_scal(dim_, bottom_coeff, bottom_diff); + break; + case ReductionParameter_ReductionOp_SUMSQ: + caffe_cpu_scale(dim_, 2 * bottom_coeff, bottom_data, bottom_diff); + break; + default: + LOG(FATAL) << "Unknown reduction op: " + << ReductionParameter_ReductionOp_Name(op_); + } + bottom_data += dim_; + bottom_diff += dim_; + ++top_diff; + } +} + +#ifdef CPU_ONLY +STUB_GPU(ReductionLayer); +#endif + +INSTANTIATE_CLASS(ReductionLayer); +REGISTER_LAYER_CLASS(Reduction); + +} // namespace caffe diff --git a/src/caffe/layers/reduction_layer.cu b/src/caffe/layers/reduction_layer.cu new file mode 100755 index 0000000..2dbd3bc --- /dev/null +++ b/src/caffe/layers/reduction_layer.cu @@ -0,0 +1,93 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void ReductionLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + const Dtype* mult_data = NULL; + if (sum_multiplier_.count() > 0) { + mult_data = sum_multiplier_.gpu_data(); + } + Dtype* top_data = top[0]->mutable_cpu_data(); + for (int i = 0; i < num_; ++i) { + switch (op_) { + case ReductionParameter_ReductionOp_SUM: + case ReductionParameter_ReductionOp_MEAN: + caffe_gpu_dot(dim_, mult_data, bottom_data, top_data); + break; + case ReductionParameter_ReductionOp_ASUM: + caffe_gpu_asum(dim_, bottom_data, top_data); + break; + case ReductionParameter_ReductionOp_SUMSQ: + caffe_gpu_dot(dim_, bottom_data, bottom_data, top_data); + break; + default: + LOG(FATAL) << "Unknown reduction op: " + << ReductionParameter_ReductionOp_Name(op_); + } + bottom_data += dim_; + ++top_data; + } + if (coeff_ != Dtype(1)) { + // Reset the top_data pointer. + top_data = top[0]->mutable_gpu_data(); + caffe_gpu_scal(num_, coeff_, top_data); + } +} + +template +void ReductionLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (!propagate_down[0]) { return; } + // Get bottom_data, if needed. 
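+ // Note that top_diff is fetched with cpu_diff(): the per-output scalar
+ // coefficient is dereferenced on the host inside the loop below, while the
+ // bottom diff itself is written on the GPU.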
+ const Dtype* bottom_data = NULL; + switch (op_) { + // Operations that don't need bottom_data + case ReductionParameter_ReductionOp_SUM: + case ReductionParameter_ReductionOp_MEAN: + break; + // Operations that need bottom_data + case ReductionParameter_ReductionOp_ASUM: + case ReductionParameter_ReductionOp_SUMSQ: + bottom_data = bottom[0]->gpu_data(); + break; + default: + LOG(FATAL) << "Unknown reduction op: " + << ReductionParameter_ReductionOp_Name(op_); + } + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + for (int i = 0; i < num_; ++i) { + const Dtype bottom_coeff = (*top_diff) * coeff_; + switch (op_) { + case ReductionParameter_ReductionOp_SUM: + case ReductionParameter_ReductionOp_MEAN: + caffe_gpu_set(dim_, bottom_coeff, bottom_diff); + break; + case ReductionParameter_ReductionOp_ASUM: + caffe_gpu_sign(dim_, bottom_data, bottom_diff); + caffe_gpu_scal(dim_, bottom_coeff, bottom_diff); + break; + case ReductionParameter_ReductionOp_SUMSQ: + caffe_gpu_scale(dim_, 2 * bottom_coeff, bottom_data, bottom_diff); + break; + default: + LOG(FATAL) << "Unknown reduction op: " + << ReductionParameter_ReductionOp_Name(op_); + } + bottom_data += dim_; + bottom_diff += dim_; + ++top_diff; + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(ReductionLayer); + +} // namespace caffe diff --git a/src/caffe/layers/relu_layer.cpp b/src/caffe/layers/relu_layer.cpp new file mode 100755 index 0000000..cc00319 --- /dev/null +++ b/src/caffe/layers/relu_layer.cpp @@ -0,0 +1,46 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void ReLULayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + const int count = bottom[0]->count(); + Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); + for (int i = 0; i < count; ++i) { + top_data[i] = std::max(bottom_data[i], Dtype(0)) + + negative_slope * std::min(bottom_data[i], Dtype(0)); + } +} + +template +void ReLULayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const int count = bottom[0]->count(); + Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); + for (int i = 0; i < count; ++i) { + bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0) + + negative_slope * (bottom_data[i] <= 0)); + } + } +} + + +#ifdef CPU_ONLY +STUB_GPU(ReLULayer); +#endif + +INSTANTIATE_CLASS(ReLULayer); + +} // namespace caffe diff --git a/src/caffe/layers/relu_layer.cu b/src/caffe/layers/relu_layer.cu new file mode 100755 index 0000000..b8924c8 --- /dev/null +++ b/src/caffe/layers/relu_layer.cu @@ -0,0 +1,65 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out, + Dtype negative_slope) { + CUDA_KERNEL_LOOP(index, n) { + out[index] = in[index] > 0 ? 
in[index] : in[index] * negative_slope; + } +} + +template +void ReLULayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); + // NOLINT_NEXT_LINE(whitespace/operators) + ReLUForward<<>>( + count, bottom_data, top_data, negative_slope); + CUDA_POST_KERNEL_CHECK; + // << " count: " << count << " bottom_data: " + // << (unsigned long)bottom_data + // << " top_data: " << (unsigned long)top_data + // << " blocks: " << CAFFE_GET_BLOCKS(count) + // << " threads: " << CAFFE_CUDA_NUM_THREADS; +} + +template +__global__ void ReLUBackward(const int n, const Dtype* in_diff, + const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { + CUDA_KERNEL_LOOP(index, n) { + out_diff[index] = in_diff[index] * ((in_data[index] > 0) + + (in_data[index] <= 0) * negative_slope); + } +} + +template +void ReLULayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); + Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); + // NOLINT_NEXT_LINE(whitespace/operators) + ReLUBackward<<>>( + count, top_diff, bottom_data, bottom_diff, negative_slope); + CUDA_POST_KERNEL_CHECK; + } +} + + +INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer); + + +} // namespace caffe diff --git a/src/caffe/layers/reshape_layer.cpp b/src/caffe/layers/reshape_layer.cpp new file mode 100755 index 0000000..ffe970f --- /dev/null +++ b/src/caffe/layers/reshape_layer.cpp @@ -0,0 +1,95 @@ +#include + +#include "caffe/common_layers.hpp" +#include "caffe/layer.hpp" + +namespace caffe { + +template +void ReshapeLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + inferred_axis_ = -1; + copy_axes_.clear(); + const BlobShape& top_blob_shape = this->layer_param_.reshape_param().shape(); + const int top_num_axes = top_blob_shape.dim_size(); + constant_count_ = 1; + for (int i = 0; i < top_num_axes; ++i) { + const int top_dim = top_blob_shape.dim(i); + if (top_dim == 0) { + copy_axes_.push_back(i); + } else if (top_dim == -1) { + CHECK_EQ(inferred_axis_, -1) << "new shape contains multiple " + << "-1 dims; at most a single (1) value of -1 may be specified"; + inferred_axis_ = i; + } else { + constant_count_ *= top_dim; + } + } +} + +template +void ReshapeLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + const int input_start_axis = this->layer_param_.reshape_param().axis(); + const int start_axis = (input_start_axis >= 0) ? input_start_axis : + bottom[0]->num_axes() + input_start_axis + 1; + CHECK_GE(start_axis, 0) << "axis " << input_start_axis << " out of range"; + CHECK_LE(start_axis, bottom[0]->num_axes()) << "axis " << input_start_axis + << " out of range for " << bottom[0]->num_axes() << "-D input blob"; + const int num_axes = this->layer_param_.reshape_param().num_axes(); + CHECK_GE(num_axes, -1) << "num_axes must be >= 0, or -1 for all"; + const int end_axis = + (num_axes == -1) ? 
bottom[0]->num_axes() : (start_axis + num_axes); + CHECK_LE(end_axis, bottom[0]->num_axes()) + << "end_axis = axis + num_axes is out of range"; + const int num_axes_replaced = end_axis - start_axis; + const int num_axes_retained = bottom[0]->num_axes() - num_axes_replaced; + const BlobShape& top_blob_shape = this->layer_param_.reshape_param().shape(); + const int num_new_axes = top_blob_shape.dim_size(); + vector top_shape(num_axes_retained + num_new_axes); + int top_shape_index = 0; + for (int i = 0; i < start_axis; ++i) { + top_shape[top_shape_index++] = bottom[0]->shape(i); + } + for (int i = 0; i < num_new_axes; ++i) { + top_shape[top_shape_index++] = top_blob_shape.dim(i); + } + for (int i = end_axis; i < bottom[0]->num_axes(); ++i) { + top_shape[top_shape_index++] = bottom[0]->shape(i); + } + CHECK_EQ(top_shape_index, top_shape.size()); + for (int i = 0; i < copy_axes_.size(); ++i) { + const int copy_axis_index = copy_axes_[i]; + CHECK_GT(bottom[0]->num_axes(), start_axis + copy_axis_index) + << "new shape contains a 0, but there was no corresponding bottom axis " + << "to copy"; + top_shape[start_axis + copy_axis_index] = + bottom[0]->shape(start_axis + copy_axis_index); + } + if (inferred_axis_ >= 0) { + // A -1 dim was specified; infer the correct dimension by computing the + // product of the other dimensions. + int explicit_count = constant_count_; + explicit_count *= bottom[0]->count(0, start_axis); + explicit_count *= bottom[0]->count(end_axis); + for (int i = 0; i < copy_axes_.size(); ++i) { + const int copy_axis_index = copy_axes_[i]; + explicit_count *= top_shape[start_axis + copy_axis_index]; + } + CHECK_EQ(0, bottom[0]->count() % explicit_count) << "bottom count (" + << bottom[0]->count() << ") must be divisible by the product of " + << "the specified dimensions (" << explicit_count << ")"; + const int inferred_dim = bottom[0]->count() / explicit_count; + top_shape[start_axis + inferred_axis_] = inferred_dim; + } + top[0]->Reshape(top_shape); + CHECK_EQ(top[0]->count(), bottom[0]->count()) + << "output count must match input count"; + top[0]->ShareData(*bottom[0]); + top[0]->ShareDiff(*bottom[0]); +} + +INSTANTIATE_CLASS(ReshapeLayer); +REGISTER_LAYER_CLASS(Reshape); + +} // namespace caffe diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp new file mode 100755 index 0000000..cc236fe --- /dev/null +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp @@ -0,0 +1,80 @@ +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void SigmoidCrossEntropyLossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + LossLayer::LayerSetUp(bottom, top); + sigmoid_bottom_vec_.clear(); + sigmoid_bottom_vec_.push_back(bottom[0]); + sigmoid_top_vec_.clear(); + sigmoid_top_vec_.push_back(sigmoid_output_.get()); + sigmoid_layer_->SetUp(sigmoid_bottom_vec_, sigmoid_top_vec_); +} + +template +void SigmoidCrossEntropyLossLayer::Reshape( + const vector*>& bottom, const vector*>& top) { + LossLayer::Reshape(bottom, top); + CHECK_EQ(bottom[0]->count(), bottom[1]->count()) << + "SIGMOID_CROSS_ENTROPY_LOSS layer inputs must have the same count."; + sigmoid_layer_->Reshape(sigmoid_bottom_vec_, sigmoid_top_vec_); +} + +template +void SigmoidCrossEntropyLossLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + // The forward pass computes the sigmoid outputs. 
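+ // The per-element loss below is the numerically stable form of
+ //   -[ t * log(sigmoid(x)) + (1 - t) * log(1 - sigmoid(x)) ],
+ // rewritten so that exp() is only ever evaluated at a non-positive argument.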
+ sigmoid_bottom_vec_[0] = bottom[0]; + sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); + // Compute the loss (negative log likelihood) + const int count = bottom[0]->count(); + const int num = bottom[0]->num(); + // Stable version of loss computation from input data + const Dtype* input_data = bottom[0]->cpu_data(); + const Dtype* target = bottom[1]->cpu_data(); + Dtype loss = 0; + for (int i = 0; i < count; ++i) { + loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) - + log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); + } + top[0]->mutable_cpu_data()[0] = loss / num; +} + +template +void SigmoidCrossEntropyLossLayer::Backward_cpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[1]) { + LOG(FATAL) << this->type() + << " Layer cannot backpropagate to label inputs."; + } + if (propagate_down[0]) { + // First, compute the diff + const int count = bottom[0]->count(); + const int num = bottom[0]->num(); + const Dtype* sigmoid_output_data = sigmoid_output_->cpu_data(); + const Dtype* target = bottom[1]->cpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + caffe_sub(count, sigmoid_output_data, target, bottom_diff); + // Scale down gradient + const Dtype loss_weight = top[0]->cpu_diff()[0]; + caffe_scal(count, loss_weight / num, bottom_diff); + } +} + +#ifdef CPU_ONLY +STUB_GPU_BACKWARD(SigmoidCrossEntropyLossLayer, Backward); +#endif + +INSTANTIATE_CLASS(SigmoidCrossEntropyLossLayer); +REGISTER_LAYER_CLASS(SigmoidCrossEntropyLoss); + +} // namespace caffe diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu new file mode 100755 index 0000000..547fa80 --- /dev/null +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu @@ -0,0 +1,37 @@ +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void SigmoidCrossEntropyLossLayer::Backward_gpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[1]) { + LOG(FATAL) << this->type() + << " Layer cannot backpropagate to label inputs."; + } + if (propagate_down[0]) { + // First, compute the diff + const int count = bottom[0]->count(); + const int num = bottom[0]->num(); + const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data(); + const Dtype* target = bottom[1]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + caffe_copy(count, sigmoid_output_data, bottom_diff); + caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff); + // Scale down gradient + const Dtype loss_weight = top[0]->cpu_diff()[0]; + caffe_gpu_scal(count, loss_weight / num, bottom_diff); + } +} + +INSTANTIATE_LAYER_GPU_BACKWARD(SigmoidCrossEntropyLossLayer); + + +} // namespace caffe diff --git a/src/caffe/layers/sigmoid_layer.cpp b/src/caffe/layers/sigmoid_layer.cpp new file mode 100755 index 0000000..48c3849 --- /dev/null +++ b/src/caffe/layers/sigmoid_layer.cpp @@ -0,0 +1,49 @@ +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +inline Dtype sigmoid(Dtype x) { + return 1. / (1. 
+ exp(-x)); +} + +template +void SigmoidLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + const int count = bottom[0]->count(); + for (int i = 0; i < count; ++i) { + top_data[i] = sigmoid(bottom_data[i]); + } +} + +template +void SigmoidLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + const Dtype* top_data = top[0]->cpu_data(); + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const int count = bottom[0]->count(); + for (int i = 0; i < count; ++i) { + const Dtype sigmoid_x = top_data[i]; + bottom_diff[i] = top_diff[i] * sigmoid_x * (1. - sigmoid_x); + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(SigmoidLayer); +#endif + +INSTANTIATE_CLASS(SigmoidLayer); + + +} // namespace caffe diff --git a/src/caffe/layers/sigmoid_layer.cu b/src/caffe/layers/sigmoid_layer.cu new file mode 100755 index 0000000..e1af065 --- /dev/null +++ b/src/caffe/layers/sigmoid_layer.cu @@ -0,0 +1,62 @@ +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +__global__ void SigmoidForward(const int n, const Dtype* in, Dtype* out) { + CUDA_KERNEL_LOOP(index, n) { + out[index] = 1. / (1. + exp(-in[index])); + } +} + +template +void SigmoidLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + SigmoidForward<<>>( + count, bottom_data, top_data); + CUDA_POST_KERNEL_CHECK; + // << " count: " << count << " bottom_data: " + // << (unsigned long)bottom_data + // << " top_data: " << (unsigned long)top_data + // << " blocks: " << CAFFE_GET_BLOCKS(count) + // << " threads: " << CAFFE_CUDA_NUM_THREADS; +} + +template +__global__ void SigmoidBackward(const int n, const Dtype* in_diff, + const Dtype* out_data, Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) { + const Dtype sigmoid_x = out_data[index]; + out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); + } +} + +template +void SigmoidLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + const Dtype* top_data = top[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + SigmoidBackward<<>>( + count, top_diff, top_data, bottom_diff); + CUDA_POST_KERNEL_CHECK; + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(SigmoidLayer); + + +} // namespace caffe diff --git a/src/caffe/layers/silence_layer.cpp b/src/caffe/layers/silence_layer.cpp new file mode 100755 index 0000000..4abf9ef --- /dev/null +++ b/src/caffe/layers/silence_layer.cpp @@ -0,0 +1,27 @@ +#include + +#include "caffe/common_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void SilenceLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + for (int i = 0; i < bottom.size(); ++i) { + if (propagate_down[i]) { + caffe_set(bottom[i]->count(), Dtype(0), + bottom[i]->mutable_cpu_data()); + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(SilenceLayer); +#endif + +INSTANTIATE_CLASS(SilenceLayer); 
+REGISTER_LAYER_CLASS(Silence); + +} // namespace caffe diff --git a/src/caffe/layers/silence_layer.cu b/src/caffe/layers/silence_layer.cu new file mode 100755 index 0000000..8d044ee --- /dev/null +++ b/src/caffe/layers/silence_layer.cu @@ -0,0 +1,28 @@ +#include + +#include "caffe/common_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void SilenceLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + // Do nothing. +} + +template +void SilenceLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + for (int i = 0; i < bottom.size(); ++i) { + if (propagate_down[i]) { + caffe_gpu_set(bottom[i]->count(), Dtype(0), + bottom[i]->mutable_gpu_data()); + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(SilenceLayer); + +} // namespace caffe diff --git a/src/caffe/layers/slice_layer.cpp b/src/caffe/layers/slice_layer.cpp new file mode 100755 index 0000000..e4418c9 --- /dev/null +++ b/src/caffe/layers/slice_layer.cpp @@ -0,0 +1,120 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void SliceLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + const SliceParameter& slice_param = this->layer_param_.slice_param(); + CHECK(!(slice_param.has_axis() && slice_param.has_slice_dim())) + << "Either axis or slice_dim should be specified; not both."; + slice_point_.clear(); + std::copy(slice_param.slice_point().begin(), + slice_param.slice_point().end(), + std::back_inserter(slice_point_)); +} + +template +void SliceLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + const int num_axes = bottom[0]->num_axes(); + const SliceParameter& slice_param = this->layer_param_.slice_param(); + if (slice_param.has_slice_dim()) { + slice_axis_ = static_cast(slice_param.slice_dim()); + // Don't allow negative indexing for slice_dim, a uint32 -- almost + // certainly unintended. 
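+ // slice_dim is the older way of selecting the axis; the axis field handled
+ // below goes through CanonicalAxisIndex and therefore also accepts negative
+ // (from-the-end) indices.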
+ CHECK_GE(slice_axis_, 0) << "casting slice_dim from uint32 to int32 " + << "produced negative result; slice_dim must satisfy " + << "0 <= slice_dim < " << kMaxBlobAxes; + CHECK_LT(slice_axis_, num_axes) << "slice_dim out of range."; + } else { + slice_axis_ = bottom[0]->CanonicalAxisIndex(slice_param.axis()); + } + vector top_shape = bottom[0]->shape(); + const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + num_slices_ = bottom[0]->count(0, slice_axis_); + slice_size_ = bottom[0]->count(slice_axis_ + 1); + int count = 0; + if (slice_point_.size() != 0) { + CHECK_EQ(slice_point_.size(), top.size() - 1); + CHECK_LE(top.size(), bottom_slice_axis); + int prev = 0; + vector slices; + for (int i = 0; i < slice_point_.size(); ++i) { + CHECK_GT(slice_point_[i], prev); + slices.push_back(slice_point_[i] - prev); + prev = slice_point_[i]; + } + slices.push_back(bottom_slice_axis - prev); + for (int i = 0; i < top.size(); ++i) { + top_shape[slice_axis_] = slices[i]; + top[i]->Reshape(top_shape); + count += top[i]->count(); + } + } else { + CHECK_EQ(bottom_slice_axis % top.size(), 0) + << "Number of top blobs (" << top.size() << ") should evenly " + << "divide input slice axis (" << bottom_slice_axis << ")"; + top_shape[slice_axis_] = bottom_slice_axis / top.size(); + for (int i = 0; i < top.size(); ++i) { + top[i]->Reshape(top_shape); + count += top[i]->count(); + } + } + CHECK_EQ(count, bottom[0]->count()); +} + +template +void SliceLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + int offset_slice_axis = 0; + const Dtype* bottom_data = bottom[0]->cpu_data(); + const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + for (int i = 0; i < top.size(); ++i) { + Dtype* top_data = top[i]->mutable_cpu_data(); + const int top_slice_axis = top[i]->shape(slice_axis_); + for (int n = 0; n < num_slices_; ++n) { + const int top_offset = n * top_slice_axis * slice_size_; + const int bottom_offset = + (n * bottom_slice_axis + offset_slice_axis) * slice_size_; + caffe_copy(top_slice_axis * slice_size_, + bottom_data + bottom_offset, top_data + top_offset); + } + offset_slice_axis += top_slice_axis; + } +} + +template +void SliceLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (!propagate_down[0]) { return; } + int offset_slice_axis = 0; + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->cpu_diff(); + const int top_slice_axis = top[i]->shape(slice_axis_); + for (int n = 0; n < num_slices_; ++n) { + const int top_offset = n * top_slice_axis * slice_size_; + const int bottom_offset = + (n * bottom_slice_axis + offset_slice_axis) * slice_size_; + caffe_copy(top_slice_axis * slice_size_, + top_diff + top_offset, bottom_diff + bottom_offset); + } + offset_slice_axis += top_slice_axis; + } +} + +#ifdef CPU_ONLY +STUB_GPU(SliceLayer); +#endif + +INSTANTIATE_CLASS(SliceLayer); +REGISTER_LAYER_CLASS(Slice); + +} // namespace caffe diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu new file mode 100755 index 0000000..796841d --- /dev/null +++ b/src/caffe/layers/slice_layer.cu @@ -0,0 +1,71 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +__global__ void Slice(const int nthreads, const Dtype* in_data, + const bool forward, const int num_slices, const int slice_size, 
+ const int bottom_slice_axis, const int top_slice_axis, + const int offset_slice_axis, Dtype* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int total_slice_size = slice_size * top_slice_axis; + const int slice_num = index / total_slice_size; + const int slice_index = index % total_slice_size; + const int bottom_index = slice_index + + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; + if (forward) { + out_data[index] = in_data[bottom_index]; + } else { + out_data[bottom_index] = in_data[index]; + } + } +} + +template +void SliceLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + int offset_slice_axis = 0; + const Dtype* bottom_data = bottom[0]->gpu_data(); + const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + const bool kForward = true; + for (int i = 0; i < top.size(); ++i) { + Dtype* top_data = top[i]->mutable_gpu_data(); + const int top_slice_axis = top[i]->shape(slice_axis_); + const int top_slice_size = top_slice_axis * slice_size_; + const int nthreads = top_slice_size * num_slices_; + Slice // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, bottom_data, kForward, num_slices_, slice_size_, + bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data); + offset_slice_axis += top_slice_axis; + } +} + +template +void SliceLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (!propagate_down[0]) { return; } + int offset_slice_axis = 0; + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + const bool kForward = false; + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + const int top_slice_axis = top[i]->shape(slice_axis_); + const int top_slice_size = top_slice_axis * slice_size_; + const int nthreads = top_slice_size * num_slices_; + Slice // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, top_diff, kForward, num_slices_, slice_size_, + bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff); + offset_slice_axis += top_slice_axis; + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(SliceLayer); + +} // namespace caffe diff --git a/src/caffe/layers/softmax_layer.cpp b/src/caffe/layers/softmax_layer.cpp new file mode 100755 index 0000000..04712c9 --- /dev/null +++ b/src/caffe/layers/softmax_layer.cpp @@ -0,0 +1,96 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void SoftmaxLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + softmax_axis_ = + bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis()); + top[0]->ReshapeLike(*bottom[0]); + vector mult_dims(1, bottom[0]->shape(softmax_axis_)); + sum_multiplier_.Reshape(mult_dims); + Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); + caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data); + outer_num_ = bottom[0]->count(0, softmax_axis_); + inner_num_ = bottom[0]->count(softmax_axis_ + 1); + vector scale_dims = bottom[0]->shape(); + scale_dims[softmax_axis_] = 1; + scale_.Reshape(scale_dims); +} + +template +void SoftmaxLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + Dtype* scale_data = scale_.mutable_cpu_data(); + int channels = bottom[0]->shape(softmax_axis_); + int dim = bottom[0]->count() / outer_num_; + 
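+ // dim == channels * inner_num_: the size of one outer slice, i.e. all
+ // softmax-axis values at all inner (spatial) positions for one outer index.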
caffe_copy(bottom[0]->count(), bottom_data, top_data); + // We need to subtract the max to avoid numerical issues, compute the exp, + // and then normalize. + for (int i = 0; i < outer_num_; ++i) { + // initialize scale_data to the first plane + caffe_copy(inner_num_, bottom_data + i * dim, scale_data); + for (int j = 0; j < channels; j++) { + for (int k = 0; k < inner_num_; k++) { + scale_data[k] = std::max(scale_data[k], + bottom_data[i * dim + j * inner_num_ + k]); + } + } + // subtraction + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, inner_num_, + 1, -1., sum_multiplier_.cpu_data(), scale_data, 1., top_data); + // exponentiation + caffe_exp(dim, top_data, top_data); + // sum after exp + caffe_cpu_gemv(CblasTrans, channels, inner_num_, 1., + top_data, sum_multiplier_.cpu_data(), 0., scale_data); + // division + for (int j = 0; j < channels; j++) { + caffe_div(inner_num_, top_data, scale_data, top_data); + top_data += inner_num_; + } + } +} + +template +void SoftmaxLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + const Dtype* top_diff = top[0]->cpu_diff(); + const Dtype* top_data = top[0]->cpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + Dtype* scale_data = scale_.mutable_cpu_data(); + int channels = top[0]->shape(softmax_axis_); + int dim = top[0]->count() / outer_num_; + caffe_copy(top[0]->count(), top_diff, bottom_diff); + for (int i = 0; i < outer_num_; ++i) { + // compute dot(top_diff, top_data) and subtract them from the bottom diff + for (int k = 0; k < inner_num_; ++k) { + scale_data[k] = caffe_cpu_strided_dot(channels, + bottom_diff + i * dim + k, inner_num_, + top_data + i * dim + k, inner_num_); + } + // subtraction + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, inner_num_, 1, + -1., sum_multiplier_.cpu_data(), scale_data, 1., bottom_diff + i * dim); + } + // elementwise multiplication + caffe_mul(top[0]->count(), bottom_diff, top_data, bottom_diff); +} + + +#ifdef CPU_ONLY +STUB_GPU(SoftmaxLayer); +#endif + +INSTANTIATE_CLASS(SoftmaxLayer); + +} // namespace caffe diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu new file mode 100755 index 0000000..1f9c3a4 --- /dev/null +++ b/src/caffe/layers/softmax_layer.cu @@ -0,0 +1,149 @@ +#include +#include +#include + +#include "thrust/device_vector.h" + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +__global__ void kernel_channel_max(const int num, const int channels, + const int spatial_dim, const Dtype* data, Dtype* out) { + CUDA_KERNEL_LOOP(index, num * spatial_dim) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype maxval = -FLT_MAX; + for (int c = 0; c < channels; ++c) { + maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); + } + out[index] = maxval; + } +} + +template +__global__ void kernel_channel_subtract(const int count, + const int num, const int channels, + const int spatial_dim, const Dtype* channel_max, Dtype* data) { + CUDA_KERNEL_LOOP(index, count) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] -= channel_max[n * spatial_dim + s]; + } +} + +template +__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { + CUDA_KERNEL_LOOP(index, count) { + out[index] = exp(data[index]); + } +} + +template +__global__ void kernel_channel_sum(const int num, const int channels, + const int spatial_dim, const Dtype* data, Dtype* 
channel_sum) { + CUDA_KERNEL_LOOP(index, num * spatial_dim) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype sum = 0; + for (int c = 0; c < channels; ++c) { + sum += data[(n * channels + c) * spatial_dim + s]; + } + channel_sum[index] = sum; + } +} + +template +__global__ void kernel_channel_div(const int count, + const int num, const int channels, + const int spatial_dim, const Dtype* channel_sum, Dtype* data) { + CUDA_KERNEL_LOOP(index, count) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] /= channel_sum[n * spatial_dim + s]; + } +} + +template +__global__ void kernel_channel_dot(const int num, const int channels, + const int spatial_dim, const Dtype* data_1, const Dtype* data_2, + Dtype* channel_dot) { + CUDA_KERNEL_LOOP(index, num * spatial_dim) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype dot = 0; + for (int c = 0; c < channels; ++c) { + dot += (data_1[(n * channels + c) * spatial_dim + s] + * data_2[(n * channels + c) * spatial_dim + s]); + } + channel_dot[index] = dot; + } +} + +template +void SoftmaxLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + Dtype* scale_data = scale_.mutable_gpu_data(); + int count = bottom[0]->count(); + int channels = top[0]->shape(softmax_axis_); + caffe_copy(count, bottom_data, top_data); + // We need to subtract the max to avoid numerical issues, compute the exp, + // and then normalize. + // compute max + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_channel_max<<>>(outer_num_, channels, inner_num_, top_data, + scale_data); + // subtract + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_channel_subtract<<>>(count, outer_num_, channels, inner_num_, + scale_data, top_data); + // exponentiate + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_exp<<>>( + count, top_data, top_data); + // sum after exp + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_channel_sum<<>>(outer_num_, channels, inner_num_, top_data, + scale_data); + // divide + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_channel_div<<>>(count, outer_num_, channels, inner_num_, + scale_data, top_data); +} + +template +void SoftmaxLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* top_data = top[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + Dtype* scale_data = scale_.mutable_gpu_data(); + int count = top[0]->count(); + int channels = top[0]->shape(softmax_axis_); + caffe_copy(count, top_diff, bottom_diff); + // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff. 
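+ // This computes the softmax Jacobian-vector product
+ //   dE/dx_i = y_i * (dE/dy_i - sum_j dE/dy_j * y_j),
+ // where the dot product over j runs along the softmax axis for each
+ // (outer, spatial) position.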
+ // NOLINT_NEXT_LINE(whitespace/operators) + kernel_channel_dot<<>>(outer_num_, channels, inner_num_, + top_diff, top_data, scale_data); + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_channel_subtract<<>>(count, outer_num_, channels, inner_num_, + scale_data, bottom_diff); + // elementwise multiplication + caffe_gpu_mul(top[0]->count(), bottom_diff, top_data, bottom_diff); +} + +INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer); + + +} // namespace caffe diff --git a/src/caffe/layers/softmax_loss_layer.cpp b/src/caffe/layers/softmax_loss_layer.cpp new file mode 100755 index 0000000..ba312f6 --- /dev/null +++ b/src/caffe/layers/softmax_loss_layer.cpp @@ -0,0 +1,130 @@ +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/layer_factory.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void SoftmaxWithLossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + LossLayer::LayerSetUp(bottom, top); + LayerParameter softmax_param(this->layer_param_); + softmax_param.set_type("Softmax"); + softmax_layer_ = LayerRegistry::CreateLayer(softmax_param); + softmax_bottom_vec_.clear(); + softmax_bottom_vec_.push_back(bottom[0]); + softmax_top_vec_.clear(); + softmax_top_vec_.push_back(&prob_); + softmax_layer_->SetUp(softmax_bottom_vec_, softmax_top_vec_); + + has_ignore_label_ = + this->layer_param_.loss_param().has_ignore_label(); + if (has_ignore_label_) { + ignore_label_ = this->layer_param_.loss_param().ignore_label(); + } + normalize_ = this->layer_param_.loss_param().normalize(); +} + +template +void SoftmaxWithLossLayer::Reshape( + const vector*>& bottom, const vector*>& top) { + LossLayer::Reshape(bottom, top); + softmax_layer_->Reshape(softmax_bottom_vec_, softmax_top_vec_); + softmax_axis_ = + bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis()); + outer_num_ = bottom[0]->count(0, softmax_axis_); + inner_num_ = bottom[0]->count(softmax_axis_ + 1); + CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count()) + << "Number of labels must match number of predictions; " + << "e.g., if softmax axis == 1 and prediction shape is (N, C, H, W), " + << "label count (number of labels) must be N*H*W, " + << "with integer values in {0, 1, ..., C-1}."; + if (top.size() >= 2) { + // softmax output + top[1]->ReshapeLike(*bottom[0]); + } +} + +template +void SoftmaxWithLossLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + // The forward pass computes the softmax prob values. 
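+ // The reported loss is the sum of -log(prob[label]) over all non-ignored
+ // positions, clipped at FLT_MIN to avoid log(0), divided either by the
+ // number of counted labels (normalize) or by outer_num_.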
+ softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); + const Dtype* prob_data = prob_.cpu_data(); + const Dtype* label = bottom[1]->cpu_data(); + int dim = prob_.count() / outer_num_; + int count = 0; + Dtype loss = 0; + for (int i = 0; i < outer_num_; ++i) { + for (int j = 0; j < inner_num_; j++) { + const int label_value = static_cast(label[i * inner_num_ + j]); + if (has_ignore_label_ && label_value == ignore_label_) { + continue; + } + DCHECK_GE(label_value, 0); + DCHECK_LT(label_value, prob_.shape(softmax_axis_)); + loss -= log(std::max(prob_data[i * dim + label_value * inner_num_ + j], + Dtype(FLT_MIN))); + ++count; + } + } + if (normalize_) { + top[0]->mutable_cpu_data()[0] = loss / count; + } else { + top[0]->mutable_cpu_data()[0] = loss / outer_num_; + } + if (top.size() == 2) { + top[1]->ShareData(prob_); + } +} + +template +void SoftmaxWithLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (propagate_down[1]) { + LOG(FATAL) << this->type() + << " Layer cannot backpropagate to label inputs."; + } + if (propagate_down[0]) { + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const Dtype* prob_data = prob_.cpu_data(); + caffe_copy(prob_.count(), prob_data, bottom_diff); + const Dtype* label = bottom[1]->cpu_data(); + int dim = prob_.count() / outer_num_; + int count = 0; + for (int i = 0; i < outer_num_; ++i) { + for (int j = 0; j < inner_num_; ++j) { + const int label_value = static_cast(label[i * inner_num_ + j]); + if (has_ignore_label_ && label_value == ignore_label_) { + for (int c = 0; c < bottom[0]->shape(softmax_axis_); ++c) { + bottom_diff[i * dim + c * inner_num_ + j] = 0; + } + } else { + bottom_diff[i * dim + label_value * inner_num_ + j] -= 1; + ++count; + } + } + } + // Scale gradient + const Dtype loss_weight = top[0]->cpu_diff()[0]; + if (normalize_) { + caffe_scal(prob_.count(), loss_weight / count, bottom_diff); + } else { + caffe_scal(prob_.count(), loss_weight / outer_num_, bottom_diff); + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(SoftmaxWithLossLayer); +#endif + +INSTANTIATE_CLASS(SoftmaxWithLossLayer); +REGISTER_LAYER_CLASS(SoftmaxWithLoss); + +} // namespace caffe diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu new file mode 100755 index 0000000..7e0f3da --- /dev/null +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -0,0 +1,125 @@ +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +__global__ void SoftmaxLossForwardGPU(const int nthreads, + const Dtype* prob_data, const Dtype* label, Dtype* loss, + const int num, const int dim, const int spatial_dim, + const bool has_ignore_label_, const int ignore_label_, + Dtype* counts) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = static_cast(label[n * spatial_dim + s]); + if (has_ignore_label_ && label_value == ignore_label_) { + loss[index] = 0; + counts[index] = 0; + } else { + loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], + Dtype(FLT_MIN))); + counts[index] = 1; + } + } +} + +template +void SoftmaxWithLossLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); + const Dtype* prob_data = prob_.gpu_data(); + const Dtype* label = bottom[1]->gpu_data(); + const int dim = prob_.count() / 
outer_num_; + const int nthreads = outer_num_ * inner_num_; + // Since this memory is not used for anything until it is overwritten + // on the backward pass, we use it here to avoid having to allocate new GPU + // memory to accumulate intermediate results in the kernel. + Dtype* loss_data = bottom[0]->mutable_gpu_diff(); + // Similarly, this memory is never used elsewhere, and thus we can use it + // to avoid having to allocate additional GPU memory. + Dtype* counts = prob_.mutable_gpu_diff(); + // NOLINT_NEXT_LINE(whitespace/operators) + SoftmaxLossForwardGPU<<>>(nthreads, prob_data, label, loss_data, + outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); + Dtype loss; + caffe_gpu_asum(nthreads, loss_data, &loss); + if (normalize_) { + Dtype count; + caffe_gpu_asum(nthreads, counts, &count); + loss /= count; + } else { + loss /= outer_num_; + } + top[0]->mutable_cpu_data()[0] = loss; + if (top.size() == 2) { + top[1]->ShareData(prob_); + } +} + +template +__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, + const Dtype* label, Dtype* bottom_diff, const int num, const int dim, + const int spatial_dim, const bool has_ignore_label_, + const int ignore_label_, Dtype* counts) { + const int channels = dim / spatial_dim; + + CUDA_KERNEL_LOOP(index, nthreads) { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = static_cast(label[n * spatial_dim + s]); + + if (has_ignore_label_ && label_value == ignore_label_) { + for (int c = 0; c < channels; ++c) { + bottom_diff[n * dim + c * spatial_dim + s] = 0; + } + counts[index] = 0; + } else { + bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; + counts[index] = 1; + } + } +} + +template +void SoftmaxWithLossLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (propagate_down[1]) { + LOG(FATAL) << this->type() + << " Layer cannot backpropagate to label inputs."; + } + if (propagate_down[0]) { + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const Dtype* prob_data = prob_.gpu_data(); + const Dtype* top_data = top[0]->gpu_data(); + caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); + const Dtype* label = bottom[1]->gpu_data(); + const int dim = prob_.count() / outer_num_; + const int nthreads = outer_num_ * inner_num_; + // Since this memory is never used for anything else, + // we use to to avoid allocating new GPU memory. 
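+  // For reference, the gradient realised by the kernel below is the usual
+  // softmax-with-log-loss identity (a mathematical fact, not extra logic
+  // introduced here): with p = softmax(z) and one-hot target y,
+  //
+  //   dL/dz_c = p_c - y_c
+  //
+  // so bottom_diff starts as a copy of prob_, 1 is subtracted at the labelled
+  // channel, ignored positions get a zero gradient, and everything is finally
+  // rescaled by loss_weight / count (or loss_weight / outer_num_ when not
+  // normalizing).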
+ Dtype* counts = prob_.mutable_gpu_diff(); + // NOLINT_NEXT_LINE(whitespace/operators) + SoftmaxLossBackwardGPU<<>>(nthreads, top_data, label, bottom_diff, + outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); + const Dtype loss_weight = top[0]->cpu_diff()[0]; + if (normalize_) { + Dtype count; + caffe_gpu_asum(nthreads, counts, &count); + caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff); + } else { + caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff); + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); + +} // namespace caffe diff --git a/src/caffe/layers/split_layer.cpp b/src/caffe/layers/split_layer.cpp new file mode 100755 index 0000000..272cb59 --- /dev/null +++ b/src/caffe/layers/split_layer.cpp @@ -0,0 +1,60 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void SplitLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + count_ = bottom[0]->count(); + for (int i = 0; i < top.size(); ++i) { + // Do not allow in-place computation in the SplitLayer. Instead, share data + // by reference in the forward pass, and keep separate diff allocations in + // the backward pass. (Technically, it should be possible to share the diff + // blob of the first split output with the input, but this seems to cause + // some strange effects in practice...) + CHECK_NE(top[i], bottom[0]) << this->type() << " Layer does not " + "allow in-place computation."; + top[i]->ReshapeLike(*bottom[0]); + CHECK_EQ(count_, top[i]->count()); + } +} + +template +void SplitLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + for (int i = 0; i < top.size(); ++i) { + top[i]->ShareData(*bottom[0]); + } +} + +template +void SplitLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (!propagate_down[0]) { return; } + if (top.size() == 1) { + caffe_copy(count_, top[0]->cpu_diff(), bottom[0]->mutable_cpu_diff()); + return; + } + caffe_add(count_, top[0]->cpu_diff(), top[1]->cpu_diff(), + bottom[0]->mutable_cpu_diff()); + // Add remaining top blob diffs. + for (int i = 2; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + caffe_axpy(count_, Dtype(1.), top_diff, bottom_diff); + } +} + + +#ifdef CPU_ONLY +STUB_GPU(SplitLayer); +#endif + +INSTANTIATE_CLASS(SplitLayer); +REGISTER_LAYER_CLASS(Split); + +} // namespace caffe diff --git a/src/caffe/layers/split_layer.cu b/src/caffe/layers/split_layer.cu new file mode 100755 index 0000000..a4f5df2 --- /dev/null +++ b/src/caffe/layers/split_layer.cu @@ -0,0 +1,38 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void SplitLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + for (int i = 0; i < top.size(); ++i) { + top[i]->ShareData(*bottom[0]); + } +} + +template +void SplitLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (!propagate_down[0]) { return; } + if (top.size() == 1) { + caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff()); + return; + } + caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(), + bottom[0]->mutable_gpu_diff()); + // Add remaining top blob diffs. 
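+  // Since every top blob is an alias of the same bottom, the bottom gradient
+  // is simply the sum of all top gradients: dL/dx = sum_k dL/dy_k. The first
+  // two diffs were combined by caffe_gpu_add above; the remaining ones are
+  // accumulated with axpy below, mirroring the CPU implementation.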
+ for (int i = 2; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff); + } +} + + +INSTANTIATE_LAYER_GPU_FUNCS(SplitLayer); + +} // namespace caffe diff --git a/src/caffe/layers/spp_layer.cpp b/src/caffe/layers/spp_layer.cpp new file mode 100755 index 0000000..795dd71 --- /dev/null +++ b/src/caffe/layers/spp_layer.cpp @@ -0,0 +1,193 @@ +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +using std::min; +using std::max; + +template +LayerParameter SPPLayer::GetPoolingParam(const int pyramid_level, + const int bottom_h, const int bottom_w, const SPPParameter spp_param) { + LayerParameter pooling_param; + int num_bins = pow(2, pyramid_level); + + // find padding and kernel size so that the pooling is + // performed across the entire image + int kernel_h = ceil(bottom_h / static_cast(num_bins)); + // remainder_h is the min number of pixels that need to be padded before + // entire image height is pooled over with the chosen kernel dimension + int remainder_h = kernel_h * num_bins - bottom_h; + // pooling layer pads (2 * pad_h) pixels on the top and bottom of the + // image. + int pad_h = (remainder_h + 1) / 2; + + // similar logic for width + int kernel_w = ceil(bottom_w / static_cast(num_bins)); + int remainder_w = kernel_w * num_bins - bottom_w; + int pad_w = (remainder_w + 1) / 2; + + pooling_param.mutable_pooling_param()->set_pad_h(pad_h); + pooling_param.mutable_pooling_param()->set_pad_w(pad_w); + pooling_param.mutable_pooling_param()->set_kernel_h(kernel_h); + pooling_param.mutable_pooling_param()->set_kernel_w(kernel_w); + pooling_param.mutable_pooling_param()->set_stride_h(kernel_h); + pooling_param.mutable_pooling_param()->set_stride_w(kernel_w); + + switch (spp_param.pool()) { + case SPPParameter_PoolMethod_MAX: + pooling_param.mutable_pooling_param()->set_pool( + PoolingParameter_PoolMethod_MAX); + break; + case SPPParameter_PoolMethod_AVE: + pooling_param.mutable_pooling_param()->set_pool( + PoolingParameter_PoolMethod_AVE); + break; + case SPPParameter_PoolMethod_STOCHASTIC: + pooling_param.mutable_pooling_param()->set_pool( + PoolingParameter_PoolMethod_STOCHASTIC); + break; + default: + LOG(FATAL) << "Unknown pooling method."; + } + + return pooling_param; +} + +template +void SPPLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + SPPParameter spp_param = this->layer_param_.spp_param(); + + bottom_h_ = bottom[0]->height(); + bottom_w_ = bottom[0]->width(); + CHECK_GT(bottom_h_, 0) << "Input dimensions cannot be zero."; + CHECK_GT(bottom_w_, 0) << "Input dimensions cannot be zero."; + + pyramid_height_ = spp_param.pyramid_height(); + split_top_vec_.clear(); + pooling_bottom_vecs_.clear(); + pooling_layers_.clear(); + pooling_top_vecs_.clear(); + pooling_outputs_.clear(); + flatten_layers_.clear(); + flatten_top_vecs_.clear(); + flatten_outputs_.clear(); + concat_bottom_vec_.clear(); + + // split layer output holders setup + for (int i = 0; i < pyramid_height_; i++) { + split_top_vec_.push_back(new Blob()); + } + + // split layer setup + LayerParameter split_param; + split_layer_.reset(new SplitLayer(split_param)); + split_layer_->SetUp(bottom, split_top_vec_); + + for (int i = 0; i < pyramid_height_; i++) { + // pooling layer input holders setup + 
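+    // (A hypothetical walk-through of GetPoolingParam for the layer built in
+    //  this iteration, with assumed sizes: bottom_h_ = 13 and level i = 2 give
+    //  num_bins = 2^2 = 4, kernel_h = ceil(13/4) = 4, remainder_h = 16 - 13 = 3
+    //  and pad_h = (3+1)/2 = 2, i.e. four stride-4 windows of height 4 cover
+    //  the 13 input rows plus padding. Each pyramid level thus pools the whole
+    //  feature map into a fixed num_bins x num_bins grid regardless of input
+    //  resolution, which is what makes the concatenated SPP output a
+    //  fixed-length vector.)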
pooling_bottom_vecs_.push_back(new vector*>); + pooling_bottom_vecs_[i]->push_back(split_top_vec_[i]); + + // pooling layer output holders setup + pooling_outputs_.push_back(new Blob()); + pooling_top_vecs_.push_back(new vector*>); + pooling_top_vecs_[i]->push_back(pooling_outputs_[i]); + + // pooling layer setup + LayerParameter pooling_param = GetPoolingParam( + i, bottom_h_, bottom_w_, spp_param); + + pooling_layers_.push_back(shared_ptr > ( + new PoolingLayer(pooling_param))); + pooling_layers_[i]->SetUp(*pooling_bottom_vecs_[i], *pooling_top_vecs_[i]); + + // flatten layer output holders setup + flatten_outputs_.push_back(new Blob()); + flatten_top_vecs_.push_back(new vector*>); + flatten_top_vecs_[i]->push_back(flatten_outputs_[i]); + + // flatten layer setup + LayerParameter flatten_param; + flatten_layers_.push_back(new FlattenLayer(flatten_param)); + flatten_layers_[i]->SetUp(*pooling_top_vecs_[i], *flatten_top_vecs_[i]); + + // concat layer input holders setup + concat_bottom_vec_.push_back(flatten_outputs_[i]); + } + + // concat layer setup + LayerParameter concat_param; + concat_layer_.reset(new ConcatLayer(concat_param)); + concat_layer_->SetUp(concat_bottom_vec_, top); +} + +template +void SPPLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; + channels_ = bottom[0]->channels(); + bottom_h_ = bottom[0]->height(); + bottom_w_ = bottom[0]->width(); + SPPParameter spp_param = this->layer_param_.spp_param(); + split_layer_->Reshape(bottom, split_top_vec_); + for (int i = 0; i < pyramid_height_; i++) { + LayerParameter pooling_param = GetPoolingParam( + i, bottom_h_, bottom_w_, spp_param); + + pooling_layers_[i].reset( + new PoolingLayer(pooling_param)); + pooling_layers_[i]->SetUp( + *pooling_bottom_vecs_[i], *pooling_top_vecs_[i]); + pooling_layers_[i]->Reshape( + *pooling_bottom_vecs_[i], *pooling_top_vecs_[i]); + flatten_layers_[i]->Reshape( + *pooling_top_vecs_[i], *flatten_top_vecs_[i]); + } + concat_layer_->Reshape(concat_bottom_vec_, top); +} + +template +void SPPLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + split_layer_->Forward(bottom, split_top_vec_); + for (int i = 0; i < pyramid_height_; i++) { + pooling_layers_[i]->Forward( + *pooling_bottom_vecs_[i], *pooling_top_vecs_[i]); + flatten_layers_[i]->Forward( + *pooling_top_vecs_[i], *flatten_top_vecs_[i]); + } + concat_layer_->Forward(concat_bottom_vec_, top); +} + +template +void SPPLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (!propagate_down[0]) { + return; + } + vector concat_propagate_down(pyramid_height_, true); + concat_layer_->Backward(top, concat_propagate_down, concat_bottom_vec_); + for (int i = 0; i < pyramid_height_; i++) { + flatten_layers_[i]->Backward( + *flatten_top_vecs_[i], propagate_down, *pooling_top_vecs_[i]); + pooling_layers_[i]->Backward( + *pooling_top_vecs_[i], propagate_down, *pooling_bottom_vecs_[i]); + } + split_layer_->Backward(split_top_vec_, propagate_down, bottom); +} + + +INSTANTIATE_CLASS(SPPLayer); +REGISTER_LAYER_CLASS(SPP); + +} // namespace caffe diff --git a/src/caffe/layers/tanh_layer.cpp b/src/caffe/layers/tanh_layer.cpp new file mode 100755 index 0000000..ee5ed77 --- /dev/null +++ b/src/caffe/layers/tanh_layer.cpp @@ -0,0 +1,46 @@ +// TanH neuron activation function layer. 
+// Adapted from ReLU layer code written by Yangqing Jia + +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void TanHLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + const int count = bottom[0]->count(); + for (int i = 0; i < count; ++i) { + top_data[i] = tanh(bottom_data[i]); + } +} + +template +void TanHLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + const Dtype* top_data = top[0]->cpu_data(); + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const int count = bottom[0]->count(); + Dtype tanhx; + for (int i = 0; i < count; ++i) { + tanhx = top_data[i]; + bottom_diff[i] = top_diff[i] * (1 - tanhx * tanhx); + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(TanHLayer); +#endif + +INSTANTIATE_CLASS(TanHLayer); + +} // namespace caffe diff --git a/src/caffe/layers/tanh_layer.cu b/src/caffe/layers/tanh_layer.cu new file mode 100755 index 0000000..ccd6e63 --- /dev/null +++ b/src/caffe/layers/tanh_layer.cu @@ -0,0 +1,59 @@ +// TanH neuron activation function layer. +// Adapted from ReLU layer code written by Yangqing Jia + +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +__global__ void TanHForward(const int n, const Dtype* in, Dtype* out) { + CUDA_KERNEL_LOOP(index, n) { + out[index] = tanh(in[index]); + } +} + +template +void TanHLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + TanHForward<<>>( + count, bottom_data, top_data); + CUDA_POST_KERNEL_CHECK; +} + +template +__global__ void TanHBackward(const int n, const Dtype* in_diff, + const Dtype* out_data, Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) { + Dtype tanhx = out_data[index]; + out_diff[index] = in_diff[index] * (1 - tanhx * tanhx); + } +} + +template +void TanHLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + const Dtype* top_data = top[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + TanHBackward<<>>( + count, top_diff, top_data, bottom_diff); + CUDA_POST_KERNEL_CHECK; + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(TanHLayer); + + +} // namespace caffe diff --git a/src/caffe/layers/threshold_layer.cpp b/src/caffe/layers/threshold_layer.cpp new file mode 100755 index 0000000..2365e7b --- /dev/null +++ b/src/caffe/layers/threshold_layer.cpp @@ -0,0 +1,34 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + + +namespace caffe { + +template +void ThresholdLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + NeuronLayer::LayerSetUp(bottom, top); + threshold_ = this->layer_param_.threshold_param().threshold(); +} + +template +void ThresholdLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + const int count = bottom[0]->count(); + for (int i = 0; i < count; ++i) { + top_data[i] 
= (bottom_data[i] > threshold_) ? Dtype(1) : Dtype(0); + } +} + +#ifdef CPU_ONLY +STUB_GPU_FORWARD(ThresholdLayer, Forward); +#endif + +INSTANTIATE_CLASS(ThresholdLayer); +REGISTER_LAYER_CLASS(Threshold); + +} // namespace caffe diff --git a/src/caffe/layers/threshold_layer.cu b/src/caffe/layers/threshold_layer.cu new file mode 100755 index 0000000..bfa7f15 --- /dev/null +++ b/src/caffe/layers/threshold_layer.cu @@ -0,0 +1,33 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +__global__ void ThresholdForward(const int n, const Dtype threshold, + const Dtype* in, Dtype* out) { + CUDA_KERNEL_LOOP(index, n) { + out[index] = in[index] > threshold ? 1 : 0; + } +} + +template +void ThresholdLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + ThresholdForward<<>>( + count, threshold_, bottom_data, top_data); + CUDA_POST_KERNEL_CHECK; +} + + +INSTANTIATE_LAYER_GPU_FORWARD(ThresholdLayer); + + +} // namespace caffe diff --git a/src/caffe/layers/window_data_layer.cpp b/src/caffe/layers/window_data_layer.cpp new file mode 100755 index 0000000..f637f2e --- /dev/null +++ b/src/caffe/layers/window_data_layer.cpp @@ -0,0 +1,470 @@ +#include +#include + +#include +#include +#include +#include +#include + +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui.hpp" +#include "opencv2/imgproc/imgproc.hpp" + +#include "caffe/common.hpp" +#include "caffe/data_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/benchmark.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/rng.hpp" + +// caffe.proto > LayerParameter > WindowDataParameter +// 'source' field specifies the window_file +// 'crop_size' indicates the desired warped size + +namespace caffe { + +template +WindowDataLayer::~WindowDataLayer() { + this->StopInternalThread(); +} + +template +void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, + const vector*>& top) { + // LayerSetUp runs through the window_file and creates two structures + // that hold windows: one for foreground (object) windows and one + // for background (non-object) windows. We use an overlap threshold + // to decide which is which. 
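+  // Put differently (the threshold values below are assumed examples, not
+  // defaults asserted by this file): a window whose overlap with a
+  // ground-truth box is >= fg_threshold (say 0.5) keeps its class label and
+  // is stored in fg_windows_; a window with overlap < bg_threshold (say 0.3)
+  // is relabelled as class 0 and stored in bg_windows_; anything in between
+  // is discarded. load_batch() later fills each batch with roughly
+  // fg_fraction foreground windows and the rest background windows.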
+ + // window_file format + // repeated: + // # image_index + // img_path (abs path) + // channels + // height + // width + // num_windows + // class_index overlap x1 y1 x2 y2 + + LOG(INFO) << "Window data layer:" << std::endl + << " foreground (object) overlap threshold: " + << this->layer_param_.window_data_param().fg_threshold() << std::endl + << " background (non-object) overlap threshold: " + << this->layer_param_.window_data_param().bg_threshold() << std::endl + << " foreground sampling fraction: " + << this->layer_param_.window_data_param().fg_fraction() << std::endl + << " cache_images: " + << this->layer_param_.window_data_param().cache_images() << std::endl + << " root_folder: " + << this->layer_param_.window_data_param().root_folder(); + + cache_images_ = this->layer_param_.window_data_param().cache_images(); + string root_folder = this->layer_param_.window_data_param().root_folder(); + + const bool prefetch_needs_rand = + this->transform_param_.mirror() || + this->transform_param_.crop_size(); + if (prefetch_needs_rand) { + const unsigned int prefetch_rng_seed = caffe_rng_rand(); + prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed)); + } else { + prefetch_rng_.reset(); + } + + std::ifstream infile(this->layer_param_.window_data_param().source().c_str()); + CHECK(infile.good()) << "Failed to open window file " + << this->layer_param_.window_data_param().source() << std::endl; + + map label_hist; + label_hist.insert(std::make_pair(0, 0)); + + string hashtag; + int image_index, channels; + if (!(infile >> hashtag >> image_index)) { + LOG(FATAL) << "Window file is empty"; + } + do { + CHECK_EQ(hashtag, "#"); + // read image path + string image_path; + infile >> image_path; + image_path = root_folder + image_path; + // read image dimensions + vector image_size(3); + infile >> image_size[0] >> image_size[1] >> image_size[2]; + channels = image_size[0]; + image_database_.push_back(std::make_pair(image_path, image_size)); + + if (cache_images_) { + Datum datum; + if (!ReadFileToDatum(image_path, &datum)) { + LOG(ERROR) << "Could not open or find file " << image_path; + return; + } + image_database_cache_.push_back(std::make_pair(image_path, datum)); + } + // read each box + int num_windows; + infile >> num_windows; + const float fg_threshold = + this->layer_param_.window_data_param().fg_threshold(); + const float bg_threshold = + this->layer_param_.window_data_param().bg_threshold(); + for (int i = 0; i < num_windows; ++i) { + int label, x1, y1, x2, y2; + float overlap; + infile >> label >> overlap >> x1 >> y1 >> x2 >> y2; + + vector window(WindowDataLayer::NUM); + window[WindowDataLayer::IMAGE_INDEX] = image_index; + window[WindowDataLayer::LABEL] = label; + window[WindowDataLayer::OVERLAP] = overlap; + window[WindowDataLayer::X1] = x1; + window[WindowDataLayer::Y1] = y1; + window[WindowDataLayer::X2] = x2; + window[WindowDataLayer::Y2] = y2; + + // add window to foreground list or background list + if (overlap >= fg_threshold) { + int label = window[WindowDataLayer::LABEL]; + CHECK_GT(label, 0); + fg_windows_.push_back(window); + label_hist.insert(std::make_pair(label, 0)); + label_hist[label]++; + } else if (overlap < bg_threshold) { + // background window, force label and overlap to 0 + window[WindowDataLayer::LABEL] = 0; + window[WindowDataLayer::OVERLAP] = 0; + bg_windows_.push_back(window); + label_hist[0]++; + } + } + + if (image_index % 100 == 0) { + LOG(INFO) << "num: " << image_index << " " + << image_path << " " + << image_size[0] << " " + << image_size[1] << " " + << 
image_size[2] << " " + << "windows to process: " << num_windows; + } + } while (infile >> hashtag >> image_index); + + LOG(INFO) << "Number of images: " << image_index+1; + + for (map::iterator it = label_hist.begin(); + it != label_hist.end(); ++it) { + LOG(INFO) << "class " << it->first << " has " << label_hist[it->first] + << " samples"; + } + + LOG(INFO) << "Amount of context padding: " + << this->layer_param_.window_data_param().context_pad(); + + LOG(INFO) << "Crop mode: " + << this->layer_param_.window_data_param().crop_mode(); + + // image + const int crop_size = this->transform_param_.crop_size(); + CHECK_GT(crop_size, 0); + const int batch_size = this->layer_param_.window_data_param().batch_size(); + top[0]->Reshape(batch_size, channels, crop_size, crop_size); + for (int i = 0; i < this->PREFETCH_COUNT; ++i) + this->prefetch_[i].data_.Reshape( + batch_size, channels, crop_size, crop_size); + + LOG(INFO) << "output data size: " << top[0]->num() << "," + << top[0]->channels() << "," << top[0]->height() << "," + << top[0]->width(); + // label + vector label_shape(1, batch_size); + top[1]->Reshape(label_shape); + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].label_.Reshape(label_shape); + } + + // data mean + has_mean_file_ = this->transform_param_.has_mean_file(); + has_mean_values_ = this->transform_param_.mean_value_size() > 0; + if (has_mean_file_) { + const string& mean_file = + this->transform_param_.mean_file(); + LOG(INFO) << "Loading mean file from: " << mean_file; + BlobProto blob_proto; + ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto); + data_mean_.FromProto(blob_proto); + } + if (has_mean_values_) { + CHECK(has_mean_file_ == false) << + "Cannot specify mean_file and mean_value at the same time"; + for (int c = 0; c < this->transform_param_.mean_value_size(); ++c) { + mean_values_.push_back(this->transform_param_.mean_value(c)); + } + CHECK(mean_values_.size() == 1 || mean_values_.size() == channels) << + "Specify either 1 mean_value or as many as channels: " << channels; + if (channels > 1 && mean_values_.size() == 1) { + // Replicate the mean_value for simplicity + for (int c = 1; c < channels; ++c) { + mean_values_.push_back(mean_values_[0]); + } + } + } +} + +template +unsigned int WindowDataLayer::PrefetchRand() { + CHECK(prefetch_rng_); + caffe::rng_t* prefetch_rng = + static_cast(prefetch_rng_->generator()); + return (*prefetch_rng)(); +} + +// This function is called on prefetch thread +template +void WindowDataLayer::load_batch(Batch* batch) { + // At each iteration, sample N windows where N*p are foreground (object) + // windows and N*(1-p) are background (non-object) windows + CPUTimer batch_timer; + batch_timer.Start(); + double read_time = 0; + double trans_time = 0; + CPUTimer timer; + Dtype* top_data = batch->data_.mutable_cpu_data(); + Dtype* top_label = batch->label_.mutable_cpu_data(); + const Dtype scale = this->layer_param_.window_data_param().scale(); + const int batch_size = this->layer_param_.window_data_param().batch_size(); + const int context_pad = this->layer_param_.window_data_param().context_pad(); + const int crop_size = this->transform_param_.crop_size(); + const bool mirror = this->transform_param_.mirror(); + const float fg_fraction = + this->layer_param_.window_data_param().fg_fraction(); + Dtype* mean = NULL; + int mean_off = 0; + int mean_width = 0; + int mean_height = 0; + if (this->has_mean_file_) { + mean = this->data_mean_.mutable_cpu_data(); + mean_off = (this->data_mean_.width() - crop_size) / 
2; + mean_width = this->data_mean_.width(); + mean_height = this->data_mean_.height(); + } + cv::Size cv_crop_size(crop_size, crop_size); + const string& crop_mode = this->layer_param_.window_data_param().crop_mode(); + + bool use_square = (crop_mode == "square") ? true : false; + + // zero out batch + caffe_set(batch->data_.count(), Dtype(0), top_data); + + const int num_fg = static_cast(static_cast(batch_size) + * fg_fraction); + const int num_samples[2] = { batch_size - num_fg, num_fg }; + + int item_id = 0; + // sample from bg set then fg set + for (int is_fg = 0; is_fg < 2; ++is_fg) { + for (int dummy = 0; dummy < num_samples[is_fg]; ++dummy) { + // sample a window + timer.Start(); + const unsigned int rand_index = PrefetchRand(); + vector window = (is_fg) ? + fg_windows_[rand_index % fg_windows_.size()] : + bg_windows_[rand_index % bg_windows_.size()]; + + bool do_mirror = mirror && PrefetchRand() % 2; + + // load the image containing the window + pair > image = + image_database_[window[WindowDataLayer::IMAGE_INDEX]]; + + cv::Mat cv_img; + if (this->cache_images_) { + pair image_cached = + image_database_cache_[window[WindowDataLayer::IMAGE_INDEX]]; + cv_img = DecodeDatumToCVMat(image_cached.second, true); + } else { + cv_img = cv::imread(image.first, CV_LOAD_IMAGE_COLOR); + if (!cv_img.data) { + LOG(ERROR) << "Could not open or find file " << image.first; + return; + } + } + read_time += timer.MicroSeconds(); + timer.Start(); + const int channels = cv_img.channels(); + + // crop window out of image and warp it + int x1 = window[WindowDataLayer::X1]; + int y1 = window[WindowDataLayer::Y1]; + int x2 = window[WindowDataLayer::X2]; + int y2 = window[WindowDataLayer::Y2]; + + int pad_w = 0; + int pad_h = 0; + if (context_pad > 0 || use_square) { + // scale factor by which to expand the original region + // such that after warping the expanded region to crop_size x crop_size + // there's exactly context_pad amount of padding on each side + Dtype context_scale = static_cast(crop_size) / + static_cast(crop_size - 2*context_pad); + + // compute the expanded region + Dtype half_height = static_cast(y2-y1+1)/2.0; + Dtype half_width = static_cast(x2-x1+1)/2.0; + Dtype center_x = static_cast(x1) + half_width; + Dtype center_y = static_cast(y1) + half_height; + if (use_square) { + if (half_height > half_width) { + half_width = half_height; + } else { + half_height = half_width; + } + } + x1 = static_cast(round(center_x - half_width*context_scale)); + x2 = static_cast(round(center_x + half_width*context_scale)); + y1 = static_cast(round(center_y - half_height*context_scale)); + y2 = static_cast(round(center_y + half_height*context_scale)); + + // the expanded region may go outside of the image + // so we compute the clipped (expanded) region and keep track of + // the extent beyond the image + int unclipped_height = y2-y1+1; + int unclipped_width = x2-x1+1; + int pad_x1 = std::max(0, -x1); + int pad_y1 = std::max(0, -y1); + int pad_x2 = std::max(0, x2 - cv_img.cols + 1); + int pad_y2 = std::max(0, y2 - cv_img.rows + 1); + // clip bounds + x1 = x1 + pad_x1; + x2 = x2 - pad_x2; + y1 = y1 + pad_y1; + y2 = y2 - pad_y2; + CHECK_GT(x1, -1); + CHECK_GT(y1, -1); + CHECK_LT(x2, cv_img.cols); + CHECK_LT(y2, cv_img.rows); + + int clipped_height = y2-y1+1; + int clipped_width = x2-x1+1; + + // scale factors that would be used to warp the unclipped + // expanded region + Dtype scale_x = + static_cast(crop_size)/static_cast(unclipped_width); + Dtype scale_y = + 
static_cast(crop_size)/static_cast(unclipped_height); + + // size to warp the clipped expanded region to + cv_crop_size.width = + static_cast(round(static_cast(clipped_width)*scale_x)); + cv_crop_size.height = + static_cast(round(static_cast(clipped_height)*scale_y)); + pad_x1 = static_cast(round(static_cast(pad_x1)*scale_x)); + pad_x2 = static_cast(round(static_cast(pad_x2)*scale_x)); + pad_y1 = static_cast(round(static_cast(pad_y1)*scale_y)); + pad_y2 = static_cast(round(static_cast(pad_y2)*scale_y)); + + pad_h = pad_y1; + // if we're mirroring, we mirror the padding too (to be pedantic) + if (do_mirror) { + pad_w = pad_x2; + } else { + pad_w = pad_x1; + } + + // ensure that the warped, clipped region plus the padding fits in the + // crop_size x crop_size image (it might not due to rounding) + if (pad_h + cv_crop_size.height > crop_size) { + cv_crop_size.height = crop_size - pad_h; + } + if (pad_w + cv_crop_size.width > crop_size) { + cv_crop_size.width = crop_size - pad_w; + } + } + + cv::Rect roi(x1, y1, x2-x1+1, y2-y1+1); + cv::Mat cv_cropped_img = cv_img(roi); + cv::resize(cv_cropped_img, cv_cropped_img, + cv_crop_size, 0, 0, cv::INTER_LINEAR); + + // horizontal flip at random + if (do_mirror) { + cv::flip(cv_cropped_img, cv_cropped_img, 1); + } + + // copy the warped window into top_data + for (int h = 0; h < cv_cropped_img.rows; ++h) { + const uchar* ptr = cv_cropped_img.ptr(h); + int img_index = 0; + for (int w = 0; w < cv_cropped_img.cols; ++w) { + for (int c = 0; c < channels; ++c) { + int top_index = ((item_id * channels + c) * crop_size + h + pad_h) + * crop_size + w + pad_w; + // int top_index = (c * height + h) * width + w; + Dtype pixel = static_cast(ptr[img_index++]); + if (this->has_mean_file_) { + int mean_index = (c * mean_height + h + mean_off + pad_h) + * mean_width + w + mean_off + pad_w; + top_data[top_index] = (pixel - mean[mean_index]) * scale; + } else { + if (this->has_mean_values_) { + top_data[top_index] = (pixel - this->mean_values_[c]) * scale; + } else { + top_data[top_index] = pixel * scale; + } + } + } + } + } + trans_time += timer.MicroSeconds(); + // get window label + top_label[item_id] = window[WindowDataLayer::LABEL]; + + #if 0 + // useful debugging code for dumping transformed windows to disk + string file_id; + std::stringstream ss; + ss << PrefetchRand(); + ss >> file_id; + std::ofstream inf((string("dump/") + file_id + + string("_info.txt")).c_str(), std::ofstream::out); + inf << image.first << std::endl + << window[WindowDataLayer::X1]+1 << std::endl + << window[WindowDataLayer::Y1]+1 << std::endl + << window[WindowDataLayer::X2]+1 << std::endl + << window[WindowDataLayer::Y2]+1 << std::endl + << do_mirror << std::endl + << top_label[item_id] << std::endl + << is_fg << std::endl; + inf.close(); + std::ofstream top_data_file((string("dump/") + file_id + + string("_data.txt")).c_str(), + std::ofstream::out | std::ofstream::binary); + for (int c = 0; c < channels; ++c) { + for (int h = 0; h < crop_size; ++h) { + for (int w = 0; w < crop_size; ++w) { + top_data_file.write(reinterpret_cast( + &top_data[((item_id * channels + c) * crop_size + h) + * crop_size + w]), + sizeof(Dtype)); + } + } + } + top_data_file.close(); + #endif + + item_id++; + } + } + batch_timer.Stop(); + DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; + DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; + DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms."; +} + +INSTANTIATE_CLASS(WindowDataLayer); +REGISTER_LAYER_CLASS(WindowData); 
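+// A worked example of the context-padding arithmetic used in load_batch()
+// above (the numbers are an assumed illustration, not values fixed by this
+// layer): with crop_size = 227 and context_pad = 16,
+//   context_scale = 227 / (227 - 2*16) = 227 / 195 ~= 1.164,
+// so each sampled window is expanded by about 16% around its center before
+// warping; after the expanded region is resized to 227 x 227, roughly 16
+// pixels of surrounding context remain on every side of the original box.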
+ +} // namespace caffe diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp new file mode 100755 index 0000000..8bfcf2d --- /dev/null +++ b/src/caffe/net.cpp @@ -0,0 +1,1096 @@ +#include +#include +#include +#include +#include +#include + +#include "hdf5.h" + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/net.hpp" +#include "caffe/parallel.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/hdf5.hpp" +#include "caffe/util/insert_splits.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/upgrade_proto.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +Net::Net(const NetParameter& param, const Net* root_net) + : root_net_(root_net) { + Init(param); +} + +template +Net::Net(const string& param_file, Phase phase, const Net* root_net) + : root_net_(root_net) { + NetParameter param; + ReadNetParamsFromTextFileOrDie(param_file, ¶m); + param.mutable_state()->set_phase(phase); + Init(param); +} + +template +void Net::Init(const NetParameter& in_param) { + CHECK(Caffe::root_solver() || root_net_) + << "root_net_ needs to be set for all non-root solvers"; + // Set phase from the state. + phase_ = in_param.state().phase(); + // Filter layers based on their include/exclude rules and + // the current NetState. + NetParameter filtered_param; + FilterNet(in_param, &filtered_param); + if (Caffe::root_solver()) { + LOG(INFO) << "Initializing net from parameters: " << std::endl + << filtered_param.DebugString(); + } + // Create a copy of filtered_param with splits added where necessary. + NetParameter param; + InsertSplits(filtered_param, ¶m); + // Basically, build all the layers and set up their connections. + name_ = param.name(); + map blob_name_to_idx; + set available_blobs; + CHECK(param.input_dim_size() == 0 || param.input_shape_size() == 0) + << "Must specify either input_shape OR deprecated input_dim, not both."; + if (param.input_dim_size() > 0) { + // Deprecated 4D dimensions. + CHECK_EQ(param.input_size() * 4, param.input_dim_size()) + << "Incorrect input blob dimension specifications."; + } else { + CHECK_EQ(param.input_size(), param.input_shape_size()) + << "Exactly one input_shape must be specified per input."; + } + memory_used_ = 0; + // set the input blobs + for (int input_id = 0; input_id < param.input_size(); ++input_id) { + const int layer_id = -1; // inputs have fake layer ID -1 + AppendTop(param, layer_id, input_id, &available_blobs, &blob_name_to_idx); + } + DLOG_IF(INFO, Caffe::root_solver()) + << "Memory required for data: " << memory_used_ * sizeof(Dtype); + // For each layer, set up its input and output + bottom_vecs_.resize(param.layer_size()); + top_vecs_.resize(param.layer_size()); + bottom_id_vecs_.resize(param.layer_size()); + param_id_vecs_.resize(param.layer_size()); + top_id_vecs_.resize(param.layer_size()); + bottom_need_backward_.resize(param.layer_size()); + for (int layer_id = 0; layer_id < param.layer_size(); ++layer_id) { + // For non-root solvers, whether this layer is shared from root_net_. + bool share_from_root = !Caffe::root_solver() + && root_net_->layers_[layer_id]->ShareInParallel(); + // Inherit phase from net if unset. + if (!param.layer(layer_id).has_phase()) { + param.mutable_layer(layer_id)->set_phase(phase_); + } + // Setup layer. 
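+    // (When propagate_down is present in the prototxt it must be listed once
+    //  per bottom blob; an assumed illustration for a two-bottom loss layer:
+    //     layer { ... bottom: "pred" bottom: "label"
+    //             propagate_down: true propagate_down: false }
+    //  requests gradients for "pred" while keeping the label path frozen.
+    //  The CHECK_EQ below only verifies that the counts match.)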
+ const LayerParameter& layer_param = param.layer(layer_id); + if (layer_param.propagate_down_size() > 0) { + CHECK_EQ(layer_param.propagate_down_size(), + layer_param.bottom_size()) + << "propagate_down param must be specified " + << "either 0 or bottom_size times "; + } + if (share_from_root) { + LOG(INFO) << "Sharing layer " << layer_param.name() << " from root net"; + layers_.push_back(root_net_->layers_[layer_id]); + layers_[layer_id]->SetShared(true); + } else { + layers_.push_back(LayerRegistry::CreateLayer(layer_param)); + } + layer_names_.push_back(layer_param.name()); + if (Caffe::root_solver()) { + LOG(INFO) << "Creating Layer " << layer_param.name(); + } + bool need_backward = false; + + // Figure out this layer's input and output + for (int bottom_id = 0; bottom_id < layer_param.bottom_size(); + ++bottom_id) { + const int blob_id = AppendBottom(param, layer_id, bottom_id, + &available_blobs, &blob_name_to_idx); + // If a blob needs backward, this layer should provide it. + need_backward |= blob_need_backward_[blob_id]; + } + int num_top = layer_param.top_size(); + for (int top_id = 0; top_id < num_top; ++top_id) { + AppendTop(param, layer_id, top_id, &available_blobs, &blob_name_to_idx); + } + // If the layer specifies that AutoTopBlobs() -> true and the LayerParameter + // specified fewer than the required number (as specified by + // ExactNumTopBlobs() or MinTopBlobs()), allocate them here. + Layer* layer = layers_[layer_id].get(); + if (layer->AutoTopBlobs()) { + const int needed_num_top = + std::max(layer->MinTopBlobs(), layer->ExactNumTopBlobs()); + for (; num_top < needed_num_top; ++num_top) { + // Add "anonymous" top blobs -- do not modify available_blobs or + // blob_name_to_idx as we don't want these blobs to be usable as input + // to other layers. + AppendTop(param, layer_id, num_top, NULL, NULL); + } + } + // After this layer is connected, set it up. 
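+    // (share_from_root is the multi-GPU path: a non-root solver reuses the
+    //  root net's layer object when that layer declares ShareInParallel(),
+    //  so instead of calling SetUp again it only reshapes its own top blobs
+    //  to match the root's, as done in the branch below.)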
+ if (share_from_root) { + // Set up size of top blobs using root_net_ + const vector*>& base_top = root_net_->top_vecs_[layer_id]; + const vector*>& this_top = this->top_vecs_[layer_id]; + for (int top_id = 0; top_id < base_top.size(); ++top_id) { + this_top[top_id]->ReshapeLike(*base_top[top_id]); + LOG(INFO) << "Created top blob " << top_id << " (shape: " + << this_top[top_id]->shape_string() << ") for shared layer " + << layer_param.name(); + } + } else { + layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]); + } + if (Caffe::root_solver()) { + LOG(INFO) << "Setting up " << layer_names_[layer_id]; + } + for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { + if (blob_loss_weights_.size() <= top_id_vecs_[layer_id][top_id]) { + blob_loss_weights_.resize(top_id_vecs_[layer_id][top_id] + 1, Dtype(0)); + } + blob_loss_weights_[top_id_vecs_[layer_id][top_id]] = layer->loss(top_id); + if (Caffe::root_solver()) { + LOG(INFO) << "Top shape: " + << top_vecs_[layer_id][top_id]->shape_string(); + } + if (layer->loss(top_id)) { + if (Caffe::root_solver()) { + LOG(INFO) << " with loss weight " << layer->loss(top_id); + } + } + memory_used_ += top_vecs_[layer_id][top_id]->count(); + } + if (Caffe::root_solver()) { + DLOG(INFO) << "Memory required for data: " + << memory_used_ * sizeof(Dtype); + } + const int param_size = layer_param.param_size(); + const int num_param_blobs = layers_[layer_id]->blobs().size(); + CHECK_LE(param_size, num_param_blobs) + << "Too many params specified for layer " << layer_param.name(); + ParamSpec default_param_spec; + for (int param_id = 0; param_id < num_param_blobs; ++param_id) { + const ParamSpec* param_spec = (param_id < param_size) ? + &layer_param.param(param_id) : &default_param_spec; + const bool param_need_backward = param_spec->lr_mult() > 0; + need_backward |= param_need_backward; + layers_[layer_id]->set_param_propagate_down(param_id, + param_need_backward); + } + for (int param_id = 0; param_id < num_param_blobs; ++param_id) { + AppendParam(param, layer_id, param_id); + } + // Finally, set the backward flag + layer_need_backward_.push_back(need_backward); + if (need_backward) { + for (int top_id = 0; top_id < top_id_vecs_[layer_id].size(); ++top_id) { + blob_need_backward_[top_id_vecs_[layer_id][top_id]] = true; + } + } + } + // Go through the net backwards to determine which blobs contribute to the + // loss. We can skip backward computation for blobs that don't contribute + // to the loss. 
+ // Also checks if all bottom blobs don't need backward computation (possible + // because the skip_propagate_down param) and so we can skip bacward + // computation for the entire layer + set blobs_under_loss; + set blobs_skip_backp; + for (int layer_id = layers_.size() - 1; layer_id >= 0; --layer_id) { + bool layer_contributes_loss = false; + bool layer_skip_propagate_down = true; + for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { + const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; + if (layers_[layer_id]->loss(top_id) || + (blobs_under_loss.find(blob_name) != blobs_under_loss.end())) { + layer_contributes_loss = true; + } + if (blobs_skip_backp.find(blob_name) == blobs_skip_backp.end()) { + layer_skip_propagate_down = false; + } + if (layer_contributes_loss && !layer_skip_propagate_down) + break; + } + // If this layer can skip backward computation, also all his bottom blobs + // don't need backpropagation + if (layer_need_backward_[layer_id] && layer_skip_propagate_down) { + layer_need_backward_[layer_id] = false; + for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); + ++bottom_id) { + bottom_need_backward_[layer_id][bottom_id] = false; + } + } + if (!layer_contributes_loss) { layer_need_backward_[layer_id] = false; } + if (layer_need_backward_[layer_id]) { + if (Caffe::root_solver()) { + LOG(INFO) << layer_names_[layer_id] << " needs backward computation."; + } + } else { + if (Caffe::root_solver()) { + LOG(INFO) << layer_names_[layer_id] + << " does not need backward computation."; + } + } + for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); + ++bottom_id) { + if (layer_contributes_loss) { + const string& blob_name = + blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; + blobs_under_loss.insert(blob_name); + } else { + bottom_need_backward_[layer_id][bottom_id] = false; + } + if (!bottom_need_backward_[layer_id][bottom_id]) { + const string& blob_name = + blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; + blobs_skip_backp.insert(blob_name); + } + } + } + // Handle force_backward if needed. + if (param.force_backward()) { + for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) { + layer_need_backward_[layer_id] = true; + for (int bottom_id = 0; + bottom_id < bottom_need_backward_[layer_id].size(); ++bottom_id) { + bottom_need_backward_[layer_id][bottom_id] = + bottom_need_backward_[layer_id][bottom_id] || + layers_[layer_id]->AllowForceBackward(bottom_id); + blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] = + blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] || + bottom_need_backward_[layer_id][bottom_id]; + } + for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); + ++param_id) { + layers_[layer_id]->set_param_propagate_down(param_id, true); + } + } + } + // In the end, all remaining blobs are considered output blobs. 
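+  // (available_blobs now holds every top blob that no later layer consumed
+  //  as a bottom -- typically the loss blob(s) and any dangling outputs --
+  //  and those are the blobs the net returns from its forward pass.)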
+ for (set::iterator it = available_blobs.begin(); + it != available_blobs.end(); ++it) { + if (Caffe::root_solver()) { + LOG(INFO) << "This network produces output " << *it; + } + net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get()); + net_output_blob_indices_.push_back(blob_name_to_idx[*it]); + } + for (size_t blob_id = 0; blob_id < blob_names_.size(); ++blob_id) { + blob_names_index_[blob_names_[blob_id]] = blob_id; + } + for (size_t layer_id = 0; layer_id < layer_names_.size(); ++layer_id) { + layer_names_index_[layer_names_[layer_id]] = layer_id; + } + ShareWeights(); + debug_info_ = param.debug_info(); + if (Caffe::root_solver()) { + LOG(INFO) << "Network initialization done."; + LOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); + } +} + +template +void Net::FilterNet(const NetParameter& param, + NetParameter* param_filtered) { + NetState net_state(param.state()); + param_filtered->CopyFrom(param); + param_filtered->clear_layer(); + for (int i = 0; i < param.layer_size(); ++i) { + const LayerParameter& layer_param = param.layer(i); + const string& layer_name = layer_param.name(); + CHECK(layer_param.include_size() == 0 || layer_param.exclude_size() == 0) + << "Specify either include rules or exclude rules; not both."; + // If no include rules are specified, the layer is included by default and + // only excluded if it meets one of the exclude rules. + bool layer_included = (layer_param.include_size() == 0); + for (int j = 0; layer_included && j < layer_param.exclude_size(); ++j) { + if (StateMeetsRule(net_state, layer_param.exclude(j), layer_name)) { + layer_included = false; + } + } + for (int j = 0; !layer_included && j < layer_param.include_size(); ++j) { + if (StateMeetsRule(net_state, layer_param.include(j), layer_name)) { + layer_included = true; + } + } + if (layer_included) { + param_filtered->add_layer()->CopyFrom(layer_param); + } + } +} + +template +bool Net::StateMeetsRule(const NetState& state, + const NetStateRule& rule, const string& layer_name) { + // Check whether the rule is broken due to phase. + if (rule.has_phase()) { + if (rule.phase() != state.phase()) { + if (Caffe::root_solver()) { + LOG(INFO) << "The NetState phase (" << state.phase() + << ") differed from the phase (" << rule.phase() + << ") specified by a rule in layer " << layer_name; + } + return false; + } + } + // Check whether the rule is broken due to min level. + if (rule.has_min_level()) { + if (state.level() < rule.min_level()) { + if (Caffe::root_solver()) { + LOG(INFO) << "The NetState level (" << state.level() + << ") is above the min_level (" << rule.min_level() + << ") specified by a rule in layer " << layer_name; + } + return false; + } + } + // Check whether the rule is broken due to max level. + if (rule.has_max_level()) { + if (state.level() > rule.max_level()) { + if (Caffe::root_solver()) { + LOG(INFO) << "The NetState level (" << state.level() + << ") is above the max_level (" << rule.max_level() + << ") specified by a rule in layer " << layer_name; + } + return false; + } + } + // Check whether the rule is broken due to stage. The NetState must + // contain ALL of the rule's stages to meet it. + for (int i = 0; i < rule.stage_size(); ++i) { + // Check that the NetState contains the rule's ith stage. 
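+    // (An assumed example of such a rule in a prototxt:
+    //    include { phase: TEST stage: "deploy" }
+    //  keeps the layer only when the NetState is in the TEST phase and lists
+    //  "deploy" among its stages; every stage named by the rule must be
+    //  present, whereas the not_stage check further down requires absence.)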
+ bool has_stage = false; + for (int j = 0; !has_stage && j < state.stage_size(); ++j) { + if (rule.stage(i) == state.stage(j)) { has_stage = true; } + } + if (!has_stage) { + if (Caffe::root_solver()) { + LOG(INFO) << "The NetState did not contain stage '" << rule.stage(i) + << "' specified by a rule in layer " << layer_name; + } + return false; + } + } + // Check whether the rule is broken due to not_stage. The NetState must + // contain NONE of the rule's not_stages to meet it. + for (int i = 0; i < rule.not_stage_size(); ++i) { + // Check that the NetState contains the rule's ith not_stage. + bool has_stage = false; + for (int j = 0; !has_stage && j < state.stage_size(); ++j) { + if (rule.not_stage(i) == state.stage(j)) { has_stage = true; } + } + if (has_stage) { + if (Caffe::root_solver()) { + LOG(INFO) << "The NetState contained a not_stage '" << rule.not_stage(i) + << "' specified by a rule in layer " << layer_name; + } + return false; + } + } + return true; +} + +// Helper for Net::Init: add a new input or top blob to the net. (Inputs have +// layer_id == -1, tops have layer_id >= 0.) +template +void Net::AppendTop(const NetParameter& param, const int layer_id, + const int top_id, set* available_blobs, + map* blob_name_to_idx) { + shared_ptr layer_param((layer_id >= 0) ? + (new LayerParameter(param.layer(layer_id))) : NULL); + const string& blob_name = layer_param ? + (layer_param->top_size() > top_id ? + layer_param->top(top_id) : "(automatic)") : param.input(top_id); + // Check if we are doing in-place computation + if (blob_name_to_idx && layer_param && layer_param->bottom_size() > top_id && + blob_name == layer_param->bottom(top_id)) { + // In-place computation + if (Caffe::root_solver()) { + LOG(INFO) << layer_param->name() << " -> " << blob_name << " (in-place)"; + } + top_vecs_[layer_id].push_back(blobs_[(*blob_name_to_idx)[blob_name]].get()); + top_id_vecs_[layer_id].push_back((*blob_name_to_idx)[blob_name]); + } else if (blob_name_to_idx && + blob_name_to_idx->find(blob_name) != blob_name_to_idx->end()) { + // If we are not doing in-place computation but have duplicated blobs, + // raise an error. + LOG(FATAL) << "Duplicate blobs produced by multiple sources."; + } else { + // Normal output. + if (Caffe::root_solver()) { + if (layer_param) { + LOG(INFO) << layer_param->name() << " -> " << blob_name; + } else { + LOG(INFO) << "Input " << top_id << " -> " << blob_name; + } + } + shared_ptr > blob_pointer(new Blob()); + const int blob_id = blobs_.size(); + blobs_.push_back(blob_pointer); + blob_names_.push_back(blob_name); + blob_need_backward_.push_back(false); + if (blob_name_to_idx) { (*blob_name_to_idx)[blob_name] = blob_id; } + if (layer_id == -1) { + // Set the (explicitly specified) dimensions of the input blob. + if (param.input_dim_size() > 0) { + blob_pointer->Reshape(param.input_dim(top_id * 4), + param.input_dim(top_id * 4 + 1), + param.input_dim(top_id * 4 + 2), + param.input_dim(top_id * 4 + 3)); + } else { + blob_pointer->Reshape(param.input_shape(top_id)); + } + net_input_blob_indices_.push_back(blob_id); + net_input_blobs_.push_back(blob_pointer.get()); + } else { + top_id_vecs_[layer_id].push_back(blob_id); + top_vecs_[layer_id].push_back(blob_pointer.get()); + } + } + if (available_blobs) { available_blobs->insert(blob_name); } +} + +// Helper for Net::Init: add a new bottom blob to the net. 
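+// In short, the function that follows looks the blob name up among the
+// currently available (unconsumed) top blobs, fails loudly if the name is
+// unknown, records the blob in this layer's bottom vectors, erases it from
+// available_blobs so the same top is not later treated as a network output,
+// and finally decides whether gradients should flow into it based on
+// blob_need_backward_ and any per-bottom propagate_down setting.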
+template +int Net::AppendBottom(const NetParameter& param, const int layer_id, + const int bottom_id, set* available_blobs, + map* blob_name_to_idx) { + const LayerParameter& layer_param = param.layer(layer_id); + const string& blob_name = layer_param.bottom(bottom_id); + if (available_blobs->find(blob_name) == available_blobs->end()) { + LOG(FATAL) << "Unknown blob input " << blob_name + << " (at index " << bottom_id << ") to layer " << layer_id; + } + const int blob_id = (*blob_name_to_idx)[blob_name]; + if (Caffe::root_solver()) { + LOG(INFO) << layer_names_[layer_id] << " <- " << blob_name; + } + bottom_vecs_[layer_id].push_back(blobs_[blob_id].get()); + bottom_id_vecs_[layer_id].push_back(blob_id); + available_blobs->erase(blob_name); + bool propagate_down = true; + // Check if the backpropagation on bottom_id should be skipped + if (layer_param.propagate_down_size() > 0) + propagate_down = layer_param.propagate_down(bottom_id); + const bool need_backward = blob_need_backward_[blob_id] && + propagate_down; + bottom_need_backward_[layer_id].push_back(need_backward); + return blob_id; +} + +template +void Net::AppendParam(const NetParameter& param, const int layer_id, + const int param_id) { + const LayerParameter& layer_param = layers_[layer_id]->layer_param(); + const int param_size = layer_param.param_size(); + string param_name = + (param_size > param_id) ? layer_param.param(param_id).name() : ""; + if (param_name.size()) { + param_display_names_.push_back(param_name); + } else { + ostringstream param_display_name; + param_display_name << param_id; + param_display_names_.push_back(param_display_name.str()); + } + const int net_param_id = params_.size(); + params_.push_back(layers_[layer_id]->blobs()[param_id]); + param_id_vecs_[layer_id].push_back(net_param_id); + param_layer_indices_.push_back(make_pair(layer_id, param_id)); + ParamSpec default_param_spec; + const ParamSpec* param_spec = (layer_param.param_size() > param_id) ? + &layer_param.param(param_id) : &default_param_spec; + if (!param_size || !param_name.size() || (param_name.size() && + param_names_index_.find(param_name) == param_names_index_.end())) { + // This layer "owns" this parameter blob -- it is either anonymous + // (i.e., not given a param_name) or explicitly given a name that we + // haven't already seen. 
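+  // (Weight sharing works by name: two layers that both declare, e.g.,
+  //  param { name: "shared_weights" } end up pointing at one learnable blob,
+  //  handled in the else-branch below; the example name is illustrative.
+  //  In this fork, parameter blobs with param_id >= 2 -- the pruning masks
+  //  carried by the CConvolution/CInnerProduct surgery layers, judging from
+  //  the special case in CopyTrainedLayersFrom later in this file --
+  //  additionally have their learnable ids collected in mask_param_ids_.)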
+ param_owners_.push_back(-1); + if (param_name.size()) { + param_names_index_[param_name] = net_param_id; + } + const int learnable_param_id = learnable_params_.size(); + learnable_params_.push_back(params_[net_param_id].get()); + learnable_param_ids_.push_back(learnable_param_id); + /************ For dynamic network surgery ***************/ + if(param_id>=2) { + mask_param_ids_.push_back(learnable_param_id); + } + /********************************************************/ + has_params_lr_.push_back(param_spec->has_lr_mult()); + has_params_decay_.push_back(param_spec->has_decay_mult()); + params_lr_.push_back(param_spec->lr_mult()); + params_weight_decay_.push_back(param_spec->decay_mult()); + } else { + // Named param blob with name we've seen before: share params + const int owner_net_param_id = param_names_index_[param_name]; + param_owners_.push_back(owner_net_param_id); + const pair& owner_index = + param_layer_indices_[owner_net_param_id]; + const int owner_layer_id = owner_index.first; + const int owner_param_id = owner_index.second; + LOG_IF(INFO, Caffe::root_solver()) << "Sharing parameters '" << param_name + << "' owned by " + << "layer '" << layer_names_[owner_layer_id] << "', param " + << "index " << owner_param_id; + Blob* this_blob = layers_[layer_id]->blobs()[param_id].get(); + Blob* owner_blob = + layers_[owner_layer_id]->blobs()[owner_param_id].get(); + const int param_size = layer_param.param_size(); + if (param_size > param_id && (layer_param.param(param_id).share_mode() == + ParamSpec_DimCheckMode_PERMISSIVE)) { + // Permissive dimension checking -- only check counts are the same. + CHECK_EQ(this_blob->count(), owner_blob->count()) + << "Shared parameter blobs must have the same count."; + } else { + // Strict dimension checking -- all dims must be the same. 
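+      // (PERMISSIVE sharing above only requires equal element counts, so a
+      //  blob of shape (10, 1, 1, 1) could be shared with one of shape (10);
+      //  the default strict mode checked here requires identical shapes.
+      //  The shapes are assumed example values.)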
+ CHECK(this_blob->shape() == owner_blob->shape()); + } + const int learnable_param_id = learnable_param_ids_[owner_net_param_id]; + learnable_param_ids_.push_back(learnable_param_id); + /************ For dynamic network surgery ***************/ + if(param_id>=2) { + mask_param_ids_.push_back(learnable_param_id); + } + /********************************************************/ + if (param_spec->has_lr_mult()) { + if (has_params_lr_[learnable_param_id]) { + CHECK_EQ(param_spec->lr_mult(), params_lr_[learnable_param_id]) + << "Shared param '" << param_name << "' has mismatched lr_mult."; + } else { + has_params_lr_[learnable_param_id] = true; + params_lr_[learnable_param_id] = param_spec->lr_mult(); + } + } + if (param_spec->has_decay_mult()) { + if (has_params_decay_[learnable_param_id]) { + CHECK_EQ(param_spec->decay_mult(), + params_weight_decay_[learnable_param_id]) + << "Shared param '" << param_name << "' has mismatched decay_mult."; + } else { + has_params_decay_[learnable_param_id] = true; + params_weight_decay_[learnable_param_id] = param_spec->decay_mult(); + } + } + } +} + +template +Dtype Net::ForwardFromTo(int start, int end) { + CHECK_GE(start, 0); + CHECK_LT(end, layers_.size()); + Dtype loss = 0; + if (debug_info_) { + for (int i = 0; i < net_input_blobs_.size(); ++i) { + InputDebugInfo(i); + } + } + for (int i = start; i <= end; ++i) { + // LOG(ERROR) << "Forwarding " << layer_names_[i]; + Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]); + loss += layer_loss; + if (debug_info_) { ForwardDebugInfo(i); } + } + return loss; +} + +template +Dtype Net::ForwardFrom(int start) { + return ForwardFromTo(start, layers_.size() - 1); +} + +template +Dtype Net::ForwardTo(int end) { + return ForwardFromTo(0, end); +} + +template +const vector*>& Net::ForwardPrefilled(Dtype* loss) { + if (loss != NULL) { + *loss = ForwardFromTo(0, layers_.size() - 1); + } else { + ForwardFromTo(0, layers_.size() - 1); + } + return net_output_blobs_; +} + +template +const vector*>& Net::Forward( + const vector*> & bottom, Dtype* loss) { + // Copy bottom to internal bottom + for (int i = 0; i < bottom.size(); ++i) { + net_input_blobs_[i]->CopyFrom(*bottom[i]); + } + return ForwardPrefilled(loss); +} + +template +string Net::Forward(const string& input_blob_protos, Dtype* loss) { + BlobProtoVector blob_proto_vec; + if (net_input_blobs_.size()) { + blob_proto_vec.ParseFromString(input_blob_protos); + CHECK_EQ(blob_proto_vec.blobs_size(), net_input_blobs_.size()) + << "Incorrect input size."; + for (int i = 0; i < blob_proto_vec.blobs_size(); ++i) { + net_input_blobs_[i]->FromProto(blob_proto_vec.blobs(i)); + } + } + ForwardPrefilled(loss); + blob_proto_vec.Clear(); + for (int i = 0; i < net_output_blobs_.size(); ++i) { + net_output_blobs_[i]->ToProto(blob_proto_vec.add_blobs()); + } + string output; + blob_proto_vec.SerializeToString(&output); + return output; +} + +template +void Net::BackwardFromTo(int start, int end) { + CHECK_GE(end, 0); + CHECK_LT(start, layers_.size()); + for (int i = start; i >= end; --i) { + if (layer_need_backward_[i]) { + layers_[i]->Backward( + top_vecs_[i], bottom_need_backward_[i], bottom_vecs_[i]); + if (debug_info_) { BackwardDebugInfo(i); } + } + } +} + +template +void Net::InputDebugInfo(const int input_id) { + const Blob& blob = *net_input_blobs_[input_id]; + const string& blob_name = blob_names_[net_input_blob_indices_[input_id]]; + const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); + if (Caffe::root_solver()) { + LOG(INFO) << " [Forward] 
" + << "Input " << blob_name << " data: " << data_abs_val_mean; + } +} + +template +void Net::ForwardDebugInfo(const int layer_id) { + for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { + const Blob& blob = *top_vecs_[layer_id][top_id]; + const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; + const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); + if (Caffe::root_solver()) { + LOG(INFO) << " [Forward] " + << "Layer " << layer_names_[layer_id] + << ", top blob " << blob_name + << " data: " << data_abs_val_mean; + } + } + for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); + ++param_id) { + const Blob& blob = *layers_[layer_id]->blobs()[param_id]; + const int net_param_id = param_id_vecs_[layer_id][param_id]; + const string& blob_name = param_display_names_[net_param_id]; + const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); + if (Caffe::root_solver()) { + LOG(INFO) << " [Forward] " + << "Layer " << layer_names_[layer_id] + << ", param blob " << blob_name + << " data: " << data_abs_val_mean; + } + } +} + +template +void Net::BackwardDebugInfo(const int layer_id) { + const vector*>& bottom_vec = bottom_vecs_[layer_id]; + for (int bottom_id = 0; bottom_id < bottom_vec.size(); ++bottom_id) { + if (!bottom_need_backward_[layer_id][bottom_id]) { continue; } + const Blob& blob = *bottom_vec[bottom_id]; + const string& blob_name = blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; + const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); + if (Caffe::root_solver()) { + LOG(INFO) << " [Backward] " + << "Layer " << layer_names_[layer_id] + << ", bottom blob " << blob_name + << " diff: " << diff_abs_val_mean; + } + } + for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); + ++param_id) { + if (!layers_[layer_id]->param_propagate_down(param_id)) { continue; } + const Blob& blob = *layers_[layer_id]->blobs()[param_id]; + const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); + if (Caffe::root_solver()) { + LOG(INFO) << " [Backward] " + << "Layer " << layer_names_[layer_id] + << ", param blob " << param_id + << " diff: " << diff_abs_val_mean; + } + } +} + +template +void Net::UpdateDebugInfo(const int param_id) { + const Blob& blob = *params_[param_id]; + const int param_owner = param_owners_[param_id]; + const string& layer_name = layer_names_[param_layer_indices_[param_id].first]; + const string& param_display_name = param_display_names_[param_id]; + const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); + if (param_owner < 0) { + const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); + if (Caffe::root_solver()) { + LOG(INFO) << " [Update] Layer " << layer_name + << ", param " << param_display_name + << " data: " << data_abs_val_mean + << "; diff: " << diff_abs_val_mean; + } + } else { + const string& owner_layer_name = + layer_names_[param_layer_indices_[param_owner].first]; + if (Caffe::root_solver()) { + LOG(INFO) << " [Update] Layer " << layer_name + << ", param blob " << param_display_name + << " (owned by layer " << owner_layer_name << ", " << "param " + << param_display_names_[param_owners_[param_id]] << ")" + << " diff: " << diff_abs_val_mean; + } + } +} + +template +void Net::ShareTrainedLayersWith(const Net* other) { + int num_source_layers = other->layers().size(); + for (int i = 0; i < num_source_layers; ++i) { + Layer* source_layer = other->layers()[i].get(); + const string& source_layer_name = other->layer_names()[i]; + int target_layer_id = 0; + while 
(target_layer_id != layer_names_.size() && + layer_names_[target_layer_id] != source_layer_name) { + ++target_layer_id; + } + if (target_layer_id == layer_names_.size()) { + DLOG(INFO) << "Ignoring source layer " << source_layer_name; + continue; + } + DLOG(INFO) << "Copying source layer " << source_layer_name; + vector > >& target_blobs = + layers_[target_layer_id]->blobs(); + CHECK_EQ(target_blobs.size(), source_layer->blobs().size()) + << "Incompatible number of blobs for layer " << source_layer_name; + for (int j = 0; j < target_blobs.size(); ++j) { + Blob* source_blob = source_layer->blobs()[j].get(); + CHECK(target_blobs[j]->shape() == source_blob->shape()); + target_blobs[j]->ShareData(*source_blob); + } + } +} + +template +void Net::BackwardFrom(int start) { + BackwardFromTo(start, 0); +} + +template +void Net::BackwardTo(int end) { + BackwardFromTo(layers_.size() - 1, end); +} + +template +void Net::Backward() { + BackwardFromTo(layers_.size() - 1, 0); + if (debug_info_) { + Dtype asum_data = 0, asum_diff = 0, sumsq_data = 0, sumsq_diff = 0; + for (int i = 0; i < params_.size(); ++i) { + if (param_owners_[i] >= 0) { continue; } + asum_data += params_[i]->asum_data(); + asum_diff += params_[i]->asum_diff(); + sumsq_data += params_[i]->sumsq_data(); + sumsq_diff += params_[i]->sumsq_diff(); + } + const Dtype l2norm_data = std::sqrt(sumsq_data); + const Dtype l2norm_diff = std::sqrt(sumsq_diff); + LOG(ERROR) << " [Backward] All net params (data, diff): " + << "L1 norm = (" << asum_data << ", " << asum_diff << "); " + << "L2 norm = (" << l2norm_data << ", " << l2norm_diff << ")"; + } +} + +template +void Net::Reshape() { + for (int i = 0; i < layers_.size(); ++i) { + layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]); + } +} + +template +void Net::CopyTrainedLayersFrom(const NetParameter& param) { + int num_source_layers = param.layer_size(); + for (int i = 0; i < num_source_layers; ++i) { + const LayerParameter& source_layer = param.layer(i); + const string& source_layer_name = source_layer.name(); + int target_layer_id = 0; + while (target_layer_id != layer_names_.size() && + layer_names_[target_layer_id] != source_layer_name) { + ++target_layer_id; + } + if (target_layer_id == layer_names_.size()) { + DLOG(INFO) << "Ignoring source layer " << source_layer_name; + continue; + } + DLOG(INFO) << "Copying source layer " << source_layer_name; + vector > >& target_blobs = + layers_[target_layer_id]->blobs(); + + /************ For dynamic network surgery ***************/ + if (strcmp(layers_[target_layer_id]->type(),"CInnerProduct")==0 || strcmp(layers_[target_layer_id]->type(),"CConvolution" )==0 ) { + if(target_blobs.size() > source_layer.blobs_size()) { + for (int j = 0; j < source_layer.blobs_size(); ++j) { + const bool kReshape = false; + target_blobs[j]->FromProto(source_layer.blobs(j), kReshape); + } + continue; + } + } + /********************************************************/ + CHECK_EQ(target_blobs.size(), source_layer.blobs_size()) + << "Incompatible number of blobs for layer " << source_layer_name; + for (int j = 0; j < target_blobs.size(); ++j) { + const bool kReshape = false; + target_blobs[j]->FromProto(source_layer.blobs(j), kReshape); + } + } +} + +template +void Net::CopyTrainedLayersFrom(const string trained_filename) { + if (trained_filename.size() >= 3 && + trained_filename.compare(trained_filename.size() - 3, 3, ".h5") == 0) { + CopyTrainedLayersFromHDF5(trained_filename); + } else { + CopyTrainedLayersFromBinaryProto(trained_filename); + } +} + +template +void 
Net::CopyTrainedLayersFromBinaryProto( + const string trained_filename) { + NetParameter param; + ReadNetParamsFromBinaryFileOrDie(trained_filename, ¶m); + CopyTrainedLayersFrom(param); +} + +template +void Net::CopyTrainedLayersFromHDF5(const string trained_filename) { + hid_t file_hid = H5Fopen(trained_filename.c_str(), H5F_ACC_RDONLY, + H5P_DEFAULT); + CHECK_GE(file_hid, 0) << "Couldn't open " << trained_filename; + hid_t data_hid = H5Gopen2(file_hid, "data", H5P_DEFAULT); + CHECK_GE(data_hid, 0) << "Error reading weights from " << trained_filename; + int num_layers = hdf5_get_num_links(data_hid); + for (int i = 0; i < num_layers; ++i) { + string source_layer_name = hdf5_get_name_by_idx(data_hid, i); + if (!layer_names_index_.count(source_layer_name)) { + DLOG(INFO) << "Ignoring source layer " << source_layer_name; + continue; + } + int target_layer_id = layer_names_index_[source_layer_name]; + DLOG(INFO) << "Copying source layer " << source_layer_name; + vector > >& target_blobs = + layers_[target_layer_id]->blobs(); + hid_t layer_hid = H5Gopen2(data_hid, source_layer_name.c_str(), + H5P_DEFAULT); + CHECK_GE(layer_hid, 0) + << "Error reading weights from " << trained_filename; + // Check that source layer doesn't have more params than target layer + int num_source_params = hdf5_get_num_links(layer_hid); + CHECK_LE(num_source_params, target_blobs.size()) + << "Incompatible number of blobs for layer " << source_layer_name; + for (int j = 0; j < target_blobs.size(); ++j) { + ostringstream oss; + oss << j; + string dataset_name = oss.str(); + int target_net_param_id = param_id_vecs_[target_layer_id][j]; + if (!H5Lexists(layer_hid, dataset_name.c_str(), H5P_DEFAULT)) { + // Target param doesn't exist in source weights... + if (param_owners_[target_net_param_id] != -1) { + // ...but it's weight-shared in target, so that's fine. 
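+          // The blob shares its data with an owner parameter elsewhere in the
+          // net (see ShareWeights()), and the owner's dataset is loaded from
+          // its own layer group, so skipping the missing dataset loses nothing.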
+ continue; + } else { + LOG(FATAL) << "Incompatible number of blobs for layer " + << source_layer_name; + } + } + hdf5_load_nd_dataset(layer_hid, dataset_name.c_str(), 0, kMaxBlobAxes, + target_blobs[j].get()); + } + H5Gclose(layer_hid); + } + H5Gclose(data_hid); + H5Fclose(file_hid); +} + +template +void Net::ToProto(NetParameter* param, bool write_diff) const { + param->Clear(); + param->set_name(name_); + // Add bottom and top + for (int i = 0; i < net_input_blob_indices_.size(); ++i) { + param->add_input(blob_names_[net_input_blob_indices_[i]]); + } + DLOG(INFO) << "Serializing " << layers_.size() << " layers"; + for (int i = 0; i < layers_.size(); ++i) { + LayerParameter* layer_param = param->add_layer(); + for (int j = 0; j < bottom_id_vecs_[i].size(); ++j) { + layer_param->add_bottom(blob_names_[bottom_id_vecs_[i][j]]); + } + for (int j = 0; j < top_id_vecs_[i].size(); ++j) { + layer_param->add_top(blob_names_[top_id_vecs_[i][j]]); + } + layers_[i]->ToProto(layer_param, write_diff); + } +} + +template +void Net::ToHDF5(const string& filename, bool write_diff) const { + hid_t file_hid = H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, + H5P_DEFAULT); + CHECK_GE(file_hid, 0) + << "Couldn't open " << filename << " to save weights."; + hid_t data_hid = H5Gcreate2(file_hid, "data", H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK_GE(data_hid, 0) << "Error saving weights to " << filename << "."; + hid_t diff_hid = -1; + if (write_diff) { + diff_hid = H5Gcreate2(file_hid, "diff", H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK_GE(diff_hid, 0) << "Error saving weights to " << filename << "."; + } + for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) { + const LayerParameter& layer_param = layers_[layer_id]->layer_param(); + string layer_name = layer_param.name(); + hid_t layer_data_hid = H5Gcreate2(data_hid, layer_name.c_str(), + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK_GE(layer_data_hid, 0) + << "Error saving weights to " << filename << "."; + hid_t layer_diff_hid = -1; + if (write_diff) { + layer_diff_hid = H5Gcreate2(diff_hid, layer_name.c_str(), + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK_GE(layer_diff_hid, 0) + << "Error saving weights to " << filename << "."; + } + int num_params = layers_[layer_id]->blobs().size(); + for (int param_id = 0; param_id < num_params; ++param_id) { + ostringstream dataset_name; + dataset_name << param_id; + const int net_param_id = param_id_vecs_[layer_id][param_id]; + if (param_owners_[net_param_id] == -1) { + // Only save params that own themselves + hdf5_save_nd_dataset(layer_data_hid, dataset_name.str(), + *params_[net_param_id]); + } + if (write_diff) { + // Write diffs regardless of weight-sharing + hdf5_save_nd_dataset(layer_diff_hid, dataset_name.str(), + *params_[net_param_id], true); + } + } + H5Gclose(layer_data_hid); + if (write_diff) { + H5Gclose(layer_diff_hid); + } + } + H5Gclose(data_hid); + if (write_diff) { + H5Gclose(diff_hid); + } + H5Fclose(file_hid); +} + +template +void Net::Update() { + for (int i = 0; i < learnable_params_.size(); ++i) { + /************ For dynamic network surgery ***************/ + if (std::find(mask_param_ids_.begin(), mask_param_ids_.end(), + learnable_param_ids_[i]) != mask_param_ids_.end()) + continue; + /********************************************************/ + learnable_params_[i]->Update(); + } +} + +template +void Net::ClearParamDiffs() { + for (int i = 0; i < learnable_params_.size(); ++i) { + Blob* blob = learnable_params_[i]; + switch (Caffe::mode()) { + case 
Caffe::CPU: + caffe_set(blob->count(), static_cast(0), + blob->mutable_cpu_diff()); + break; + case Caffe::GPU: +#ifndef CPU_ONLY + caffe_gpu_set(blob->count(), static_cast(0), + blob->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + } +} + +template +void Net::ShareWeights() { + for (int i = 0; i < params_.size(); ++i) { + if (param_owners_[i] < 0) { continue; } + params_[i]->ShareData(*params_[param_owners_[i]]); + params_[i]->ShareDiff(*params_[param_owners_[i]]); + } +} + +template +bool Net::has_blob(const string& blob_name) const { + return blob_names_index_.find(blob_name) != blob_names_index_.end(); +} + +template +const shared_ptr > Net::blob_by_name( + const string& blob_name) const { + shared_ptr > blob_ptr; + if (has_blob(blob_name)) { + blob_ptr = blobs_[blob_names_index_.find(blob_name)->second]; + } else { + blob_ptr.reset((Blob*)(NULL)); + LOG(WARNING) << "Unknown blob name " << blob_name; + } + return blob_ptr; +} + +template +bool Net::has_layer(const string& layer_name) const { + return layer_names_index_.find(layer_name) != layer_names_index_.end(); +} + +template +const shared_ptr > Net::layer_by_name( + const string& layer_name) const { + shared_ptr > layer_ptr; + if (has_layer(layer_name)) { + layer_ptr = layers_[layer_names_index_.find(layer_name)->second]; + } else { + layer_ptr.reset((Layer*)(NULL)); + LOG(WARNING) << "Unknown layer name " << layer_name; + } + return layer_ptr; +} + +INSTANTIATE_CLASS(Net); + +} // namespace caffe diff --git a/src/caffe/parallel.cpp b/src/caffe/parallel.cpp new file mode 100755 index 0000000..a6d154e --- /dev/null +++ b/src/caffe/parallel.cpp @@ -0,0 +1,441 @@ +#ifndef CPU_ONLY +#include +#endif +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "boost/thread.hpp" +#include "caffe/caffe.hpp" +#include "caffe/parallel.hpp" + +namespace caffe { + +enum Op { + copy, + replace_cpu, + replace_gpu, + replace_cpu_diff, + replace_gpu_diff +}; + +template +static void apply_buffers(const vector*>& blobs, + Dtype* buffer, size_t total_size, Op op) { + Dtype* ptr = buffer; + for (int i = 0; i < blobs.size(); ++i) { + int size = blobs[i]->count(); + switch (op) { + case copy: { + // Init buffer to current values of blobs + caffe_copy(size, + reinterpret_cast(blobs[i]->data()->cpu_data()), + ptr); + break; + } + case replace_cpu: + blobs[i]->data()->set_cpu_data(ptr); + break; + case replace_gpu: + blobs[i]->data()->set_gpu_data(ptr); + break; + case replace_cpu_diff: + blobs[i]->diff()->set_cpu_data(ptr); + break; + case replace_gpu_diff: + blobs[i]->diff()->set_gpu_data(ptr); + break; + } + ptr += size; + } + // total_size is at least one byte + CHECK_EQ(total_size, (ptr == buffer ? 1 : ptr - buffer)); +} + +// Buffer size necessary to store given blobs +template +static size_t total_size(const vector*>& params) { + size_t size = 0; + for (int i = 0; i < params.size(); ++i) + size += params[i]->count(); + // Size have at least one byte, otherwise cudaMalloc fails if net has no + // learnable parameters. + return (size > 0) ? 
size : 1; +} + +template +Params::Params(shared_ptr > root_solver) + : size_(total_size(root_solver->net()->learnable_params())), + data_(), + diff_() { +} + +template +GPUParams::GPUParams(shared_ptr > root_solver, int device) + : Params(root_solver) { +#ifndef CPU_ONLY + int initial_device; + CUDA_CHECK(cudaGetDevice(&initial_device)); + + // Allocate device buffers + CUDA_CHECK(cudaSetDevice(device)); + CUDA_CHECK(cudaMalloc(&data_, size_ * sizeof(Dtype))); + + // Copy blob values + const vector*>& net = + root_solver->net()->learnable_params(); + apply_buffers(net, data_, size_, copy); + + CUDA_CHECK(cudaMalloc(&diff_, size_ * sizeof(Dtype))); + caffe_gpu_set(size_, Dtype(0), diff_); + + CUDA_CHECK(cudaSetDevice(initial_device)); +#else + NO_GPU; +#endif +} + +template +GPUParams::~GPUParams() { +#ifndef CPU_ONLY + CUDA_CHECK(cudaFree(data_)); + CUDA_CHECK(cudaFree(diff_)); +#endif +} + +template +void GPUParams::configure(Solver* solver) const { + const vector*>& net = + solver->net()->learnable_params(); + apply_buffers(net, data_, size_, replace_gpu); + apply_buffers(net, diff_, size_, replace_gpu_diff); +} + +void DevicePair::compute(const vector devices, vector* pairs) { +#ifndef CPU_ONLY + vector remaining(devices); + + // Depth for reduction tree + int remaining_depth = static_cast(ceil(log2(remaining.size()))); + + // Group GPUs by board + for (int d = 0; d < remaining_depth; ++d) { + for (int i = 0; i < remaining.size(); ++i) { + for (int j = i + 1; j < remaining.size(); ++j) { + cudaDeviceProp a, b; + CUDA_CHECK(cudaGetDeviceProperties(&a, remaining[i])); + CUDA_CHECK(cudaGetDeviceProperties(&b, remaining[j])); + if (a.isMultiGpuBoard && b.isMultiGpuBoard) { + if (a.multiGpuBoardGroupID == b.multiGpuBoardGroupID) { + pairs->push_back(DevicePair(remaining[i], remaining[j])); + DLOG(INFO) << "GPU board: " << remaining[i] << ":" << remaining[j]; + remaining.erase(remaining.begin() + j); + break; + } + } + } + } + } + ostringstream s; + for (int i = 0; i < remaining.size(); ++i) { + s << (i ? ", " : "") << remaining[i]; + } + DLOG(INFO) << "GPUs paired by boards, remaining: " << s.str(); + + // Group by P2P accessibility + remaining_depth = ceil(log2(remaining.size())); + for (int d = 0; d < remaining_depth; ++d) { + for (int i = 0; i < remaining.size(); ++i) { + for (int j = i + 1; j < remaining.size(); ++j) { + int access; + CUDA_CHECK( + cudaDeviceCanAccessPeer(&access, remaining[i], remaining[j])); + if (access) { + pairs->push_back(DevicePair(remaining[i], remaining[j])); + DLOG(INFO) << "P2P pair: " << remaining[i] << ":" << remaining[j]; + remaining.erase(remaining.begin() + j); + break; + } + } + } + } + s.str(""); + for (int i = 0; i < remaining.size(); ++i) { + s << (i ? 
", " : "") << remaining[i]; + } + DLOG(INFO) << "GPUs paired by P2P access, remaining: " << s.str(); + + // Group remaining + remaining_depth = ceil(log2(remaining.size())); + for (int d = 0; d < remaining_depth; ++d) { + for (int i = 0; i < remaining.size(); ++i) { + pairs->push_back(DevicePair(remaining[i], remaining[i + 1])); + DLOG(INFO) << "Remaining pair: " << remaining[i] << ":" + << remaining[i + 1]; + remaining.erase(remaining.begin() + i + 1); + } + } + + // Should only be the parent node remaining + CHECK_EQ(remaining.size(), 1); + + pairs->insert(pairs->begin(), DevicePair(-1, remaining[0])); + + CHECK(pairs->size() == devices.size()); + for (int i = 0; i < pairs->size(); ++i) { + CHECK((*pairs)[i].parent() != (*pairs)[i].device()); + for (int j = i + 1; j < pairs->size(); ++j) { + CHECK((*pairs)[i].device() != (*pairs)[j].device()); + } + } +#else + NO_GPU; +#endif +} + +// + +template +P2PSync::P2PSync(shared_ptr > root_solver, + P2PSync* parent, const SolverParameter& param) + : GPUParams(root_solver, param.device_id()), + parent_(parent), + children_(), + queue_(), + initial_iter_(root_solver->iter()), + solver_() { +#ifndef CPU_ONLY + int initial_device; + CUDA_CHECK(cudaGetDevice(&initial_device)); + const int self = param.device_id(); + CUDA_CHECK(cudaSetDevice(self)); + + if (parent == NULL) { + solver_ = root_solver; + } else { + Caffe::set_root_solver(false); + solver_.reset(new WorkerSolver(param, root_solver.get())); + Caffe::set_root_solver(true); + } + this->configure(solver_.get()); + solver_->add_callback(this); + + if (parent) { + // Enable p2p access between devices + const int peer = parent->solver_->param().device_id(); + int access; + CUDA_CHECK(cudaDeviceCanAccessPeer(&access, self, peer)); + if (access) { + CUDA_CHECK(cudaDeviceEnablePeerAccess(peer, 0)); + } else { + LOG(INFO)<< "GPU " << self << " does not have p2p access to GPU " << peer; + } + // Allocate receiving buffer on parent + CUDA_CHECK(cudaSetDevice(peer)); + CUDA_CHECK(cudaMalloc(&parent_grads_, size_ * sizeof(Dtype))); + CUDA_CHECK(cudaSetDevice(self)); + } + + CUDA_CHECK(cudaSetDevice(initial_device)); +#else + NO_GPU; +#endif +} + +template +P2PSync::~P2PSync() { +#ifndef CPU_ONLY + int initial_device; + CUDA_CHECK(cudaGetDevice(&initial_device)); + const int self = solver_->param().device_id(); + CUDA_CHECK(cudaSetDevice(self)); + + if (parent_) { + CUDA_CHECK(cudaFree(parent_grads_)); + const int peer = parent_->solver_->param().device_id(); + int access; + CUDA_CHECK(cudaDeviceCanAccessPeer(&access, self, peer)); + if (access) { + CUDA_CHECK(cudaDeviceDisablePeerAccess(peer)); + } + } + + CUDA_CHECK(cudaSetDevice(initial_device)); +#endif +} + +template +void P2PSync::InternalThreadEntry() { + Caffe::SetDevice(solver_->param().device_id()); + CHECK(Caffe::root_solver()); + Caffe::set_root_solver(false); + // See if there is a defined seed and reset random state if so + if (solver_->param().random_seed() >= 0) { + // Fetch random seed and modulate by device ID to make sure + // everyone doesn't have the same seed. 
We seem to have some + // solver instability if we have everyone with the same seed + Caffe::set_random_seed( + solver_->param().random_seed() + solver_->param().device_id()); + } + solver_->Step(solver_->param().max_iter() - initial_iter_); +} + +template +void P2PSync::on_start() { +#ifndef CPU_ONLY +#ifdef DEBUG + int device; + CUDA_CHECK(cudaGetDevice(&device)); + CHECK(device == solver_->param().device_id()); +#else +// CHECK(false); +#endif + + // Wait for update from parent + if (parent_) { + P2PSync *parent = queue_.pop(); + CHECK(parent == parent_); + } + + // Update children + for (int i = children_.size() - 1; i >= 0; i--) { + Dtype* src = data_; + Dtype* dst = children_[i]->data_; + +#ifdef DEBUG + cudaPointerAttributes attributes; + CUDA_CHECK(cudaPointerGetAttributes(&attributes, src)); + CHECK(attributes.device == device); + CUDA_CHECK(cudaPointerGetAttributes(&attributes, dst)); + CHECK(attributes.device == children_[i]->solver_->param().device_id()); +#endif + + CUDA_CHECK(cudaMemcpyAsync(dst, src, size_ * sizeof(Dtype), + cudaMemcpyDeviceToDevice, cudaStreamDefault)); + CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); + children_[i]->queue_.push(this); + } +#endif +} + +template +void P2PSync::on_gradients_ready() { +#ifndef CPU_ONLY +#ifdef DEBUG + int device; + CUDA_CHECK(cudaGetDevice(&device)); + CHECK(device == solver_->param().device_id()); +#endif + + // Sum children gradients as they appear in the queue + for (int i = 0; i < children_.size(); ++i) { + P2PSync *child = queue_.pop(); + Dtype* src = child->parent_grads_; + Dtype* dst = diff_; + +#ifdef DEBUG + bool ok = false; + for (int j = 0; j < children_.size(); ++j) { + if (child == children_[j]) { + ok = true; + } + } + CHECK(ok); + cudaPointerAttributes attributes; + CUDA_CHECK(cudaPointerGetAttributes(&attributes, src)); + CHECK(attributes.device == device); + CUDA_CHECK(cudaPointerGetAttributes(&attributes, dst)); + CHECK(attributes.device == device); +#endif + + caffe_gpu_add(size_, src, dst, dst); + } + + // Send gradients to parent + if (parent_) { + Dtype* src = diff_; + Dtype* dst = parent_grads_; + +#ifdef DEBUG + cudaPointerAttributes attributes; + CUDA_CHECK(cudaPointerGetAttributes(&attributes, src)); + CHECK(attributes.device == device); + CUDA_CHECK(cudaPointerGetAttributes(&attributes, dst)); + CHECK(attributes.device == parent_->solver_->param().device_id()); +#endif + + CUDA_CHECK(cudaMemcpyAsync(dst, src, size_ * sizeof(Dtype), // + cudaMemcpyDeviceToDevice, cudaStreamDefault)); + CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); + parent_->queue_.push(this); + } else { + // Loss functions divide gradients by the batch size, so to compensate + // for split batch, the root solver divides by number of solvers. + caffe_gpu_scal(size_, Dtype(1.0 / Caffe::solver_count()), diff_); + } +#endif +} + +template +void P2PSync::run(const vector& gpus) { + // Pair devices for map-reduce synchronization + vector pairs; + DevicePair::compute(gpus, &pairs); + ostringstream s; + for (int i = 1; i < pairs.size(); ++i) { + s << (i == 1 ? "" : ", ") << pairs[i].parent() << ":" << pairs[i].device(); + } + LOG(INFO)<< "GPUs pairs " << s.str(); + + SolverParameter param(solver_->param()); + vector > > syncs(gpus.size()); + + // Build the GPU tree by finding the parent for each solver + for (int attempts = 0; attempts < pairs.size(); ++attempts) { + for (int i = 1; i < pairs.size(); ++i) { + if (!syncs[i].get()) { + P2PSync* parent = NULL; + for (int j = 0; j < syncs.size(); ++j) { + P2PSync* sync = j == 0 ? 
this : syncs[j].get(); + if (sync) { + const SolverParameter& p = sync->solver()->param(); + if (p.device_id() == pairs[i].parent()) { + parent = sync; + } + } + } + if (parent) { + param.set_device_id(pairs[i].device()); + syncs[i].reset(new P2PSync(solver_, parent, param)); + parent->children_.push_back((P2PSync*) syncs[i].get()); + } + } + } + } + + LOG(INFO)<< "Starting Optimization"; + + for (int i = 1; i < syncs.size(); ++i) { + syncs[i]->StartInternalThread(); + } + + // Run root solver on current thread + solver_->Solve(); + + for (int i = 1; i < syncs.size(); ++i) { + syncs[i]->StopInternalThread(); + } +} + +INSTANTIATE_CLASS(Params); +INSTANTIATE_CLASS(GPUParams); +INSTANTIATE_CLASS(P2PSync); + +} // namespace caffe diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto new file mode 100755 index 0000000..b45d467 --- /dev/null +++ b/src/caffe/proto/caffe.proto @@ -0,0 +1,1175 @@ +syntax = "proto2"; + +package caffe; + +// Specifies the shape (dimensions) of a Blob. +message BlobShape { + repeated int64 dim = 1 [packed = true]; +} + +message BlobProto { + optional BlobShape shape = 7; + repeated float data = 5 [packed = true]; + repeated float diff = 6 [packed = true]; + repeated double double_data = 8 [packed = true]; + repeated double double_diff = 9 [packed = true]; + + // 4D dimensions -- deprecated. Use "shape" instead. + optional int32 num = 1 [default = 0]; + optional int32 channels = 2 [default = 0]; + optional int32 height = 3 [default = 0]; + optional int32 width = 4 [default = 0]; +} + +// The BlobProtoVector is simply a way to pass multiple blobproto instances +// around. +message BlobProtoVector { + repeated BlobProto blobs = 1; +} + +message Datum { + optional int32 channels = 1; + optional int32 height = 2; + optional int32 width = 3; + // the actual image data, in bytes + optional bytes data = 4; + optional int32 label = 5; + // Optionally, the datum could also hold float data. + repeated float float_data = 6; + // If true data contains an encoded image that need to be decoded + optional bool encoded = 7 [default = false]; +} + +message FillerParameter { + // The filler type. + optional string type = 1 [default = 'constant']; + optional float value = 2 [default = 0]; // the value in constant filler + optional float min = 3 [default = 0]; // the min value in uniform filler + optional float max = 4 [default = 1]; // the max value in uniform filler + optional float mean = 5 [default = 0]; // the mean value in Gaussian filler + optional float std = 6 [default = 1]; // the std value in Gaussian filler + // The expected number of non-zero output weights for a given input in + // Gaussian filler -- the default -1 means don't perform sparsification. + optional int32 sparse = 7 [default = -1]; + // Normalize the filler variance by fan_in, fan_out, or their average. + // Applies to 'xavier' and 'msra' fillers. + enum VarianceNorm { + FAN_IN = 0; + FAN_OUT = 1; + AVERAGE = 2; + } + optional VarianceNorm variance_norm = 8 [default = FAN_IN]; +} + +message NetParameter { + optional string name = 1; // consider giving the network a name + // The input blobs to the network. + repeated string input = 3; + // The shape of the input blobs. + repeated BlobShape input_shape = 8; + + // 4D input dimensions -- deprecated. Use "shape" instead. + // If specified, for each input blob there should be four + // values specifying the num, channels, height and width of the input blob. + // Thus, there should be a total of (4 * #input) numbers. 
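+  // As a minimal illustration, a single 3-channel 224x224 input could be
+  // declared either with the deprecated fields below:
+  //   input: "data"
+  //   input_dim: 1  input_dim: 3  input_dim: 224  input_dim: 224
+  // or, preferably, with the shape form:
+  //   input: "data"
+  //   input_shape { dim: 1 dim: 3 dim: 224 dim: 224 }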
+ repeated int32 input_dim = 4; + + // Whether the network will force every layer to carry out backward operation. + // If set False, then whether to carry out backward is determined + // automatically according to the net structure and learning rates. + optional bool force_backward = 5 [default = false]; + // The current "state" of the network, including the phase, level, and stage. + // Some layers may be included/excluded depending on this state and the states + // specified in the layers' include and exclude fields. + optional NetState state = 6; + + // Print debugging information about results while running Net::Forward, + // Net::Backward, and Net::Update. + optional bool debug_info = 7 [default = false]; + + // The layers that make up the net. Each of their configurations, including + // connectivity and behavior, is specified as a LayerParameter. + repeated LayerParameter layer = 100; // ID 100 so layers are printed last. + + // DEPRECATED: use 'layer' instead. + repeated V1LayerParameter layers = 2; +} + +// NOTE +// Update the next available ID when you add a new SolverParameter field. +// +// SolverParameter next available ID: 40 (last added: momentum2) +message SolverParameter { + ////////////////////////////////////////////////////////////////////////////// + // Specifying the train and test networks + // + // Exactly one train net must be specified using one of the following fields: + // train_net_param, train_net, net_param, net + // One or more test nets may be specified using any of the following fields: + // test_net_param, test_net, net_param, net + // If more than one test net field is specified (e.g., both net and + // test_net are specified), they will be evaluated in the field order given + // above: (1) test_net_param, (2) test_net, (3) net_param/net. + // A test_iter must be specified for each test_net. + // A test_level and/or a test_stage may also be specified for each test_net. + ////////////////////////////////////////////////////////////////////////////// + + // Proto filename for the train net, possibly combined with one or more + // test nets. + optional string net = 24; + // Inline train net param, possibly combined with one or more test nets. + optional NetParameter net_param = 25; + + optional string train_net = 1; // Proto filename for the train net. + repeated string test_net = 2; // Proto filenames for the test nets. + optional NetParameter train_net_param = 21; // Inline train net params. + repeated NetParameter test_net_param = 22; // Inline test net params. + + // The states for the train/test nets. Must be unspecified or + // specified once per net. + // + // By default, all states will have solver = true; + // train_state will have phase = TRAIN, + // and all test_state's will have phase = TEST. + // Other defaults are set according to the NetState defaults. + optional NetState train_state = 26; + repeated NetState test_state = 27; + + // The number of iterations for each test net. + repeated int32 test_iter = 3; + + // The number of iterations between two testing phases. + optional int32 test_interval = 4 [default = 0]; + optional bool test_compute_loss = 19 [default = false]; + // If true, run an initial test pass before the first iteration, + // ensuring memory availability and printing the starting value of the loss. + optional bool test_initialization = 32 [default = true]; + optional float base_lr = 5; // The base learning rate + // the number of iterations between displaying info. If display = 0, no info + // will be displayed. 
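+  // As a worked example of test_iter / test_interval above: if the test net's
+  // data layer uses batch_size: 100 and the test set holds 10,000 images,
+  // test_iter: 100 covers the whole set once per evaluation, and
+  // test_interval: 500 runs that evaluation every 500 training iterations.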
+ optional int32 display = 6; + // Display the loss averaged over the last average_loss iterations + optional int32 average_loss = 33 [default = 1]; + optional int32 max_iter = 7; // the maximum number of iterations + // accumulate gradients over `iter_size` x `batch_size` instances + optional int32 iter_size = 36 [default = 1]; + + // The learning rate decay policy. The currently implemented learning rate + // policies are as follows: + // - fixed: always return base_lr. + // - step: return base_lr * gamma ^ (floor(iter / step)) + // - exp: return base_lr * gamma ^ iter + // - inv: return base_lr * (1 + gamma * iter) ^ (- power) + // - multistep: similar to step but it allows non uniform steps defined by + // stepvalue + // - poly: the effective learning rate follows a polynomial decay, to be + // zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) + // - sigmoid: the effective learning rate follows a sigmod decay + // return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) + // + // where base_lr, max_iter, gamma, step, stepvalue and power are defined + // in the solver parameter protocol buffer, and iter is the current iteration. + optional string lr_policy = 8; + optional float gamma = 9; // The parameter to compute the learning rate. + optional float power = 10; // The parameter to compute the learning rate. + optional float momentum = 11; // The momentum value. + optional float weight_decay = 12; // The weight decay. + // regularization types supported: L1 and L2 + // controlled by weight_decay + optional string regularization_type = 29 [default = "L2"]; + // the stepsize for learning rate policy "step" + optional int32 stepsize = 13; + // the stepsize for learning rate policy "multistep" + repeated int32 stepvalue = 34; + + // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm, + // whenever their actual L2 norm is larger. + optional float clip_gradients = 35 [default = -1]; + + optional int32 snapshot = 14 [default = 0]; // The snapshot interval + optional string snapshot_prefix = 15; // The prefix for the snapshot. + // whether to snapshot diff in the results or not. Snapshotting diff will help + // debugging but the final protocol buffer size will be much larger. + optional bool snapshot_diff = 16 [default = false]; + enum SnapshotFormat { + HDF5 = 0; + BINARYPROTO = 1; + } + optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO]; + // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. + enum SolverMode { + CPU = 0; + GPU = 1; + } + optional SolverMode solver_mode = 17 [default = GPU]; + // the device_id will that be used in GPU mode. Use device_id = 0 in default. + optional int32 device_id = 18 [default = 0]; + // If non-negative, the seed with which the Solver will initialize the Caffe + // random number generator -- useful for reproducible results. Otherwise, + // (and by default) initialize using a seed derived from the system clock. 
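+  // A worked example of the lr_policy formulas above: with base_lr: 0.01,
+  // lr_policy: "step", gamma: 0.1 and stepsize: 10000, the rate is
+  // 0.01 * 0.1 ^ floor(iter / 10000), i.e. 0.01 for iterations 0-9999,
+  // 0.001 for 10000-19999, 0.0001 for 20000-29999, and so on.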
+ optional int64 random_seed = 20 [default = -1]; + + // Solver type + enum SolverType { + SGD = 0; + NESTEROV = 1; + ADAGRAD = 2; + RMSPROP = 3; + ADADELTA = 4; + ADAM = 5; + } + optional SolverType solver_type = 30 [default = SGD]; + // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam + optional float delta = 31 [default = 1e-8]; + // parameters for the Adam solver + optional float momentum2 = 39 [default = 0.999]; + + // RMSProp decay value + // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) + optional float rms_decay = 38; + + // If true, print information about the state of the net that may help with + // debugging learning problems. + optional bool debug_info = 23 [default = false]; + + // If false, don't save a snapshot after training finishes. + optional bool snapshot_after_train = 28 [default = true]; +} + +// A message that stores the solver snapshots +message SolverState { + optional int32 iter = 1; // The current iteration + optional string learned_net = 2; // The file that stores the learned net. + repeated BlobProto history = 3; // The history for sgd solvers + optional int32 current_step = 4 [default = 0]; // The current step for learning rate +} + +enum Phase { + TRAIN = 0; + TEST = 1; +} + +message NetState { + optional Phase phase = 1 [default = TEST]; + optional int32 level = 2 [default = 0]; + repeated string stage = 3; +} + +message NetStateRule { + // Set phase to require the NetState have a particular phase (TRAIN or TEST) + // to meet this rule. + optional Phase phase = 1; + + // Set the minimum and/or maximum levels in which the layer should be used. + // Leave undefined to meet the rule regardless of level. + optional int32 min_level = 2; + optional int32 max_level = 3; + + // Customizable sets of stages to include or exclude. + // The net must have ALL of the specified stages and NONE of the specified + // "not_stage"s to meet the rule. + // (Use multiple NetStateRules to specify conjunctions of stages.) + repeated string stage = 4; + repeated string not_stage = 5; +} + +// Specifies training parameters (multipliers on global learning constants, +// and the name and other settings used for weight sharing). +message ParamSpec { + // The names of the parameter blobs -- useful for sharing parameters among + // layers, but never required otherwise. To share a parameter between two + // layers, give it a (non-empty) name. + optional string name = 1; + + // Whether to require shared weights to have the same shape, or just the same + // count -- defaults to STRICT if unspecified. + optional DimCheckMode share_mode = 2; + enum DimCheckMode { + // STRICT (default) requires that num, channels, height, width each match. + STRICT = 0; + // PERMISSIVE requires only the count (num*channels*height*width) to match. + PERMISSIVE = 1; + } + + // The multiplier on the global learning rate for this parameter. + optional float lr_mult = 3 [default = 1.0]; + + // The multiplier on the global weight decay for this parameter. + optional float decay_mult = 4 [default = 1.0]; +} + +// NOTE +// Update the next available ID when you add a new LayerParameter field. +// +// LayerParameter next available layer-specific ID: 137 (last added: reduction_param) +message LayerParameter { + optional string name = 1; // the layer name + optional string type = 2; // the layer type + repeated string bottom = 3; // the name of each bottom blob + repeated string top = 4; // the name of each top blob + + // The train / test phase for computation. 
+ optional Phase phase = 10; + + // The amount of weight to assign each top blob in the objective. + // Each layer assigns a default value, usually of either 0 or 1, + // to each top blob. + repeated float loss_weight = 5; + + // Specifies training parameters (multipliers on global learning constants, + // and the name and other settings used for weight sharing). + repeated ParamSpec param = 6; + + // The blobs containing the numeric parameters of the layer. + repeated BlobProto blobs = 7; + + // Specifies on which bottoms the backpropagation should be skipped. + // The size must be either 0 or equal to the number of bottoms. + repeated bool propagate_down = 11; + + // Rules controlling whether and when a layer is included in the network, + // based on the current NetState. You may specify a non-zero number of rules + // to include OR exclude, but not both. If no include or exclude rules are + // specified, the layer is always included. If the current NetState meets + // ANY (i.e., one or more) of the specified rules, the layer is + // included/excluded. + repeated NetStateRule include = 8; + repeated NetStateRule exclude = 9; + + // Parameters for data pre-processing. + optional TransformationParameter transform_param = 100; + + // Parameters shared by loss layers. + optional LossParameter loss_param = 101; + + // Layer type-specific parameters. + // + // Note: certain layers may have more than one computational engine + // for their implementation. These layers include an Engine type and + // engine parameter for selecting the implementation. + // The default for the engine is set by the ENGINE switch at compile-time. + optional AccuracyParameter accuracy_param = 102; + optional ArgMaxParameter argmax_param = 103; + optional ConcatParameter concat_param = 104; + optional ContrastiveLossParameter contrastive_loss_param = 105; + optional ConvolutionParameter convolution_param = 106; + optional DataParameter data_param = 107; + optional DropoutParameter dropout_param = 108; + optional DummyDataParameter dummy_data_param = 109; + optional EltwiseParameter eltwise_param = 110; + optional ExpParameter exp_param = 111; + optional FlattenParameter flatten_param = 135; + optional HDF5DataParameter hdf5_data_param = 112; + optional HDF5OutputParameter hdf5_output_param = 113; + optional HingeLossParameter hinge_loss_param = 114; + optional ImageDataParameter image_data_param = 115; + optional InfogainLossParameter infogain_loss_param = 116; + optional InnerProductParameter inner_product_param = 117; + optional LogParameter log_param = 134; + optional LRNParameter lrn_param = 118; + optional MemoryDataParameter memory_data_param = 119; + optional MVNParameter mvn_param = 120; + optional PoolingParameter pooling_param = 121; + optional PowerParameter power_param = 122; + optional PReLUParameter prelu_param = 131; + optional PythonParameter python_param = 130; + optional ReductionParameter reduction_param = 136; + optional ReLUParameter relu_param = 123; + optional ReshapeParameter reshape_param = 133; + optional SigmoidParameter sigmoid_param = 124; + optional SoftmaxParameter softmax_param = 125; + optional SPPParameter spp_param = 132; + optional SliceParameter slice_param = 126; + optional TanHParameter tanh_param = 127; + optional ThresholdParameter threshold_param = 128; + optional WindowDataParameter window_data_param = 129; + optional CConvolutionParameter cconvolution_param = 140; + optional CInnerProductParameter cinner_product_param = 141; +} + +// Message that stores parameters used to 
apply transformation +// to the data layer's data +message TransformationParameter { + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 1 [default = 1]; + // Specify if we want to randomly mirror data. + optional bool mirror = 2 [default = false]; + // Specify if we would like to randomly crop an image. + optional uint32 crop_size = 3 [default = 0]; + // mean_file and mean_value cannot be specified at the same time + optional string mean_file = 4; + // if specified can be repeated once (would substract it from all the channels) + // or can be repeated the same number of times as channels + // (would subtract them from the corresponding channel) + repeated float mean_value = 5; + // Force the decoded image to have 3 color channels. + optional bool force_color = 6 [default = false]; + // Force the decoded image to have 1 color channels. + optional bool force_gray = 7 [default = false]; +} + +// Message that stores parameters shared by loss layers +message LossParameter { + // If specified, ignore instances with the given label. + optional int32 ignore_label = 1; + // If true, normalize each batch across all instances (including spatial + // dimesions, but not ignored instances); else, divide by batch size only. + optional bool normalize = 2 [default = true]; +} + +// Messages that store parameters used by individual layer types follow, in +// alphabetical order. + +message AccuracyParameter { + // When computing accuracy, count as correct by comparing the true label to + // the top k scoring classes. By default, only compare to the top scoring + // class (i.e. argmax). + optional uint32 top_k = 1 [default = 1]; + + // The "label" axis of the prediction blob, whose argmax corresponds to the + // predicted label -- may be negative to index from the end (e.g., -1 for the + // last axis). For example, if axis == 1 and the predictions are + // (N x C x H x W), the label blob is expected to contain N*H*W ground truth + // labels with integer values in {0, 1, ..., C-1}. + optional int32 axis = 2 [default = 1]; + + // If specified, ignore instances with the given label. + optional int32 ignore_label = 3; +} + +message ArgMaxParameter { + // If true produce pairs (argmax, maxval) + optional bool out_max_val = 1 [default = false]; + optional uint32 top_k = 2 [default = 1]; +} + +message ConcatParameter { + // The axis along which to concatenate -- may be negative to index from the + // end (e.g., -1 for the last axis). Other axes must have the + // same dimension for all the bottom blobs. + // By default, ConcatLayer concatenates blobs along the "channels" axis (1). + optional int32 axis = 2 [default = 1]; + + // DEPRECATED: alias for "axis" -- does not support negative indexing. + optional uint32 concat_dim = 1 [default = 1]; +} + +message ContrastiveLossParameter { + // margin for dissimilar pair + optional float margin = 1 [default = 1.0]; + // The first implementation of this cost did not exactly match the cost of + // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. + // legacy_version = false (the default) uses (margin - d)^2 as proposed in the + // Hadsell paper. New models should probably use this version. + // legacy_version = true uses (margin - d^2). 
This is kept to support / + // reproduce existing models and results + optional bool legacy_version = 2 [default = false]; +} + +message ConvolutionParameter { + optional uint32 num_output = 1; // The number of outputs for the layer + optional bool bias_term = 2 [default = true]; // whether to have bias terms + // Pad, kernel size, and stride are all given as a single value for equal + // dimensions in height and width or as Y, X pairs. + optional uint32 pad = 3 [default = 0]; // The padding size (equal in Y, X) + optional uint32 pad_h = 9 [default = 0]; // The padding height + optional uint32 pad_w = 10 [default = 0]; // The padding width + optional uint32 kernel_size = 4; // The kernel size (square) + optional uint32 kernel_h = 11; // The kernel height + optional uint32 kernel_w = 12; // The kernel width + optional uint32 group = 5 [default = 1]; // The group size for group conv + optional uint32 stride = 6 [default = 1]; // The stride (equal in Y, X) + optional uint32 stride_h = 13; // The stride height + optional uint32 stride_w = 14; // The stride width + optional FillerParameter weight_filler = 7; // The filler for the weight + optional FillerParameter bias_filler = 8; // The filler for the bias + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 15 [default = DEFAULT]; +} + +message CConvolutionParameter { + optional float gamma = 1 [default = 0.001]; // The compress parameter of current layer + optional float power = 2 [default = 1]; + optional float iter_stop = 3 [default = 10000]; + optional float c_rate = 4 [default = 3]; + optional FillerParameter weight_mask_filler = 5; // The filler for the weight + optional FillerParameter bias_mask_filler = 6; // The filler for the bias +} + +message DataParameter { + enum DB { + LEVELDB = 0; + LMDB = 1; + } + // Specify the data source. + optional string source = 1; + // Specify the batch size. + optional uint32 batch_size = 4; + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the database. + // DEPRECATED. Each solver accesses a different subset of the database. + optional uint32 rand_skip = 7 [default = 0]; + optional DB backend = 8 [default = LEVELDB]; + // DEPRECATED. See TransformationParameter. For data pre-processing, we can do + // simple scaling and subtracting the data mean, if provided. Note that the + // mean subtraction is always carried out before scaling. + optional float scale = 2 [default = 1]; + optional string mean_file = 3; + // DEPRECATED. See TransformationParameter. Specify if we would like to randomly + // crop an image. + optional uint32 crop_size = 5 [default = 0]; + // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror + // data. + optional bool mirror = 6 [default = false]; + // Force the encoded image to have 3 color channels + optional bool force_encoded_color = 9 [default = false]; + // Prefetch queue (Number of batches to prefetch to host memory, increase if + // data access bandwidth varies). + optional uint32 prefetch = 10 [default = 4]; +} + +message DropoutParameter { + optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio +} + +// DummyDataLayer fills any number of arbitrarily shaped blobs with random +// (or constant) data generated by "Fillers" (see "message FillerParameter"). 
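+// As an illustrative sketch of CConvolutionParameter above, and assuming the
+// CConvolution layer type also reads the usual convolution_param for its base
+// settings, a compressible convolution layer might be declared as:
+//   layer {
+//     name: "conv1"  type: "CConvolution"
+//     bottom: "data"  top: "conv1"
+//     convolution_param { num_output: 96 kernel_size: 11 stride: 4 }
+//     cconvolution_param { gamma: 0.0001 power: 1 c_rate: 3 iter_stop: 10000 }
+//   }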
+message DummyDataParameter { + // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N + // shape fields, and 0, 1 or N data_fillers. + // + // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used. + // If 1 data_filler is specified, it is applied to all top blobs. If N are + // specified, the ith is applied to the ith top blob. + repeated FillerParameter data_filler = 1; + repeated BlobShape shape = 6; + + // 4D dimensions -- deprecated. Use "shape" instead. + repeated uint32 num = 2; + repeated uint32 channels = 3; + repeated uint32 height = 4; + repeated uint32 width = 5; +} + +message EltwiseParameter { + enum EltwiseOp { + PROD = 0; + SUM = 1; + MAX = 2; + } + optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation + repeated float coeff = 2; // blob-wise coefficient for SUM operation + + // Whether to use an asymptotically slower (for >2 inputs) but stabler method + // of computing the gradient for the PROD operation. (No effect for SUM op.) + optional bool stable_prod_grad = 3 [default = true]; +} + +message ExpParameter { + // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. + // Or if base is set to the default (-1), base is set to e, + // so y = exp(shift + scale * x). + optional float base = 1 [default = -1.0]; + optional float scale = 2 [default = 1.0]; + optional float shift = 3 [default = 0.0]; +} + +/// Message that stores parameters used by FlattenLayer +message FlattenParameter { + // The first axis to flatten: all preceding axes are retained in the output. + // May be negative to index from the end (e.g., -1 for the last axis). + optional int32 axis = 1 [default = 1]; + + // The last axis to flatten: all following axes are retained in the output. + // May be negative to index from the end (e.g., the default -1 for the last + // axis). + optional int32 end_axis = 2 [default = -1]; +} + +// Message that stores parameters used by HDF5DataLayer +message HDF5DataParameter { + // Specify the data source. + optional string source = 1; + // Specify the batch size. + optional uint32 batch_size = 2; + + // Specify whether to shuffle the data. + // If shuffle == true, the ordering of the HDF5 files is shuffled, + // and the ordering of data within any given HDF5 file is shuffled, + // but data between different files are not interleaved; all of a file's + // data are output (in a random order) before moving onto another file. + optional bool shuffle = 3 [default = false]; +} + +message HDF5OutputParameter { + optional string file_name = 1; +} + +message HingeLossParameter { + enum Norm { + L1 = 1; + L2 = 2; + } + // Specify the Norm to use L1 or L2 + optional Norm norm = 1 [default = L1]; +} + +message ImageDataParameter { + // Specify the data source. + optional string source = 1; + // Specify the batch size. + optional uint32 batch_size = 4 [default = 1]; + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the database. + optional uint32 rand_skip = 7 [default = 0]; + // Whether or not ImageLayer should shuffle the list of files at every epoch. + optional bool shuffle = 8 [default = false]; + // It will also resize images if new_height or new_width are not zero. 
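+  // For example, new_height: 256 and new_width: 256 resize every loaded image
+  // to 256x256 before any cropping or mirroring is applied.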
+ optional uint32 new_height = 9 [default = 0]; + optional uint32 new_width = 10 [default = 0]; + // Specify if the images are color or gray + optional bool is_color = 11 [default = true]; + // DEPRECATED. See TransformationParameter. For data pre-processing, we can do + // simple scaling and subtracting the data mean, if provided. Note that the + // mean subtraction is always carried out before scaling. + optional float scale = 2 [default = 1]; + optional string mean_file = 3; + // DEPRECATED. See TransformationParameter. Specify if we would like to randomly + // crop an image. + optional uint32 crop_size = 5 [default = 0]; + // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror + // data. + optional bool mirror = 6 [default = false]; + optional string root_folder = 12 [default = ""]; +} + +message InfogainLossParameter { + // Specify the infogain matrix source. + optional string source = 1; +} + +message InnerProductParameter { + optional uint32 num_output = 1; // The number of outputs for the layer + optional bool bias_term = 2 [default = true]; // whether to have bias terms + optional FillerParameter weight_filler = 3; // The filler for the weight + optional FillerParameter bias_filler = 4; // The filler for the bias + + // The first axis to be lumped into a single inner product computation; + // all preceding axes are retained in the output. + // May be negative to index from the end (e.g., -1 for the last axis). + optional int32 axis = 5 [default = 1]; +} + +message CInnerProductParameter { + optional float gamma = 1 [default = 0.001]; // The compress parameter of current layer + optional float power = 2 [default = 1]; + optional float iter_stop = 3 [default = 8000]; + optional float c_rate = 4 [default = 3]; + optional FillerParameter weight_mask_filler = 5; // The filler for the weight + optional FillerParameter bias_mask_filler = 6; // The filler for the bias +} + +// Message that stores parameters used by LogLayer +message LogParameter { + // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. 
+ // Or if base is set to the default (-1), base is set to e, + // so y = ln(shift + scale * x) = log_e(shift + scale * x) + optional float base = 1 [default = -1.0]; + optional float scale = 2 [default = 1.0]; + optional float shift = 3 [default = 0.0]; +} + +// Message that stores parameters used by LRNLayer +message LRNParameter { + optional uint32 local_size = 1 [default = 5]; + optional float alpha = 2 [default = 1.]; + optional float beta = 3 [default = 0.75]; + enum NormRegion { + ACROSS_CHANNELS = 0; + WITHIN_CHANNEL = 1; + } + optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS]; + optional float k = 5 [default = 1.]; +} + +message MemoryDataParameter { + optional uint32 batch_size = 1; + optional uint32 channels = 2; + optional uint32 height = 3; + optional uint32 width = 4; +} + +message MVNParameter { + // This parameter can be set to false to normalize mean only + optional bool normalize_variance = 1 [default = true]; + + // This parameter can be set to true to perform DNN-like MVN + optional bool across_channels = 2 [default = false]; + + // Epsilon for not dividing by zero while normalizing variance + optional float eps = 3 [default = 1e-9]; +} + +message PoolingParameter { + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional PoolMethod pool = 1 [default = MAX]; // The pooling method + // Pad, kernel size, and stride are all given as a single value for equal + // dimensions in height and width or as Y, X pairs. + optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X) + optional uint32 pad_h = 9 [default = 0]; // The padding height + optional uint32 pad_w = 10 [default = 0]; // The padding width + optional uint32 kernel_size = 2; // The kernel size (square) + optional uint32 kernel_h = 5; // The kernel height + optional uint32 kernel_w = 6; // The kernel width + optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X) + optional uint32 stride_h = 7; // The stride height + optional uint32 stride_w = 8; // The stride width + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 11 [default = DEFAULT]; + // If global_pooling then it will pool over the size of the bottom by doing + // kernel_h = bottom->height and kernel_w = bottom->width + optional bool global_pooling = 12 [default = false]; +} + +message PowerParameter { + // PowerLayer computes outputs y = (shift + scale * x) ^ power. + optional float power = 1 [default = 1.0]; + optional float scale = 2 [default = 1.0]; + optional float shift = 3 [default = 0.0]; +} + +message PythonParameter { + optional string module = 1; + optional string layer = 2; + // This value is set to the attribute `param_str` of the `PythonLayer` object + // in Python before calling the `setup()` method. This could be a number, + // string, dictionary in Python dict format, JSON, etc. You may parse this + // string in `setup` method and use it in `forward` and `backward`. + optional string param_str = 3 [default = '']; + // Whether this PythonLayer is shared among worker solvers during data parallelism. + // If true, each worker solver sequentially run forward from this layer. + // This value should be set true if you are using it as a data layer. 
+ optional bool share_in_parallel = 4 [default = false]; +} + +// Message that stores parameters used by ReductionLayer +message ReductionParameter { + enum ReductionOp { + SUM = 1; + ASUM = 2; + SUMSQ = 3; + MEAN = 4; + } + + optional ReductionOp operation = 1 [default = SUM]; // reduction operation + + // The first axis to reduce to a scalar -- may be negative to index from the + // end (e.g., -1 for the last axis). + // (Currently, only reduction along ALL "tail" axes is supported; reduction + // of axis M through N, where N < num_axes - 1, is unsupported.) + // Suppose we have an n-axis bottom Blob with shape: + // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). + // If axis == m, the output Blob will have shape + // (d0, d1, d2, ..., d(m-1)), + // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) + // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. + // If axis == 0 (the default), the output Blob always has the empty shape + // (count 1), performing reduction across the entire input -- + // often useful for creating new loss functions. + optional int32 axis = 2 [default = 0]; + + optional float coeff = 3 [default = 1.0]; // coefficient for output +} + +// Message that stores parameters used by ReLULayer +message ReLUParameter { + // Allow non-zero slope for negative inputs to speed up optimization + // Described in: + // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities + // improve neural network acoustic models. In ICML Workshop on Deep Learning + // for Audio, Speech, and Language Processing. + optional float negative_slope = 1 [default = 0]; + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 2 [default = DEFAULT]; +} + +message ReshapeParameter { + // Specify the output dimensions. If some of the dimensions are set to 0, + // the corresponding dimension from the bottom layer is used (unchanged). + // Exactly one dimension may be set to -1, in which case its value is + // inferred from the count of the bottom blob and the remaining dimensions. + // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: + // + // layer { + // type: "Reshape" bottom: "input" top: "output" + // reshape_param { ... } + // } + // + // If "input" is 2D with shape 2 x 8, then the following reshape_param + // specifications are all equivalent, producing a 3D blob "output" with shape + // 2 x 2 x 4: + // + // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } + // reshape_param { shape { dim: -1 dim: 0 dim: 2 } } + // + optional BlobShape shape = 1; + + // axis and num_axes control the portion of the bottom blob's shape that are + // replaced by (included in) the reshape. By default (axis == 0 and + // num_axes == -1), the entire bottom blob shape is included in the reshape, + // and hence the shape field must specify the entire output shape. + // + // axis may be non-zero to retain some portion of the beginning of the input + // shape (and may be negative to index from the end; e.g., -1 to begin the + // reshape after the last axis, including nothing in the reshape, + // -2 to include only the last axis, etc.). + // + // For example, suppose "input" is a 2D blob with shape 2 x 8. 
+ // Then the following ReshapeLayer specifications are all equivalent, + // producing a blob "output" with shape 2 x 2 x 4: + // + // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } + // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } + // + // num_axes specifies the extent of the reshape. + // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on + // input axes in the range [axis, axis+num_axes]. + // num_axes may also be -1, the default, to include all remaining axes + // (starting from axis). + // + // For example, suppose "input" is a 2D blob with shape 2 x 8. + // Then the following ReshapeLayer specifications are equivalent, + // producing a blob "output" with shape 1 x 2 x 8. + // + // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } + // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } + // reshape_param { shape { dim: 1 } num_axes: 0 } + // + // On the other hand, these would produce output blob shape 2 x 1 x 8: + // + // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } + // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } + // + optional int32 axis = 2 [default = 0]; + optional int32 num_axes = 3 [default = -1]; +} + +message SigmoidParameter { + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 1 [default = DEFAULT]; +} + +message SliceParameter { + // The axis along which to slice -- may be negative to index from the end + // (e.g., -1 for the last axis). + // By default, SliceLayer concatenates blobs along the "channels" axis (1). + optional int32 axis = 3 [default = 1]; + repeated uint32 slice_point = 2; + + // DEPRECATED: alias for "axis" -- does not support negative indexing. + optional uint32 slice_dim = 1 [default = 1]; +} + +// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer +message SoftmaxParameter { + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 1 [default = DEFAULT]; + + // The axis along which to perform the softmax -- may be negative to index + // from the end (e.g., -1 for the last axis). + // Any other axes will be evaluated as independent softmaxes. + optional int32 axis = 2 [default = 1]; +} + +message TanHParameter { + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 1 [default = DEFAULT]; +} + +message ThresholdParameter { + optional float threshold = 1 [default = 0]; // Strictly positive values +} + +message WindowDataParameter { + // Specify the data source. + optional string source = 1; + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 2 [default = 1]; + optional string mean_file = 3; + // Specify the batch size. + optional uint32 batch_size = 4; + // Specify if we would like to randomly crop an image. + optional uint32 crop_size = 5 [default = 0]; + // Specify if we want to randomly mirror data. 
+ optional bool mirror = 6 [default = false]; + // Foreground (object) overlap threshold + optional float fg_threshold = 7 [default = 0.5]; + // Background (non-object) overlap threshold + optional float bg_threshold = 8 [default = 0.5]; + // Fraction of batch that should be foreground objects + optional float fg_fraction = 9 [default = 0.25]; + // Amount of contextual padding to add around a window + // (used only by the window_data_layer) + optional uint32 context_pad = 10 [default = 0]; + // Mode for cropping out a detection window + // warp: cropped window is warped to a fixed size and aspect ratio + // square: the tightest square around the window is cropped + optional string crop_mode = 11 [default = "warp"]; + // cache_images: will load all images in memory for faster access + optional bool cache_images = 12 [default = false]; + // append root_folder to locate images + optional string root_folder = 13 [default = ""]; +} + +message SPPParameter { + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional uint32 pyramid_height = 1; + optional PoolMethod pool = 2 [default = MAX]; // The pooling method + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 6 [default = DEFAULT]; +} + +// DEPRECATED: use LayerParameter. +message V1LayerParameter { + repeated string bottom = 2; + repeated string top = 3; + optional string name = 4; + repeated NetStateRule include = 32; + repeated NetStateRule exclude = 33; + enum LayerType { + NONE = 0; + ABSVAL = 35; + ACCURACY = 1; + ARGMAX = 30; + BNLL = 2; + CONCAT = 3; + CONTRASTIVE_LOSS = 37; + CONVOLUTION = 4; + DATA = 5; + DECONVOLUTION = 39; + DROPOUT = 6; + DUMMY_DATA = 32; + EUCLIDEAN_LOSS = 7; + ELTWISE = 25; + EXP = 38; + FLATTEN = 8; + HDF5_DATA = 9; + HDF5_OUTPUT = 10; + HINGE_LOSS = 28; + IM2COL = 11; + IMAGE_DATA = 12; + INFOGAIN_LOSS = 13; + INNER_PRODUCT = 14; + LRN = 15; + MEMORY_DATA = 29; + MULTINOMIAL_LOGISTIC_LOSS = 16; + MVN = 34; + POOLING = 17; + POWER = 26; + RELU = 18; + SIGMOID = 19; + SIGMOID_CROSS_ENTROPY_LOSS = 27; + SILENCE = 36; + SOFTMAX = 20; + SOFTMAX_LOSS = 21; + SPLIT = 22; + SLICE = 33; + TANH = 23; + WINDOW_DATA = 24; + THRESHOLD = 31; + } + optional LayerType type = 5; + repeated BlobProto blobs = 6; + repeated string param = 1001; + repeated DimCheckMode blob_share_mode = 1002; + enum DimCheckMode { + STRICT = 0; + PERMISSIVE = 1; + } + repeated float blobs_lr = 7; + repeated float weight_decay = 8; + repeated float loss_weight = 35; + optional AccuracyParameter accuracy_param = 27; + optional ArgMaxParameter argmax_param = 23; + optional ConcatParameter concat_param = 9; + optional ContrastiveLossParameter contrastive_loss_param = 40; + optional ConvolutionParameter convolution_param = 10; + optional DataParameter data_param = 11; + optional DropoutParameter dropout_param = 12; + optional DummyDataParameter dummy_data_param = 26; + optional EltwiseParameter eltwise_param = 24; + optional ExpParameter exp_param = 41; + optional HDF5DataParameter hdf5_data_param = 13; + optional HDF5OutputParameter hdf5_output_param = 14; + optional HingeLossParameter hinge_loss_param = 29; + optional ImageDataParameter image_data_param = 15; + optional InfogainLossParameter infogain_loss_param = 16; + optional InnerProductParameter inner_product_param = 17; + optional LRNParameter lrn_param = 18; + optional MemoryDataParameter memory_data_param = 22; + optional MVNParameter mvn_param = 34; + optional PoolingParameter pooling_param = 19; + optional PowerParameter power_param = 21; 
+ optional ReLUParameter relu_param = 30; + optional SigmoidParameter sigmoid_param = 38; + optional SoftmaxParameter softmax_param = 39; + optional SliceParameter slice_param = 31; + optional TanHParameter tanh_param = 37; + optional ThresholdParameter threshold_param = 25; + optional WindowDataParameter window_data_param = 20; + optional TransformationParameter transform_param = 36; + optional LossParameter loss_param = 42; + optional V0LayerParameter layer = 1; +} + +// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters +// in Caffe. We keep this message type around for legacy support. +message V0LayerParameter { + optional string name = 1; // the layer name + optional string type = 2; // the string to specify the layer type + + // Parameters to specify layers with inner products. + optional uint32 num_output = 3; // The number of outputs for the layer + optional bool biasterm = 4 [default = true]; // whether to have bias terms + optional FillerParameter weight_filler = 5; // The filler for the weight + optional FillerParameter bias_filler = 6; // The filler for the bias + + optional uint32 pad = 7 [default = 0]; // The padding size + optional uint32 kernelsize = 8; // The kernel size + optional uint32 group = 9 [default = 1]; // The group size for group conv + optional uint32 stride = 10 [default = 1]; // The stride + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional PoolMethod pool = 11 [default = MAX]; // The pooling method + optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio + + optional uint32 local_size = 13 [default = 5]; // for local response norm + optional float alpha = 14 [default = 1.]; // for local response norm + optional float beta = 15 [default = 0.75]; // for local response norm + optional float k = 22 [default = 1.]; + + // For data layers, specify the data source + optional string source = 16; + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 17 [default = 1]; + optional string meanfile = 18; + // For data layers, specify the batch size. + optional uint32 batchsize = 19; + // For data layers, specify if we would like to randomly crop an image. + optional uint32 cropsize = 20 [default = 0]; + // For data layers, specify if we want to randomly mirror data. + optional bool mirror = 21 [default = false]; + + // The blobs containing the numeric parameters of the layer + repeated BlobProto blobs = 50; + // The ratio that is multiplied on the global learning rate. If you want to + // set the learning ratio for one blob, you need to set it for all blobs. + repeated float blobs_lr = 51; + // The weight decay that is multiplied on the global weight decay. + repeated float weight_decay = 52; + + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the database. 
+  optional uint32 rand_skip = 53 [default = 0];
+
+  // Fields related to detection (det_*)
+  // foreground (object) overlap threshold
+  optional float det_fg_threshold = 54 [default = 0.5];
+  // background (non-object) overlap threshold
+  optional float det_bg_threshold = 55 [default = 0.5];
+  // Fraction of batch that should be foreground objects
+  optional float det_fg_fraction = 56 [default = 0.25];
+
+  // optional bool OBSOLETE_can_clobber = 57 [default = true];
+
+  // Amount of contextual padding to add around a window
+  // (used only by the window_data_layer)
+  optional uint32 det_context_pad = 58 [default = 0];
+
+  // Mode for cropping out a detection window
+  // warp: cropped window is warped to a fixed size and aspect ratio
+  // square: the tightest square around the window is cropped
+  optional string det_crop_mode = 59 [default = "warp"];
+
+  // For ReshapeLayer, one needs to specify the new dimensions.
+  optional int32 new_num = 60 [default = 0];
+  optional int32 new_channels = 61 [default = 0];
+  optional int32 new_height = 62 [default = 0];
+  optional int32 new_width = 63 [default = 0];
+
+  // Whether or not ImageLayer should shuffle the list of files at every epoch.
+  // It will also resize images if new_height or new_width are not zero.
+  optional bool shuffle_images = 64 [default = false];
+
+  // For ConcatLayer, one needs to specify the dimension for concatenation, and
+  // the other dimensions must be the same for all the bottom blobs.
+  // By default it will concatenate blobs along the channels dimension.
+  optional uint32 concat_dim = 65 [default = 1];
+
+  optional HDF5OutputParameter hdf5_output_param = 1001;
+}
+
+message PReLUParameter {
+  // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers:
+  // Surpassing Human-Level Performance on ImageNet Classification, 2015.
+
+  // Initial value of a_i. Default is a_i=0.25 for all i.
+  optional FillerParameter filler = 1;
+  // Whether or not slope parameters are shared across channels.
+ optional bool channel_shared = 2 [default = false]; +} + diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp new file mode 100755 index 0000000..fa73376 --- /dev/null +++ b/src/caffe/solver.cpp @@ -0,0 +1,1232 @@ +#include + +#include +#include +#include + +#include "hdf5.h" +#include "hdf5_hl.h" + +#include "caffe/net.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/solver.hpp" +#include "caffe/util/hdf5.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/upgrade_proto.hpp" + +namespace caffe { + +template +Solver::Solver(const SolverParameter& param, const Solver* root_solver) + : net_(), callbacks_(), root_solver_(root_solver) { + Init(param); +} + +template +Solver::Solver(const string& param_file, const Solver* root_solver) + : net_(), callbacks_(), root_solver_(root_solver) { + SolverParameter param; + ReadProtoFromTextFileOrDie(param_file, ¶m); + Init(param); +} + +template +void Solver::Init(const SolverParameter& param) { + CHECK(Caffe::root_solver() || root_solver_) + << "root_solver_ needs to be set for all non-root solvers"; + LOG_IF(INFO, Caffe::root_solver()) << "Initializing solver from parameters: " + << std::endl << param.DebugString(); + param_ = param; + CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; + if (Caffe::root_solver() && param_.random_seed() >= 0) { + Caffe::set_random_seed(param_.random_seed()); + } + // Scaffolding code + InitTrainNet(); + if (Caffe::root_solver()) { + InitTestNets(); + LOG(INFO) << "Solver scaffolding done."; + } + iter_ = 0; + current_step_ = 0; +} + +template +void Solver::InitTrainNet() { + const int num_train_nets = param_.has_net() + param_.has_net_param() + + param_.has_train_net() + param_.has_train_net_param(); + const string& field_names = "net, net_param, train_net, train_net_param"; + CHECK_GE(num_train_nets, 1) << "SolverParameter must specify a train net " + << "using one of these fields: " << field_names; + CHECK_LE(num_train_nets, 1) << "SolverParameter must not contain more than " + << "one of these fields specifying a train_net: " << field_names; + NetParameter net_param; + if (param_.has_train_net_param()) { + LOG_IF(INFO, Caffe::root_solver()) + << "Creating training net specified in train_net_param."; + net_param.CopyFrom(param_.train_net_param()); + } else if (param_.has_train_net()) { + LOG_IF(INFO, Caffe::root_solver()) + << "Creating training net from train_net file: " << param_.train_net(); + ReadNetParamsFromTextFileOrDie(param_.train_net(), &net_param); + } + if (param_.has_net_param()) { + LOG_IF(INFO, Caffe::root_solver()) + << "Creating training net specified in net_param."; + net_param.CopyFrom(param_.net_param()); + } + if (param_.has_net()) { + LOG_IF(INFO, Caffe::root_solver()) + << "Creating training net from net file: " << param_.net(); + ReadNetParamsFromTextFileOrDie(param_.net(), &net_param); + } + // Set the correct NetState. We start with the solver defaults (lowest + // precedence); then, merge in any NetState specified by the net_param itself; + // finally, merge in any NetState specified by the train_state (highest + // precedence). 
+ NetState net_state; + net_state.set_phase(TRAIN); + net_state.MergeFrom(net_param.state()); + net_state.MergeFrom(param_.train_state()); + net_param.mutable_state()->CopyFrom(net_state); + if (Caffe::root_solver()) { + net_.reset(new Net(net_param)); + } else { + net_.reset(new Net(net_param, root_solver_->net_.get())); + } +} + +template +void Solver::InitTestNets() { + CHECK(Caffe::root_solver()); + const bool has_net_param = param_.has_net_param(); + const bool has_net_file = param_.has_net(); + const int num_generic_nets = has_net_param + has_net_file; + CHECK_LE(num_generic_nets, 1) + << "Both net_param and net_file may not be specified."; + const int num_test_net_params = param_.test_net_param_size(); + const int num_test_net_files = param_.test_net_size(); + const int num_test_nets = num_test_net_params + num_test_net_files; + if (num_generic_nets) { + CHECK_GE(param_.test_iter_size(), num_test_nets) + << "test_iter must be specified for each test network."; + } else { + CHECK_EQ(param_.test_iter_size(), num_test_nets) + << "test_iter must be specified for each test network."; + } + // If we have a generic net (specified by net or net_param, rather than + // test_net or test_net_param), we may have an unlimited number of actual + // test networks -- the actual number is given by the number of remaining + // test_iters after any test nets specified by test_net_param and/or test_net + // are evaluated. + const int num_generic_net_instances = param_.test_iter_size() - num_test_nets; + const int num_test_net_instances = num_test_nets + num_generic_net_instances; + if (param_.test_state_size()) { + CHECK_EQ(param_.test_state_size(), num_test_net_instances) + << "test_state must be unspecified or specified once per test net."; + } + if (num_test_net_instances) { + CHECK_GT(param_.test_interval(), 0); + } + int test_net_id = 0; + vector sources(num_test_net_instances); + vector net_params(num_test_net_instances); + for (int i = 0; i < num_test_net_params; ++i, ++test_net_id) { + sources[test_net_id] = "test_net_param"; + net_params[test_net_id].CopyFrom(param_.test_net_param(i)); + } + for (int i = 0; i < num_test_net_files; ++i, ++test_net_id) { + sources[test_net_id] = "test_net file: " + param_.test_net(i); + ReadNetParamsFromTextFileOrDie(param_.test_net(i), + &net_params[test_net_id]); + } + const int remaining_test_nets = param_.test_iter_size() - test_net_id; + if (has_net_param) { + for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { + sources[test_net_id] = "net_param"; + net_params[test_net_id].CopyFrom(param_.net_param()); + } + } + if (has_net_file) { + for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { + sources[test_net_id] = "net file: " + param_.net(); + ReadNetParamsFromTextFileOrDie(param_.net(), &net_params[test_net_id]); + } + } + test_nets_.resize(num_test_net_instances); + for (int i = 0; i < num_test_net_instances; ++i) { + // Set the correct NetState. We start with the solver defaults (lowest + // precedence); then, merge in any NetState specified by the net_param + // itself; finally, merge in any NetState specified by the test_state + // (highest precedence). 
+ NetState net_state; + net_state.set_phase(TEST); + net_state.MergeFrom(net_params[i].state()); + if (param_.test_state_size()) { + net_state.MergeFrom(param_.test_state(i)); + } + net_params[i].mutable_state()->CopyFrom(net_state); + LOG(INFO) + << "Creating test net (#" << i << ") specified by " << sources[i]; + if (Caffe::root_solver()) { + test_nets_[i].reset(new Net(net_params[i])); + } else { + test_nets_[i].reset(new Net(net_params[i], + root_solver_->test_nets_[i].get())); + } + test_nets_[i]->set_debug_info(param_.debug_info()); + } +} + +template +void Solver::Step(int iters) { + vector*> bottom_vec; + const int start_iter = iter_; + const int stop_iter = iter_ + iters; + int average_loss = this->param_.average_loss(); + vector losses; + Dtype smoothed_loss = 0; + + while (iter_ < stop_iter) { + // zero-init the params + net_->ClearParamDiffs(); + if (param_.test_interval() && iter_ % param_.test_interval() == 0 + && (iter_ > 0 || param_.test_initialization()) + && Caffe::root_solver()) { + TestAll(); + } + + for (int i = 0; i < callbacks_.size(); ++i) { + callbacks_[i]->on_start(); + } + const bool display = param_.display() && iter_ % param_.display() == 0; + net_->set_debug_info(display && param_.debug_info()); + + // Set current iteration number for dynamic network surgery + net_->set_current_iter_num(iter_); + + // accumulate the loss and gradient + Dtype loss = 0; + for (int i = 0; i < param_.iter_size(); ++i) { + loss += net_->ForwardBackward(bottom_vec); + } + loss /= param_.iter_size(); + // average the loss across iterations for smoothed reporting + if (losses.size() < average_loss) { + losses.push_back(loss); + int size = losses.size(); + smoothed_loss = (smoothed_loss * (size - 1) + loss) / size; + } else { + int idx = (iter_ - start_iter) % average_loss; + smoothed_loss += (loss - losses[idx]) / average_loss; + losses[idx] = loss; + } + if (display) { + LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << iter_ + << ", loss = " << smoothed_loss; + const vector*>& result = net_->output_blobs(); + int score_index = 0; + for (int j = 0; j < result.size(); ++j) { + const Dtype* result_vec = result[j]->cpu_data(); + const string& output_name = + net_->blob_names()[net_->output_blob_indices()[j]]; + const Dtype loss_weight = + net_->blob_loss_weights()[net_->output_blob_indices()[j]]; + for (int k = 0; k < result[j]->count(); ++k) { + ostringstream loss_msg_stream; + if (loss_weight) { + loss_msg_stream << " (* " << loss_weight + << " = " << loss_weight * result_vec[k] << " loss)"; + } + LOG_IF(INFO, Caffe::root_solver()) << " Train net output #" + << score_index++ << ": " << output_name << " = " + << result_vec[k] << loss_msg_stream.str(); + } + } + } + for (int i = 0; i < callbacks_.size(); ++i) { + callbacks_[i]->on_gradients_ready(); + } + ApplyUpdate(); + + // Increment the internal iter_ counter -- its value should always indicate + // the number of times the weights have been updated. + ++iter_; + + // Save a snapshot if needed. 
+ if (param_.snapshot() + && iter_ % param_.snapshot() == 0 + && Caffe::root_solver()) { + Snapshot(); + } + } +} + +template +void Solver::Solve(const char* resume_file) { + CHECK(Caffe::root_solver()); + LOG(INFO) << "Solving " << net_->name(); + LOG(INFO) << "Learning Rate Policy: " << param_.lr_policy(); + + if (resume_file) { + LOG(INFO) << "Restoring previous solver status from " << resume_file; + Restore(resume_file); + } + + // For a network that is trained by the solver, no bottom or top vecs + // should be given, and we will just provide dummy vecs. + Step(param_.max_iter() - iter_); + // If we haven't already, save a snapshot after optimization, unless + // overridden by setting snapshot_after_train := false + if (param_.snapshot_after_train() + && (!param_.snapshot() || iter_ % param_.snapshot() != 0)) { + Snapshot(); + } + // After the optimization is done, run an additional train and test pass to + // display the train and test loss/outputs if appropriate (based on the + // display and test_interval settings, respectively). Unlike in the rest of + // training, for the train net we only run a forward pass as we've already + // updated the parameters "max_iter" times -- this final pass is only done to + // display the loss, which is computed in the forward pass. + if (param_.display() && iter_ % param_.display() == 0) { + Dtype loss; + net_->ForwardPrefilled(&loss); + LOG(INFO) << "Iteration " << iter_ << ", loss = " << loss; + } + if (param_.test_interval() && iter_ % param_.test_interval() == 0) { + TestAll(); + } + LOG(INFO) << "Optimization Done."; +} + + +template +void Solver::TestAll() { + for (int test_net_id = 0; test_net_id < test_nets_.size(); ++test_net_id) { + Test(test_net_id); + } +} + +template +void Solver::Test(const int test_net_id) { + CHECK(Caffe::root_solver()); + LOG(INFO) << "Iteration " << iter_ + << ", Testing net (#" << test_net_id << ")"; + CHECK_NOTNULL(test_nets_[test_net_id].get())-> + ShareTrainedLayersWith(net_.get()); + vector test_score; + vector test_score_output_id; + vector*> bottom_vec; + const shared_ptr >& test_net = test_nets_[test_net_id]; + Dtype loss = 0; + for (int i = 0; i < param_.test_iter(test_net_id); ++i) { + Dtype iter_loss; + const vector*>& result = + test_net->Forward(bottom_vec, &iter_loss); + if (param_.test_compute_loss()) { + loss += iter_loss; + } + if (i == 0) { + for (int j = 0; j < result.size(); ++j) { + const Dtype* result_vec = result[j]->cpu_data(); + for (int k = 0; k < result[j]->count(); ++k) { + test_score.push_back(result_vec[k]); + test_score_output_id.push_back(j); + } + } + } else { + int idx = 0; + for (int j = 0; j < result.size(); ++j) { + const Dtype* result_vec = result[j]->cpu_data(); + for (int k = 0; k < result[j]->count(); ++k) { + test_score[idx++] += result_vec[k]; + } + } + } + } + if (param_.test_compute_loss()) { + loss /= param_.test_iter(test_net_id); + LOG(INFO) << "Test loss: " << loss; + } + for (int i = 0; i < test_score.size(); ++i) { + const int output_blob_index = + test_net->output_blob_indices()[test_score_output_id[i]]; + const string& output_name = test_net->blob_names()[output_blob_index]; + const Dtype loss_weight = test_net->blob_loss_weights()[output_blob_index]; + ostringstream loss_msg_stream; + const Dtype mean_score = test_score[i] / param_.test_iter(test_net_id); + if (loss_weight) { + loss_msg_stream << " (* " << loss_weight + << " = " << loss_weight * mean_score << " loss)"; + } + LOG(INFO) << " Test net output #" << i << ": " << output_name << " = " + << mean_score 
<< loss_msg_stream.str();
+  }
+}
+
+
+template <typename Dtype>
+void Solver<Dtype>::Snapshot() {
+  CHECK(Caffe::root_solver());
+  string model_filename;
+  switch (param_.snapshot_format()) {
+  case caffe::SolverParameter_SnapshotFormat_BINARYPROTO:
+    model_filename = SnapshotToBinaryProto();
+    break;
+  case caffe::SolverParameter_SnapshotFormat_HDF5:
+    model_filename = SnapshotToHDF5();
+    break;
+  default:
+    LOG(FATAL) << "Unsupported snapshot format.";
+  }
+
+  SnapshotSolverState(model_filename);
+}
+
+template <typename Dtype>
+string Solver<Dtype>::SnapshotFilename(const string extension) {
+  string filename(param_.snapshot_prefix());
+  const int kBufferSize = 20;
+  char iter_str_buffer[kBufferSize];
+  snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_);
+  return filename + iter_str_buffer + extension;
+}
+
+template <typename Dtype>
+string Solver<Dtype>::SnapshotToBinaryProto() {
+  string model_filename = SnapshotFilename(".caffemodel");
+  LOG(INFO) << "Snapshotting to binary proto file " << model_filename;
+  NetParameter net_param;
+  net_->ToProto(&net_param, param_.snapshot_diff());
+  WriteProtoToBinaryFile(net_param, model_filename);
+  return model_filename;
+}
+
+template <typename Dtype>
+string Solver<Dtype>::SnapshotToHDF5() {
+  string model_filename = SnapshotFilename(".caffemodel.h5");
+  LOG(INFO) << "Snapshotting to HDF5 file " << model_filename;
+  net_->ToHDF5(model_filename, param_.snapshot_diff());
+  return model_filename;
+}
+
+template <typename Dtype>
+void Solver<Dtype>::Restore(const char* state_file) {
+  CHECK(Caffe::root_solver());
+  string state_filename(state_file);
+  if (state_filename.size() >= 3 &&
+      state_filename.compare(state_filename.size() - 3, 3, ".h5") == 0) {
+    RestoreSolverStateFromHDF5(state_filename);
+  } else {
+    RestoreSolverStateFromBinaryProto(state_filename);
+  }
+}
+
+// Return the current learning rate. The currently implemented learning rate
+// policies are as follows:
+//    - fixed: always return base_lr.
+//    - step: return base_lr * gamma ^ (floor(iter / stepsize))
+//    - exp: return base_lr * gamma ^ iter
+//    - inv: return base_lr * (1 + gamma * iter) ^ (- power)
+//    - multistep: similar to step, but allows non-uniform steps defined by
+//      stepvalue
+//    - poly: the effective learning rate follows a polynomial decay, reaching
+//      zero at max_iter: return base_lr * (1 - iter/max_iter) ^ power
+//    - sigmoid: the effective learning rate follows a sigmoid decay:
+//      return base_lr * (1 / (1 + exp(-gamma * (iter - stepsize))))
+//
+// where base_lr, max_iter, gamma, stepsize, stepvalue and power are defined
+// in the solver parameter protocol buffer, and iter is the current iteration.
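The policy list above states each schedule only as a formula. The snippet below is a minimal, self-contained sketch, separate from the patched solver code, showing how the "step" and "inv" expressions evaluate; the hyper-parameter values are arbitrary examples, not Caffe defaults, and the arithmetic mirrors GetLearningRate() that follows.

    // Standalone evaluation of two of the learning rate policies listed above.
    #include <cmath>
    #include <cstdio>

    int main() {
      const double base_lr = 0.01, gamma = 0.1, power = 0.75;
      const int stepsize = 10000;
      for (int iter = 0; iter <= 30000; iter += 10000) {
        // "step": drop by a factor of gamma every stepsize iterations.
        const double step_rate = base_lr * std::pow(gamma, iter / stepsize);
        // "inv": smooth decay controlled by gamma and power.
        const double inv_rate = base_lr * std::pow(1.0 + gamma * iter, -power);
        std::printf("iter %6d  step %.6f  inv %.6f\n", iter, step_rate, inv_rate);
      }
      return 0;
    }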
+template +Dtype SGDSolver::GetLearningRate() { + Dtype rate; + const string& lr_policy = this->param_.lr_policy(); + if (lr_policy == "fixed") { + rate = this->param_.base_lr(); + } else if (lr_policy == "step") { + this->current_step_ = this->iter_ / this->param_.stepsize(); + rate = this->param_.base_lr() * + pow(this->param_.gamma(), this->current_step_); + } else if (lr_policy == "exp") { + rate = this->param_.base_lr() * pow(this->param_.gamma(), this->iter_); + } else if (lr_policy == "inv") { + rate = this->param_.base_lr() * + pow(Dtype(1) + this->param_.gamma() * this->iter_, + - this->param_.power()); + } else if (lr_policy == "multistep") { + if (this->current_step_ < this->param_.stepvalue_size() && + this->iter_ >= this->param_.stepvalue(this->current_step_)) { + this->current_step_++; + LOG(INFO) << "MultiStep Status: Iteration " << + this->iter_ << ", step = " << this->current_step_; + } + rate = this->param_.base_lr() * + pow(this->param_.gamma(), this->current_step_); + } else if (lr_policy == "poly") { + rate = this->param_.base_lr() * pow(Dtype(1.) - + (Dtype(this->iter_) / Dtype(this->param_.max_iter())), + this->param_.power()); + } else if (lr_policy == "sigmoid") { + rate = this->param_.base_lr() * (Dtype(1.) / + (Dtype(1.) + exp(-this->param_.gamma() * (Dtype(this->iter_) - + Dtype(this->param_.stepsize()))))); + } else { + LOG(FATAL) << "Unknown learning rate policy: " << lr_policy; + } + return rate; +} + +template +void SGDSolver::PreSolve() { + // Initialize the history + const vector*>& net_params = this->net_->learnable_params(); + history_.clear(); + update_.clear(); + temp_.clear(); + for (int i = 0; i < net_params.size(); ++i) { + const vector& shape = net_params[i]->shape(); + history_.push_back(shared_ptr >(new Blob(shape))); + update_.push_back(shared_ptr >(new Blob(shape))); + temp_.push_back(shared_ptr >(new Blob(shape))); + } +} + +template +void SGDSolver::ClipGradients() { + const Dtype clip_gradients = this->param_.clip_gradients(); + if (clip_gradients < 0) { return; } + const vector*>& net_params = this->net_->learnable_params(); + Dtype sumsq_diff = 0; + for (int i = 0; i < net_params.size(); ++i) { + sumsq_diff += net_params[i]->sumsq_diff(); + } + const Dtype l2norm_diff = std::sqrt(sumsq_diff); + if (l2norm_diff > clip_gradients) { + Dtype scale_factor = clip_gradients / l2norm_diff; + LOG(INFO) << "Gradient clipping: scaling down gradients (L2 norm " + << l2norm_diff << " > " << clip_gradients << ") " + << "by scale factor " << scale_factor; + for (int i = 0; i < net_params.size(); ++i) { + net_params[i]->scale_diff(scale_factor); + } + } +} + +template +void SGDSolver::ApplyUpdate() { + CHECK(Caffe::root_solver()); + Dtype rate = GetLearningRate(); + if (this->param_.display() && this->iter_ % this->param_.display() == 0) { + LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + } + ClipGradients(); + for (int param_id = 0; param_id < this->net_->learnable_params().size(); + ++param_id) { + Normalize(param_id); + Regularize(param_id); + ComputeUpdateValue(param_id, rate); + } + this->net_->Update(); +} + +template +void SGDSolver::Normalize(int param_id) { + if (this->param_.iter_size() == 1) { return; } + // Scale gradient to counterbalance accumulation. + const vector*>& net_params = this->net_->learnable_params(); + const Dtype accum_normalization = Dtype(1.) 
/ this->param_.iter_size(); + switch (Caffe::mode()) { + case Caffe::CPU: { + caffe_scal(net_params[param_id]->count(), accum_normalization, + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + caffe_gpu_scal(net_params[param_id]->count(), accum_normalization, + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::Regularize(int param_id) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + Dtype weight_decay = this->param_.weight_decay(); + string regularization_type = this->param_.regularization_type(); + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + switch (Caffe::mode()) { + case Caffe::CPU: { + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype momentum = this->param_.momentum(); + Dtype local_rate = rate * net_params_lr[param_id]; + // Compute the update to history, then copy it to the parameter diff. 
+ switch (Caffe::mode()) { + case Caffe::CPU: { + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + history_[param_id]->mutable_cpu_data()); + caffe_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + history_[param_id]->mutable_gpu_data()); + caffe_copy(net_params[param_id]->count(), + history_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::SnapshotSolverState(const string& model_filename) { + switch (this->param_.snapshot_format()) { + case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: + SnapshotSolverStateToBinaryProto(model_filename); + break; + case caffe::SolverParameter_SnapshotFormat_HDF5: + SnapshotSolverStateToHDF5(model_filename); + break; + default: + LOG(FATAL) << "Unsupported snapshot format."; + } +} + +template +void SGDSolver::SnapshotSolverStateToBinaryProto( + const string& model_filename) { + SolverState state; + state.set_iter(this->iter_); + state.set_learned_net(model_filename); + state.set_current_step(this->current_step_); + state.clear_history(); + for (int i = 0; i < history_.size(); ++i) { + // Add history + BlobProto* history_blob = state.add_history(); + history_[i]->ToProto(history_blob); + } + string snapshot_filename = Solver::SnapshotFilename(".solverstate"); + LOG(INFO) + << "Snapshotting solver state to binary proto file" << snapshot_filename; + WriteProtoToBinaryFile(state, snapshot_filename.c_str()); +} + +template +void SGDSolver::SnapshotSolverStateToHDF5( + const string& model_filename) { + string snapshot_filename = + Solver::SnapshotFilename(".solverstate.h5"); + LOG(INFO) << "Snapshotting solver state to HDF5 file " << snapshot_filename; + hid_t file_hid = H5Fcreate(snapshot_filename.c_str(), H5F_ACC_TRUNC, + H5P_DEFAULT, H5P_DEFAULT); + CHECK_GE(file_hid, 0) + << "Couldn't open " << snapshot_filename << " to save solver state."; + hdf5_save_int(file_hid, "iter", this->iter_); + hdf5_save_string(file_hid, "learned_net", model_filename); + hdf5_save_int(file_hid, "current_step", this->current_step_); + hid_t history_hid = H5Gcreate2(file_hid, "history", H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK_GE(history_hid, 0) + << "Error saving solver state to " << snapshot_filename << "."; + for (int i = 0; i < history_.size(); ++i) { + ostringstream oss; + oss << i; + hdf5_save_nd_dataset(history_hid, oss.str(), *history_[i]); + } + H5Gclose(history_hid); + H5Fclose(file_hid); +} + +template +void SGDSolver::RestoreSolverStateFromBinaryProto( + const string& state_file) { + SolverState state; + ReadProtoFromBinaryFile(state_file, &state); + this->iter_ = state.iter(); + if (state.has_learned_net()) { + NetParameter net_param; + ReadNetParamsFromBinaryFileOrDie(state.learned_net().c_str(), &net_param); + this->net_->CopyTrainedLayersFrom(net_param); + } + this->current_step_ = state.current_step(); + CHECK_EQ(state.history_size(), history_.size()) + << "Incorrect length of history blobs."; + LOG(INFO) << "SGDSolver: restoring history"; + for (int i = 0; i < history_.size(); ++i) { + history_[i]->FromProto(state.history(i)); + } +} + +template +void SGDSolver::RestoreSolverStateFromHDF5(const string& state_file) 
{ + hid_t file_hid = H5Fopen(state_file.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK_GE(file_hid, 0) << "Couldn't open solver state file " << state_file; + this->iter_ = hdf5_load_int(file_hid, "iter"); + if (H5LTfind_dataset(file_hid, "learned_net")) { + string learned_net = hdf5_load_string(file_hid, "learned_net"); + this->net_->CopyTrainedLayersFrom(learned_net); + } + this->current_step_ = hdf5_load_int(file_hid, "current_step"); + hid_t history_hid = H5Gopen2(file_hid, "history", H5P_DEFAULT); + CHECK_GE(history_hid, 0) << "Error reading history from " << state_file; + int state_history_size = hdf5_get_num_links(history_hid); + CHECK_EQ(state_history_size, history_.size()) + << "Incorrect length of history blobs."; + for (int i = 0; i < history_.size(); ++i) { + ostringstream oss; + oss << i; + hdf5_load_nd_dataset(history_hid, oss.str().c_str(), 0, + kMaxBlobAxes, history_[i].get()); + } + H5Gclose(history_hid); + H5Fclose(file_hid); +} + +template +void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { + CHECK(Caffe::root_solver()); + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype momentum = this->param_.momentum(); + Dtype local_rate = rate * net_params_lr[param_id]; + switch (Caffe::mode()) { + case Caffe::CPU: { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + this->history_[param_id]->mutable_cpu_data()); + + // compute update: step back then over step + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->cpu_data(), -momentum, + this->update_[param_id]->mutable_cpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + this->history_[param_id]->mutable_gpu_data()); + + // compute update: step back then over step + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->gpu_data(), -momentum, + this->update_[param_id]->mutable_gpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { + CHECK(Caffe::root_solver()); + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype delta = this->param_.delta(); + Dtype local_rate = rate * net_params_lr[param_id]; + switch (Caffe::mode()) { + case Caffe::CPU: { + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_add(net_params[param_id]->count(), + 
this->update_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->history_[param_id]->mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_add(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->history_[param_id]->mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // scale and copy + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void RMSPropSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + + // get the learning rate + Dtype delta = this->param_.delta(); + Dtype rms_decay = this->param_.rms_decay(); + Dtype local_rate = rate * net_params_lr[param_id]; + + switch (Caffe::mode()) { + case Caffe::CPU: + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_cpu_axpby(net_params[param_id] -> count(), + Dtype(1-rms_decay), this->update_[param_id]->cpu_data(), + rms_decay, this->history_[param_id]-> mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), + net_params[param_id]->mutable_cpu_diff()); + break; + case Caffe::GPU: +#ifndef CPU_ONLY + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + 
+ // update history + caffe_gpu_axpby(net_params[param_id] -> count(), + Dtype(1-rms_decay), this->update_[param_id]->gpu_data(), + rms_decay, this->history_[param_id]-> mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void AdaDeltaSolver::AdaDeltaPreSolve() { + // Add the extra history entries for AdaDelta after those from + // SGDSolver::PreSolve + const vector*>& net_params = this->net_->learnable_params(); + for (int i = 0; i < net_params.size(); ++i) { + const vector& shape = net_params[i]->shape(); + this->history_.push_back( + shared_ptr >(new Blob(shape))); + } +} + +template +void AdaDeltaSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype delta = this->param_.delta(); + Dtype momentum = this->param_.momentum(); + Dtype local_rate = rate * net_params_lr[param_id]; + size_t update_history_offset = net_params.size(); + switch (Caffe::mode()) { + case Caffe::CPU: { + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history of gradients + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->cpu_data(), momentum, + this->history_[param_id]->mutable_cpu_data()); + + // add delta to history to guard against dividing by zero later + caffe_set(net_params[param_id]->count(), delta, + this->temp_[param_id]->mutable_cpu_data()); + + caffe_add(net_params[param_id]->count(), + this->temp_[param_id]->cpu_data(), + this->history_[update_history_offset + param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add(net_params[param_id]->count(), + this->temp_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + + // divide history of updates by history of gradients + caffe_div(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->temp_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // jointly compute the RMS of both for update and gradient history + caffe_powx(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + // compute the update + caffe_mul(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + + // compute square of update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history of updates + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + 
this->update_[param_id]->cpu_data(), momentum, + this->history_[update_history_offset + param_id]->mutable_cpu_data()); + + // apply learning rate + caffe_cpu_scale(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history of gradients + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->gpu_data(), momentum, + this->history_[param_id]->mutable_gpu_data()); + + // add delta to history to guard against dividing by zero later + caffe_gpu_set(net_params[param_id]->count(), delta, + this->temp_[param_id]->mutable_gpu_data()); + + caffe_gpu_add(net_params[param_id]->count(), + this->temp_[param_id]->gpu_data(), + this->history_[update_history_offset + param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add(net_params[param_id]->count(), + this->temp_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + + // divide history of updates by history of gradients + caffe_gpu_div(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->temp_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // jointly compute the RMS of both for update and gradient history + caffe_gpu_powx(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + // compute the update and copy to net_diff + caffe_gpu_mul(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + + // compute square of update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history of updates + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->gpu_data(), momentum, + this->history_[update_history_offset + param_id]->mutable_gpu_data()); + + // apply learning rate + caffe_gpu_scale(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void AdamSolver::AdamPreSolve() { + // Add the extra history entries for Adam after those from + // SGDSolver::PreSolve + const vector*>& net_params = this->net_->learnable_params(); + for (int i = 0; i < net_params.size(); ++i) { + const vector& shape = net_params[i]->shape(); + this->history_.push_back( + shared_ptr >(new Blob(shape))); + } +} + +template +void AdamSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype local_rate = rate * net_params_lr[param_id]; + const Dtype beta1 = this->param_.momentum(); + const Dtype beta2 = this->param_.momentum2(); + + // we create aliases for convenience + size_t update_history_offset = net_params.size(); + Blob* val_m = this->history_[param_id].get(); + Blob* val_v = this->history_[param_id + update_history_offset].get(); + Blob* val_t = 
this->temp_[param_id].get(); + + const int t = this->iter_ + 1; + const Dtype correction = std::sqrt(Dtype(1) - pow(beta2, t)) / + (Dtype(1.) - pow(beta1, t)); + const int N = net_params[param_id]->count(); + const Dtype eps_hat = this->param_.delta(); + + switch (Caffe::mode()) { + case Caffe::CPU: { + // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t + caffe_cpu_axpby(N, Dtype(1)-beta1, + net_params[param_id]->cpu_diff(), beta1, + val_m->mutable_cpu_data()); + + // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 + caffe_mul(N, + net_params[param_id]->cpu_diff(), + net_params[param_id]->cpu_diff(), + val_t->mutable_cpu_data()); + caffe_cpu_axpby(N, Dtype(1)-beta2, + val_t->cpu_data(), beta2, + val_v->mutable_cpu_data()); + + // set update + caffe_powx(N, + val_v->cpu_data(), Dtype(0.5), + val_t->mutable_cpu_data()); + caffe_add_scalar(N, eps_hat, val_t->mutable_cpu_data()); + caffe_div(N, + val_m->cpu_data(), + val_t->cpu_data(), + val_t->mutable_cpu_data()); + + caffe_cpu_scale(N, local_rate*correction, + val_t->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t + caffe_gpu_axpby(N, Dtype(1)-beta1, + net_params[param_id]->gpu_diff(), beta1, + val_m->mutable_gpu_data()); + + // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 + caffe_gpu_mul(N, + net_params[param_id]->gpu_diff(), + net_params[param_id]->gpu_diff(), + val_t->mutable_gpu_data()); + caffe_gpu_axpby(N, Dtype(1)-beta2, + val_t->gpu_data(), beta2, + val_v->mutable_gpu_data()); + + // set update + caffe_gpu_powx(N, + val_v->gpu_data(), Dtype(0.5), + val_t->mutable_gpu_data()); + caffe_gpu_add_scalar(N, eps_hat, + val_t->mutable_gpu_data()); + caffe_gpu_div(N, + val_m->gpu_data(), + val_t->gpu_data(), + val_t->mutable_gpu_data()); + + caffe_gpu_scale(N, local_rate*correction, + val_t->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +INSTANTIATE_CLASS(Solver); +INSTANTIATE_CLASS(SGDSolver); +INSTANTIATE_CLASS(NesterovSolver); +INSTANTIATE_CLASS(AdaGradSolver); +INSTANTIATE_CLASS(RMSPropSolver); +INSTANTIATE_CLASS(AdaDeltaSolver); +INSTANTIATE_CLASS(AdamSolver); + +} // namespace caffe diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp new file mode 100755 index 0000000..a667a86 --- /dev/null +++ b/src/caffe/syncedmem.cpp @@ -0,0 +1,157 @@ +#include + +#include "caffe/common.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +SyncedMemory::~SyncedMemory() { + if (cpu_ptr_ && own_cpu_data_) { + CaffeFreeHost(cpu_ptr_); + } + +#ifndef CPU_ONLY + if (gpu_ptr_ && own_gpu_data_) { + int initial_device; + cudaGetDevice(&initial_device); + if (gpu_device_ != -1) { + CUDA_CHECK(cudaSetDevice(gpu_device_)); + } + CUDA_CHECK(cudaFree(gpu_ptr_)); + cudaSetDevice(initial_device); + } +#endif // CPU_ONLY +} + +inline void SyncedMemory::to_cpu() { + switch (head_) { + case UNINITIALIZED: + CaffeMallocHost(&cpu_ptr_, size_); + caffe_memset(size_, 0, cpu_ptr_); + head_ = HEAD_AT_CPU; + own_cpu_data_ = true; + break; + case HEAD_AT_GPU: +#ifndef CPU_ONLY + if (cpu_ptr_ == NULL) { + CaffeMallocHost(&cpu_ptr_, size_); + own_cpu_data_ = true; + } + caffe_gpu_memcpy(size_, gpu_ptr_, cpu_ptr_); + head_ = SYNCED; +#else + NO_GPU; +#endif + break; + case HEAD_AT_CPU: + case SYNCED: + break; + } +} + +inline void SyncedMemory::to_gpu() { +#ifndef CPU_ONLY + switch (head_) 
{ + case UNINITIALIZED: + CUDA_CHECK(cudaGetDevice(&gpu_device_)); + CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); + caffe_gpu_memset(size_, 0, gpu_ptr_); + head_ = HEAD_AT_GPU; + own_gpu_data_ = true; + break; + case HEAD_AT_CPU: + if (gpu_ptr_ == NULL) { + CUDA_CHECK(cudaGetDevice(&gpu_device_)); + CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); + own_gpu_data_ = true; + } + caffe_gpu_memcpy(size_, cpu_ptr_, gpu_ptr_); + head_ = SYNCED; + break; + case HEAD_AT_GPU: + case SYNCED: + break; + } +#else + NO_GPU; +#endif +} + +const void* SyncedMemory::cpu_data() { + to_cpu(); + return (const void*)cpu_ptr_; +} + +void SyncedMemory::set_cpu_data(void* data) { + CHECK(data); + if (own_cpu_data_) { + CaffeFreeHost(cpu_ptr_); + } + cpu_ptr_ = data; + head_ = HEAD_AT_CPU; + own_cpu_data_ = false; +} + +const void* SyncedMemory::gpu_data() { +#ifndef CPU_ONLY + to_gpu(); + return (const void*)gpu_ptr_; +#else + NO_GPU; +#endif +} + +void SyncedMemory::set_gpu_data(void* data) { +#ifndef CPU_ONLY + CHECK(data); + if (own_gpu_data_) { + int initial_device; + cudaGetDevice(&initial_device); + if (gpu_device_ != -1) { + CUDA_CHECK(cudaSetDevice(gpu_device_)); + } + CUDA_CHECK(cudaFree(gpu_ptr_)); + cudaSetDevice(initial_device); + } + gpu_ptr_ = data; + head_ = HEAD_AT_GPU; + own_gpu_data_ = false; +#else + NO_GPU; +#endif +} + +void* SyncedMemory::mutable_cpu_data() { + to_cpu(); + head_ = HEAD_AT_CPU; + return cpu_ptr_; +} + +void* SyncedMemory::mutable_gpu_data() { +#ifndef CPU_ONLY + to_gpu(); + head_ = HEAD_AT_GPU; + return gpu_ptr_; +#else + NO_GPU; +#endif +} + +#ifndef CPU_ONLY +void SyncedMemory::async_gpu_push(const cudaStream_t& stream) { + CHECK(head_ == HEAD_AT_CPU); + if (gpu_ptr_ == NULL) { + CUDA_CHECK(cudaGetDevice(&gpu_device_)); + CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); + own_gpu_data_ = true; + } + const cudaMemcpyKind put = cudaMemcpyHostToDevice; + CUDA_CHECK(cudaMemcpyAsync(gpu_ptr_, cpu_ptr_, size_, put, stream)); + // Assume caller will synchronize on the stream before use + head_ = SYNCED; +} +#endif + +} // namespace caffe + diff --git a/src/caffe/test/CMakeLists.txt b/src/caffe/test/CMakeLists.txt new file mode 100755 index 0000000..35a803f --- /dev/null +++ b/src/caffe/test/CMakeLists.txt @@ -0,0 +1,36 @@ +# The option allows to include in build only selected test files and exclude all others +# Usage example: +# cmake -DBUILD_only_tests="common,net,blob,im2col_kernel" +set(BUILD_only_tests "" CACHE STRING "Blank or comma-separated list of test files to build without 'test_' prefix and extention") +caffe_leave_only_selected_tests(test_srcs ${BUILD_only_tests}) +caffe_leave_only_selected_tests(test_cuda ${BUILD_only_tests}) + +# For 'make runtest' target we don't need to embed test data paths to +# source files, because test target is executed in source directory +# That's why the lines below are commented. 
TODO: remove them + +# definition needed to include CMake generated files +#add_definitions(-DCMAKE_BUILD) + +# generates test_data/sample_data_list.txt.gen.cmake +#caffe_configure_testdatafile(test_data/sample_data_list.txt) + +set(the_target test.testbin) +set(test_args --gtest_shuffle) + +if(HAVE_CUDA) + caffe_cuda_compile(test_cuda_objs ${test_cuda}) + list(APPEND test_srcs ${test_cuda_objs} ${test_cuda}) +else() + list(APPEND test_args --gtest_filter="-*GPU*") +endif() + +# ---[ Adding test target +add_executable(${the_target} EXCLUDE_FROM_ALL ${test_srcs}) +target_link_libraries(${the_target} gtest ${Caffe_LINK}) +caffe_default_properties(${the_target}) +caffe_set_runtime_directory(${the_target} "${PROJECT_BINARY_DIR}/test") + +# ---[ Adding runtest +add_custom_target(runtest COMMAND ${the_target} ${test_args} + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp new file mode 100755 index 0000000..c14b67c --- /dev/null +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -0,0 +1,231 @@ +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/util/rng.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class AccuracyLayerTest : public CPUDeviceTest { + protected: + AccuracyLayerTest() + : blob_bottom_data_(new Blob()), + blob_bottom_label_(new Blob()), + blob_top_(new Blob()), + top_k_(3) { + vector shape(2); + shape[0] = 100; + shape[1] = 10; + blob_bottom_data_->Reshape(shape); + shape.resize(1); + blob_bottom_label_->Reshape(shape); + FillBottoms(); + + blob_bottom_vec_.push_back(blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_label_); + blob_top_vec_.push_back(blob_top_); + } + + virtual void FillBottoms() { + // fill the probability values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_); + + const unsigned int prefetch_rng_seed = caffe_rng_rand(); + shared_ptr rng(new Caffe::RNG(prefetch_rng_seed)); + caffe::rng_t* prefetch_rng = + static_cast(rng->generator()); + Dtype* label_data = blob_bottom_label_->mutable_cpu_data(); + for (int i = 0; i < blob_bottom_label_->count(); ++i) { + label_data[i] = (*prefetch_rng)() % 10; + } + } + + virtual ~AccuracyLayerTest() { + delete blob_bottom_data_; + delete blob_bottom_label_; + delete blob_top_; + } + Blob* const blob_bottom_data_; + Blob* const blob_bottom_label_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; + int top_k_; +}; + +TYPED_TEST_CASE(AccuracyLayerTest, TestDtypes); + +TYPED_TEST(AccuracyLayerTest, TestSetup) { + LayerParameter layer_param; + AccuracyLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +TYPED_TEST(AccuracyLayerTest, TestSetupTopK) { + LayerParameter layer_param; + AccuracyParameter* accuracy_param = + layer_param.mutable_accuracy_param(); + accuracy_param->set_top_k(5); + AccuracyLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); +} + 
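The top_k setting above changes what the layer counts as a correct prediction: a sample scores as correct when fewer than k class scores strictly exceed the score of its true label, so top_k = 1 reduces to ordinary argmax accuracy. A minimal sketch of that reference check (CorrectTopK is a hypothetical helper, mirroring the rank computation used by TestForwardCPUTopK further below):

#include <vector>

// Sketch only: decide whether one sample is correct under top-k accuracy.
// `scores` holds one prediction per class; `label` is the ground-truth class.
bool CorrectTopK(const std::vector<float>& scores, int label, int top_k) {
  int rank = 0;  // classes scoring strictly higher than the labeled class
  for (size_t c = 0; c < scores.size(); ++c) {
    if (scores[c] > scores[label]) ++rank;
  }
  return rank < top_k;
}

The layer's reported accuracy is then the fraction of (non-ignored) samples for which this check passes.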
+TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { + LayerParameter layer_param; + AccuracyLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + TypeParam max_value; + int max_id; + int num_correct_labels = 0; + for (int i = 0; i < 100; ++i) { + max_value = -FLT_MAX; + max_id = 0; + for (int j = 0; j < 10; ++j) { + if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) { + max_value = this->blob_bottom_data_->data_at(i, j, 0, 0); + max_id = j; + } + } + if (max_id == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { + ++num_correct_labels; + } + } + EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), + num_correct_labels / 100.0, 1e-4); +} + +TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { + this->blob_bottom_data_->Reshape(2, 10, 4, 5); + vector label_shape(3); + label_shape[0] = 2; label_shape[1] = 4; label_shape[2] = 5; + this->blob_bottom_label_->Reshape(label_shape); + this->FillBottoms(); + LayerParameter layer_param; + layer_param.mutable_accuracy_param()->set_axis(1); + AccuracyLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + TypeParam max_value; + const int num_labels = this->blob_bottom_label_->count(); + int max_id; + int num_correct_labels = 0; + vector label_offset(3); + for (int n = 0; n < this->blob_bottom_data_->num(); ++n) { + for (int h = 0; h < this->blob_bottom_data_->height(); ++h) { + for (int w = 0; w < this->blob_bottom_data_->width(); ++w) { + max_value = -FLT_MAX; + max_id = 0; + for (int c = 0; c < this->blob_bottom_data_->channels(); ++c) { + const TypeParam pred_value = + this->blob_bottom_data_->data_at(n, c, h, w); + if (pred_value > max_value) { + max_value = pred_value; + max_id = c; + } + } + label_offset[0] = n; label_offset[1] = h; label_offset[2] = w; + const int correct_label = + static_cast(this->blob_bottom_label_->data_at(label_offset)); + if (max_id == correct_label) { + ++num_correct_labels; + } + } + } + } + EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), + num_correct_labels / TypeParam(num_labels), 1e-4); +} + +TYPED_TEST(AccuracyLayerTest, TestForwardIgnoreLabel) { + LayerParameter layer_param; + const TypeParam kIgnoreLabelValue = -1; + layer_param.mutable_accuracy_param()->set_ignore_label(kIgnoreLabelValue); + AccuracyLayer layer(layer_param); + // Manually set some labels to the ignore label value (-1). + this->blob_bottom_label_->mutable_cpu_data()[2] = kIgnoreLabelValue; + this->blob_bottom_label_->mutable_cpu_data()[5] = kIgnoreLabelValue; + this->blob_bottom_label_->mutable_cpu_data()[32] = kIgnoreLabelValue; + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + TypeParam max_value; + int max_id; + int num_correct_labels = 0; + int count = 0; + for (int i = 0; i < 100; ++i) { + if (kIgnoreLabelValue == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { + continue; + } + ++count; + max_value = -FLT_MAX; + max_id = 0; + for (int j = 0; j < 10; ++j) { + if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) { + max_value = this->blob_bottom_data_->data_at(i, j, 0, 0); + max_id = j; + } + } + if (max_id == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { + ++num_correct_labels; + } + } + EXPECT_EQ(count, 97); // We set 3 out of 100 labels to kIgnoreLabelValue. 
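  // Annotation: with three labels set to the ignore value, only the remaining
  // 97 samples contribute, so the expected accuracy below is normalized by
  // `count` rather than by the full batch size of 100.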
+ EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), + num_correct_labels / TypeParam(count), 1e-4); +} + +TYPED_TEST(AccuracyLayerTest, TestForwardCPUTopK) { + LayerParameter layer_param; + AccuracyParameter* accuracy_param = layer_param.mutable_accuracy_param(); + accuracy_param->set_top_k(this->top_k_); + AccuracyLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + TypeParam current_value; + int current_rank; + int num_correct_labels = 0; + for (int i = 0; i < 100; ++i) { + for (int j = 0; j < 10; ++j) { + current_value = this->blob_bottom_data_->data_at(i, j, 0, 0); + current_rank = 0; + for (int k = 0; k < 10; ++k) { + if (this->blob_bottom_data_->data_at(i, k, 0, 0) > current_value) { + ++current_rank; + } + } + if (current_rank < this->top_k_ && + j == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { + ++num_correct_labels; + } + } + } + + EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), + num_correct_labels / 100.0, 1e-4); +} + +} // namespace caffe diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp new file mode 100755 index 0000000..895c3d3 --- /dev/null +++ b/src/caffe/test/test_argmax_layer.cpp @@ -0,0 +1,168 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class ArgMaxLayerTest : public CPUDeviceTest { + protected: + ArgMaxLayerTest() + : blob_bottom_(new Blob(10, 20, 1, 1)), + blob_top_(new Blob()), + top_k_(5) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~ArgMaxLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; + size_t top_k_; +}; + +TYPED_TEST_CASE(ArgMaxLayerTest, TestDtypes); + +TYPED_TEST(ArgMaxLayerTest, TestSetup) { + LayerParameter layer_param; + ArgMaxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), 1); +} + +TYPED_TEST(ArgMaxLayerTest, TestSetupMaxVal) { + LayerParameter layer_param; + ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); + argmax_param->set_out_max_val(true); + ArgMaxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), 2); +} + +TYPED_TEST(ArgMaxLayerTest, TestCPU) { + LayerParameter layer_param; + ArgMaxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + int max_ind; + TypeParam max_val; + int num = this->blob_bottom_->num(); + int dim = this->blob_bottom_->count() / num; + for (int i = 0; i < num; ++i) { + EXPECT_GE(top_data[i], 0); + EXPECT_LE(top_data[i], dim); + max_ind = top_data[i]; + max_val = bottom_data[i * dim + max_ind]; + for (int j = 0; j < dim; ++j) { 
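  // Annotation: every value in the row must be <= the value at the index the
  // layer reported; otherwise that index was not the true argmax.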
+ EXPECT_LE(bottom_data[i * dim + j], max_val); + } + } +} + +TYPED_TEST(ArgMaxLayerTest, TestCPUMaxVal) { + LayerParameter layer_param; + ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); + argmax_param->set_out_max_val(true); + ArgMaxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + int max_ind; + TypeParam max_val; + int num = this->blob_bottom_->num(); + int dim = this->blob_bottom_->count() / num; + for (int i = 0; i < num; ++i) { + EXPECT_GE(top_data[i], 0); + EXPECT_LE(top_data[i], dim); + max_ind = top_data[i * 2]; + max_val = top_data[i * 2 + 1]; + EXPECT_EQ(bottom_data[i * dim + max_ind], max_val); + for (int j = 0; j < dim; ++j) { + EXPECT_LE(bottom_data[i * dim + j], max_val); + } + } +} + +TYPED_TEST(ArgMaxLayerTest, TestCPUTopK) { + LayerParameter layer_param; + ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); + argmax_param->set_top_k(this->top_k_); + ArgMaxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + int max_ind; + TypeParam max_val; + int num = this->blob_bottom_->num(); + int dim = this->blob_bottom_->count() / num; + for (int i = 0; i < num; ++i) { + EXPECT_GE(this->blob_top_->data_at(i, 0, 0, 0), 0); + EXPECT_LE(this->blob_top_->data_at(i, 0, 0, 0), dim); + for (int j = 0; j < this->top_k_; ++j) { + max_ind = this->blob_top_->data_at(i, 0, j, 0); + max_val = this->blob_bottom_->data_at(i, max_ind, 0, 0); + int count = 0; + for (int k = 0; k < dim; ++k) { + if (this->blob_bottom_->data_at(i, k, 0, 0) > max_val) { + ++count; + } + } + EXPECT_EQ(j, count); + } + } +} + +TYPED_TEST(ArgMaxLayerTest, TestCPUMaxValTopK) { + LayerParameter layer_param; + ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); + argmax_param->set_out_max_val(true); + argmax_param->set_top_k(this->top_k_); + ArgMaxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + int max_ind; + TypeParam max_val; + int num = this->blob_bottom_->num(); + int dim = this->blob_bottom_->count() / num; + for (int i = 0; i < num; ++i) { + EXPECT_GE(this->blob_top_->data_at(i, 0, 0, 0), 0); + EXPECT_LE(this->blob_top_->data_at(i, 0, 0, 0), dim); + for (int j = 0; j < this->top_k_; ++j) { + max_ind = this->blob_top_->data_at(i, 0, j, 0); + max_val = this->blob_top_->data_at(i, 1, j, 0); + EXPECT_EQ(this->blob_bottom_->data_at(i, max_ind, 0, 0), max_val); + int count = 0; + for (int k = 0; k < dim; ++k) { + if (this->blob_bottom_->data_at(i, k, 0, 0) > max_val) { + ++count; + } + } + EXPECT_EQ(j, count); + } + } +} + + +} // namespace caffe diff --git a/src/caffe/test/test_benchmark.cpp b/src/caffe/test/test_benchmark.cpp new file mode 100755 index 0000000..43aaa63 --- /dev/null +++ b/src/caffe/test/test_benchmark.cpp @@ -0,0 +1,90 @@ +#include // for usleep + +#include "gtest/gtest.h" + +#include "caffe/common.hpp" +#include "caffe/util/benchmark.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +const float kMillisecondsThreshold = 30; + +template +class BenchmarkTest : public MultiDeviceTest {}; + +TYPED_TEST_CASE(BenchmarkTest, TestDtypesAndDevices); + 
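The benchmark tests below exercise caffe::Timer's state flags (initted, running, has_run_at_least_once) and its elapsed-time accessors, allowing kMillisecondsThreshold of slack around a 300 ms sleep. A minimal usage sketch under the same assumed API:

caffe::Timer timer;
timer.Start();
// ... work being timed ...
timer.Stop();
// Elapsed time of the last Start()/Stop() interval, in two units.
LOG(INFO) << "elapsed: " << timer.MilliSeconds() << " ms ("
          << timer.Seconds() << " s)";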
+TYPED_TEST(BenchmarkTest, TestTimerConstructor) { + Timer timer; + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_FALSE(timer.has_run_at_least_once()); +} + +TYPED_TEST(BenchmarkTest, TestTimerStart) { + Timer timer; + timer.Start(); + EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); + timer.Start(); + EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); + timer.Stop(); + timer.Start(); + EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); +} + +TYPED_TEST(BenchmarkTest, TestTimerStop) { + Timer timer; + timer.Stop(); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_FALSE(timer.has_run_at_least_once()); + timer.Start(); + timer.Stop(); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); + timer.Stop(); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); +} + +TYPED_TEST(BenchmarkTest, TestTimerMilliSeconds) { + Timer timer; + EXPECT_EQ(timer.MilliSeconds(), 0); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_FALSE(timer.has_run_at_least_once()); + timer.Start(); + usleep(300 * 1000); + EXPECT_GE(timer.MilliSeconds(), 300 - kMillisecondsThreshold); + EXPECT_LE(timer.MilliSeconds(), 300 + kMillisecondsThreshold); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); +} + +TYPED_TEST(BenchmarkTest, TestTimerSeconds) { + Timer timer; + EXPECT_EQ(timer.Seconds(), 0); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_FALSE(timer.has_run_at_least_once()); + timer.Start(); + usleep(300 * 1000); + EXPECT_GE(timer.Seconds(), 0.3 - kMillisecondsThreshold / 1000.); + EXPECT_LE(timer.Seconds(), 0.3 + kMillisecondsThreshold / 1000.); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); +} + +} // namespace caffe diff --git a/src/caffe/test/test_blob.cpp b/src/caffe/test/test_blob.cpp new file mode 100755 index 0000000..7da6423 --- /dev/null +++ b/src/caffe/test/test_blob.cpp @@ -0,0 +1,294 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class BlobSimpleTest : public ::testing::Test { + protected: + BlobSimpleTest() + : blob_(new Blob()), + blob_preshaped_(new Blob(2, 3, 4, 5)) {} + virtual ~BlobSimpleTest() { delete blob_; delete blob_preshaped_; } + Blob* const blob_; + Blob* const blob_preshaped_; +}; + +TYPED_TEST_CASE(BlobSimpleTest, TestDtypes); + +TYPED_TEST(BlobSimpleTest, TestInitialization) { + EXPECT_TRUE(this->blob_); + EXPECT_TRUE(this->blob_preshaped_); + EXPECT_EQ(this->blob_preshaped_->num(), 2); + EXPECT_EQ(this->blob_preshaped_->channels(), 3); + EXPECT_EQ(this->blob_preshaped_->height(), 4); + EXPECT_EQ(this->blob_preshaped_->width(), 5); + EXPECT_EQ(this->blob_preshaped_->count(), 120); + EXPECT_EQ(this->blob_->num_axes(), 0); + EXPECT_EQ(this->blob_->count(), 0); +} + +TYPED_TEST(BlobSimpleTest, TestPointersCPUGPU) { + EXPECT_TRUE(this->blob_preshaped_->gpu_data()); + EXPECT_TRUE(this->blob_preshaped_->cpu_data()); + EXPECT_TRUE(this->blob_preshaped_->mutable_gpu_data()); + 
EXPECT_TRUE(this->blob_preshaped_->mutable_cpu_data()); +} + +TYPED_TEST(BlobSimpleTest, TestReshape) { + this->blob_->Reshape(2, 3, 4, 5); + EXPECT_EQ(this->blob_->num(), 2); + EXPECT_EQ(this->blob_->channels(), 3); + EXPECT_EQ(this->blob_->height(), 4); + EXPECT_EQ(this->blob_->width(), 5); + EXPECT_EQ(this->blob_->count(), 120); +} + +TYPED_TEST(BlobSimpleTest, TestLegacyBlobProtoShapeEquals) { + BlobProto blob_proto; + + // Reshape to (3 x 2). + vector shape(2); + shape[0] = 3; + shape[1] = 2; + this->blob_->Reshape(shape); + + // (3 x 2) blob == (1 x 1 x 3 x 2) legacy blob + blob_proto.set_num(1); + blob_proto.set_channels(1); + blob_proto.set_height(3); + blob_proto.set_width(2); + EXPECT_TRUE(this->blob_->ShapeEquals(blob_proto)); + + // (3 x 2) blob != (0 x 1 x 3 x 2) legacy blob + blob_proto.set_num(0); + blob_proto.set_channels(1); + blob_proto.set_height(3); + blob_proto.set_width(2); + EXPECT_FALSE(this->blob_->ShapeEquals(blob_proto)); + + // (3 x 2) blob != (3 x 1 x 3 x 2) legacy blob + blob_proto.set_num(3); + blob_proto.set_channels(1); + blob_proto.set_height(3); + blob_proto.set_width(2); + EXPECT_FALSE(this->blob_->ShapeEquals(blob_proto)); + + // Reshape to (1 x 3 x 2). + shape.insert(shape.begin(), 1); + this->blob_->Reshape(shape); + + // (1 x 3 x 2) blob == (1 x 1 x 3 x 2) legacy blob + blob_proto.set_num(1); + blob_proto.set_channels(1); + blob_proto.set_height(3); + blob_proto.set_width(2); + EXPECT_TRUE(this->blob_->ShapeEquals(blob_proto)); + + // Reshape to (2 x 3 x 2). + shape[0] = 2; + this->blob_->Reshape(shape); + + // (2 x 3 x 2) blob != (1 x 1 x 3 x 2) legacy blob + blob_proto.set_num(1); + blob_proto.set_channels(1); + blob_proto.set_height(3); + blob_proto.set_width(2); + EXPECT_FALSE(this->blob_->ShapeEquals(blob_proto)); +} + +template +class BlobMathTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: + BlobMathTest() + : blob_(new Blob(2, 3, 4, 5)), + epsilon_(1e-6) {} + + virtual ~BlobMathTest() { delete blob_; } + Blob* const blob_; + Dtype epsilon_; +}; + +TYPED_TEST_CASE(BlobMathTest, TestDtypesAndDevices); + +TYPED_TEST(BlobMathTest, TestSumOfSquares) { + typedef typename TypeParam::Dtype Dtype; + + // Uninitialized Blob should have sum of squares == 0. + EXPECT_EQ(0, this->blob_->sumsq_data()); + EXPECT_EQ(0, this->blob_->sumsq_diff()); + FillerParameter filler_param; + filler_param.set_min(-3); + filler_param.set_max(3); + UniformFiller filler(filler_param); + filler.Fill(this->blob_); + Dtype expected_sumsq = 0; + const Dtype* data = this->blob_->cpu_data(); + for (int i = 0; i < this->blob_->count(); ++i) { + expected_sumsq += data[i] * data[i]; + } + // Do a mutable access on the current device, + // so that the sumsq computation is done on that device. + // (Otherwise, this would only check the CPU sumsq implementation.) + switch (TypeParam::device) { + case Caffe::CPU: + this->blob_->mutable_cpu_data(); + break; + case Caffe::GPU: + this->blob_->mutable_gpu_data(); + break; + default: + LOG(FATAL) << "Unknown device: " << TypeParam::device; + } + EXPECT_NEAR(expected_sumsq, this->blob_->sumsq_data(), + this->epsilon_ * expected_sumsq); + EXPECT_EQ(0, this->blob_->sumsq_diff()); + + // Check sumsq_diff too. 
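  // Annotation: the diff is filled with kDiffScaleFactor * data below, and the
  // sum of squares scales quadratically, so the expected value is
  // expected_sumsq * kDiffScaleFactor * kDiffScaleFactor.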
+ const Dtype kDiffScaleFactor = 7; + caffe_cpu_scale(this->blob_->count(), kDiffScaleFactor, data, + this->blob_->mutable_cpu_diff()); + switch (TypeParam::device) { + case Caffe::CPU: + this->blob_->mutable_cpu_diff(); + break; + case Caffe::GPU: + this->blob_->mutable_gpu_diff(); + break; + default: + LOG(FATAL) << "Unknown device: " << TypeParam::device; + } + EXPECT_NEAR(expected_sumsq, this->blob_->sumsq_data(), + this->epsilon_ * expected_sumsq); + const Dtype expected_sumsq_diff = + expected_sumsq * kDiffScaleFactor * kDiffScaleFactor; + EXPECT_NEAR(expected_sumsq_diff, this->blob_->sumsq_diff(), + this->epsilon_ * expected_sumsq_diff); +} + +TYPED_TEST(BlobMathTest, TestAsum) { + typedef typename TypeParam::Dtype Dtype; + + // Uninitialized Blob should have asum == 0. + EXPECT_EQ(0, this->blob_->asum_data()); + EXPECT_EQ(0, this->blob_->asum_diff()); + FillerParameter filler_param; + filler_param.set_min(-3); + filler_param.set_max(3); + UniformFiller filler(filler_param); + filler.Fill(this->blob_); + Dtype expected_asum = 0; + const Dtype* data = this->blob_->cpu_data(); + for (int i = 0; i < this->blob_->count(); ++i) { + expected_asum += std::fabs(data[i]); + } + // Do a mutable access on the current device, + // so that the asum computation is done on that device. + // (Otherwise, this would only check the CPU asum implementation.) + switch (TypeParam::device) { + case Caffe::CPU: + this->blob_->mutable_cpu_data(); + break; + case Caffe::GPU: + this->blob_->mutable_gpu_data(); + break; + default: + LOG(FATAL) << "Unknown device: " << TypeParam::device; + } + EXPECT_NEAR(expected_asum, this->blob_->asum_data(), + this->epsilon_ * expected_asum); + EXPECT_EQ(0, this->blob_->asum_diff()); + + // Check asum_diff too. + const Dtype kDiffScaleFactor = 7; + caffe_cpu_scale(this->blob_->count(), kDiffScaleFactor, data, + this->blob_->mutable_cpu_diff()); + switch (TypeParam::device) { + case Caffe::CPU: + this->blob_->mutable_cpu_diff(); + break; + case Caffe::GPU: + this->blob_->mutable_gpu_diff(); + break; + default: + LOG(FATAL) << "Unknown device: " << TypeParam::device; + } + EXPECT_NEAR(expected_asum, this->blob_->asum_data(), + this->epsilon_ * expected_asum); + const Dtype expected_diff_asum = expected_asum * kDiffScaleFactor; + EXPECT_NEAR(expected_diff_asum, this->blob_->asum_diff(), + this->epsilon_ * expected_diff_asum); +} + +TYPED_TEST(BlobMathTest, TestScaleData) { + typedef typename TypeParam::Dtype Dtype; + + EXPECT_EQ(0, this->blob_->asum_data()); + EXPECT_EQ(0, this->blob_->asum_diff()); + FillerParameter filler_param; + filler_param.set_min(-3); + filler_param.set_max(3); + UniformFiller filler(filler_param); + filler.Fill(this->blob_); + const Dtype asum_before_scale = this->blob_->asum_data(); + // Do a mutable access on the current device, + // so that the asum computation is done on that device. + // (Otherwise, this would only check the CPU asum implementation.) + switch (TypeParam::device) { + case Caffe::CPU: + this->blob_->mutable_cpu_data(); + break; + case Caffe::GPU: + this->blob_->mutable_gpu_data(); + break; + default: + LOG(FATAL) << "Unknown device: " << TypeParam::device; + } + const Dtype kDataScaleFactor = 3; + this->blob_->scale_data(kDataScaleFactor); + EXPECT_NEAR(asum_before_scale * kDataScaleFactor, this->blob_->asum_data(), + this->epsilon_ * asum_before_scale * kDataScaleFactor); + EXPECT_EQ(0, this->blob_->asum_diff()); + + // Check scale_diff too. 
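  // Annotation: the diff is seeded with kDataToDiffScaleFactor times the
  // already-scaled data, so its asum starts at
  // asum_before_scale * kDataScaleFactor * kDataToDiffScaleFactor;
  // scale_diff() should then multiply that asum by kDiffScaleFactor while
  // leaving asum_data untouched.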
+ const Dtype kDataToDiffScaleFactor = 7; + const Dtype* data = this->blob_->cpu_data(); + caffe_cpu_scale(this->blob_->count(), kDataToDiffScaleFactor, data, + this->blob_->mutable_cpu_diff()); + const Dtype expected_asum_before_scale = asum_before_scale * kDataScaleFactor; + EXPECT_NEAR(expected_asum_before_scale, this->blob_->asum_data(), + this->epsilon_ * expected_asum_before_scale); + const Dtype expected_diff_asum_before_scale = + asum_before_scale * kDataScaleFactor * kDataToDiffScaleFactor; + EXPECT_NEAR(expected_diff_asum_before_scale, this->blob_->asum_diff(), + this->epsilon_ * expected_diff_asum_before_scale); + switch (TypeParam::device) { + case Caffe::CPU: + this->blob_->mutable_cpu_diff(); + break; + case Caffe::GPU: + this->blob_->mutable_gpu_diff(); + break; + default: + LOG(FATAL) << "Unknown device: " << TypeParam::device; + } + const Dtype kDiffScaleFactor = 3; + this->blob_->scale_diff(kDiffScaleFactor); + EXPECT_NEAR(asum_before_scale * kDataScaleFactor, this->blob_->asum_data(), + this->epsilon_ * asum_before_scale * kDataScaleFactor); + const Dtype expected_diff_asum = + expected_diff_asum_before_scale * kDiffScaleFactor; + EXPECT_NEAR(expected_diff_asum, this->blob_->asum_diff(), + this->epsilon_ * expected_diff_asum); +} + +} // namespace caffe diff --git a/src/caffe/test/test_caffe_main.cpp b/src/caffe/test/test_caffe_main.cpp new file mode 100755 index 0000000..c8caf5a --- /dev/null +++ b/src/caffe/test/test_caffe_main.cpp @@ -0,0 +1,40 @@ +// The main caffe test code. Your test cpp code should include this hpp +// to allow a main function to be compiled into the binary. + +#include "caffe/caffe.hpp" +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { +#ifndef CPU_ONLY + cudaDeviceProp CAFFE_TEST_CUDA_PROP; +#endif +} + +#ifndef CPU_ONLY +using caffe::CAFFE_TEST_CUDA_PROP; +#endif + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + caffe::GlobalInit(&argc, &argv); +#ifndef CPU_ONLY + // Before starting testing, let's first print out a few cuda defice info. + int device; + cudaGetDeviceCount(&device); + cout << "Cuda number of devices: " << device << endl; + if (argc > 1) { + // Use the given device + device = atoi(argv[1]); + cudaSetDevice(device); + cout << "Setting to use device " << device << endl; + } else if (CUDA_TEST_DEVICE >= 0) { + // Use the device assigned in build configuration; but with a lower priority + device = CUDA_TEST_DEVICE; + } + cudaGetDevice(&device); + cout << "Current device id: " << device << endl; + cudaGetDeviceProperties(&CAFFE_TEST_CUDA_PROP, device); +#endif + // invoke the test. + return RUN_ALL_TESTS(); +} diff --git a/src/caffe/test/test_common.cpp b/src/caffe/test/test_common.cpp new file mode 100755 index 0000000..b3a61b0 --- /dev/null +++ b/src/caffe/test/test_common.cpp @@ -0,0 +1,66 @@ +#include + +#include "gtest/gtest.h" + +#include "caffe/common.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +class CommonTest : public ::testing::Test {}; + +#ifndef CPU_ONLY // GPU Caffe singleton test. 
+ +TEST_F(CommonTest, TestCublasHandlerGPU) { + int cuda_device_id; + CUDA_CHECK(cudaGetDevice(&cuda_device_id)); + EXPECT_TRUE(Caffe::cublas_handle()); +} + +#endif + +TEST_F(CommonTest, TestBrewMode) { + Caffe::set_mode(Caffe::CPU); + EXPECT_EQ(Caffe::mode(), Caffe::CPU); + Caffe::set_mode(Caffe::GPU); + EXPECT_EQ(Caffe::mode(), Caffe::GPU); +} + +TEST_F(CommonTest, TestRandSeedCPU) { + SyncedMemory data_a(10 * sizeof(int)); + SyncedMemory data_b(10 * sizeof(int)); + Caffe::set_random_seed(1701); + caffe_rng_bernoulli(10, 0.5, static_cast(data_a.mutable_cpu_data())); + + Caffe::set_random_seed(1701); + caffe_rng_bernoulli(10, 0.5, static_cast(data_b.mutable_cpu_data())); + + for (int i = 0; i < 10; ++i) { + EXPECT_EQ(static_cast(data_a.cpu_data())[i], + static_cast(data_b.cpu_data())[i]); + } +} + +#ifndef CPU_ONLY // GPU Caffe singleton test. + +TEST_F(CommonTest, TestRandSeedGPU) { + SyncedMemory data_a(10 * sizeof(unsigned int)); + SyncedMemory data_b(10 * sizeof(unsigned int)); + Caffe::set_random_seed(1701); + CURAND_CHECK(curandGenerate(Caffe::curand_generator(), + static_cast(data_a.mutable_gpu_data()), 10)); + Caffe::set_random_seed(1701); + CURAND_CHECK(curandGenerate(Caffe::curand_generator(), + static_cast(data_b.mutable_gpu_data()), 10)); + for (int i = 0; i < 10; ++i) { + EXPECT_EQ(((const unsigned int*)(data_a.cpu_data()))[i], + ((const unsigned int*)(data_b.cpu_data()))[i]); + } +} + +#endif + +} // namespace caffe diff --git a/src/caffe/test/test_concat_layer.cpp b/src/caffe/test/test_concat_layer.cpp new file mode 100755 index 0000000..662a50f --- /dev/null +++ b/src/caffe/test/test_concat_layer.cpp @@ -0,0 +1,176 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class ConcatLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + ConcatLayerTest() + : blob_bottom_0_(new Blob(2, 3, 6, 5)), + blob_bottom_1_(new Blob(2, 5, 6, 5)), + blob_bottom_2_(new Blob(5, 3, 6, 5)), + blob_top_(new Blob()) {} + virtual void SetUp() { + // fill the values + shared_ptr > filler; + FillerParameter filler_param; + filler_param.set_value(1.); + filler.reset(new ConstantFiller(filler_param)); + filler->Fill(this->blob_bottom_0_); + filler_param.set_value(2.); + filler.reset(new ConstantFiller(filler_param)); + filler->Fill(this->blob_bottom_1_); + filler_param.set_value(3.); + filler.reset(new ConstantFiller(filler_param)); + filler->Fill(this->blob_bottom_2_); + blob_bottom_vec_0_.push_back(blob_bottom_0_); + blob_bottom_vec_0_.push_back(blob_bottom_1_); + blob_bottom_vec_1_.push_back(blob_bottom_0_); + blob_bottom_vec_1_.push_back(blob_bottom_2_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~ConcatLayerTest() { + delete blob_bottom_0_; delete blob_bottom_1_; + delete blob_bottom_2_; delete blob_top_; + } + + Blob* const blob_bottom_0_; + Blob* const blob_bottom_1_; + Blob* const blob_bottom_2_; + Blob* const blob_top_; + vector*> blob_bottom_vec_0_, blob_bottom_vec_1_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(ConcatLayerTest, TestDtypesAndDevices); + +TYPED_TEST(ConcatLayerTest, TestSetupNum) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_concat_param()->set_axis(0); + ConcatLayer layer(layer_param); + 
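  // Annotation: with axis 0 the blobs are concatenated along num, so after
  // SetUp the top should have num = 2 + 5 = 7 while channels, height, and
  // width must match bottom 0 (which is why bottom 2 differs only in num).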
layer.SetUp(this->blob_bottom_vec_1_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), + this->blob_bottom_0_->num() + this->blob_bottom_2_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_0_->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0_->height()); + EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0_->width()); +} + +TYPED_TEST(ConcatLayerTest, TestSetupChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConcatLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0_->num()); + EXPECT_EQ(this->blob_top_->channels(), + this->blob_bottom_0_->channels() + this->blob_bottom_1_->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0_->height()); + EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0_->width()); +} + +TYPED_TEST(ConcatLayerTest, TestSetupChannelsNegativeIndexing) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConcatLayer layer(layer_param); + // "channels" index is the third one from the end -- test negative indexing + // by setting axis to -3 and checking that we get the same results as above in + // TestSetupChannels. + layer_param.mutable_concat_param()->set_axis(-3); + layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0_->num()); + EXPECT_EQ(this->blob_top_->channels(), + this->blob_bottom_0_->channels() + this->blob_bottom_1_->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0_->height()); + EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0_->width()); +} + +TYPED_TEST(ConcatLayerTest, TestForwardNum) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_concat_param()->set_axis(0); + ConcatLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_1_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_1_, this->blob_top_vec_); + for (int n = 0; n < this->blob_bottom_vec_1_[0]->num(); ++n) { + for (int c = 0; c < this->blob_top_->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < this->blob_top_->width(); ++w) { + EXPECT_EQ(this->blob_top_->data_at(n, c, h, w), + this->blob_bottom_vec_1_[0]->data_at(n, c, h, w)); + } + } + } + } + for (int n = 0; n < this->blob_bottom_vec_1_[1]->num(); ++n) { + for (int c = 0; c < this->blob_top_->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < this->blob_top_->width(); ++w) { + EXPECT_EQ(this->blob_top_->data_at(n + 2, c, h, w), + this->blob_bottom_vec_1_[1]->data_at(n, c, h, w)); + } + } + } + } +} + +TYPED_TEST(ConcatLayerTest, TestForwardChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConcatLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_0_, this->blob_top_vec_); + for (int n = 0; n < this->blob_top_->num(); ++n) { + for (int c = 0; c < this->blob_bottom_0_->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < this->blob_top_->width(); ++w) { + EXPECT_EQ(this->blob_top_->data_at(n, c, h, w), + this->blob_bottom_vec_0_[0]->data_at(n, c, h, w)); + } + } + } + for (int c = 0; c < this->blob_bottom_1_->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < 
this->blob_top_->width(); ++w) { + EXPECT_EQ(this->blob_top_->data_at(n, c + 3, h, w), + this->blob_bottom_vec_0_[1]->data_at(n, c, h, w)); + } + } + } + } +} + +TYPED_TEST(ConcatLayerTest, TestGradientNum) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_concat_param()->set_axis(0); + ConcatLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradient(&layer, this->blob_bottom_vec_1_, + this->blob_top_vec_); +} + +TYPED_TEST(ConcatLayerTest, TestGradientChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConcatLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradient(&layer, this->blob_bottom_vec_0_, + this->blob_top_vec_); +} + +} // namespace caffe diff --git a/src/caffe/test/test_contrastive_loss_layer.cpp b/src/caffe/test/test_contrastive_loss_layer.cpp new file mode 100755 index 0000000..1e9447c --- /dev/null +++ b/src/caffe/test/test_contrastive_loss_layer.cpp @@ -0,0 +1,146 @@ +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class ContrastiveLossLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + ContrastiveLossLayerTest() + : blob_bottom_data_i_(new Blob(512, 2, 1, 1)), + blob_bottom_data_j_(new Blob(512, 2, 1, 1)), + blob_bottom_y_(new Blob(512, 1, 1, 1)), + blob_top_loss_(new Blob()) { + // fill the values + FillerParameter filler_param; + filler_param.set_min(-1.0); + filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin + UniformFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_i_); + blob_bottom_vec_.push_back(blob_bottom_data_i_); + filler.Fill(this->blob_bottom_data_j_); + blob_bottom_vec_.push_back(blob_bottom_data_j_); + for (int i = 0; i < blob_bottom_y_->count(); ++i) { + blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 + } + blob_bottom_vec_.push_back(blob_bottom_y_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~ContrastiveLossLayerTest() { + delete blob_bottom_data_i_; + delete blob_bottom_data_j_; + delete blob_bottom_y_; + delete blob_top_loss_; + } + + Blob* const blob_bottom_data_i_; + Blob* const blob_bottom_data_j_; + Blob* const blob_bottom_y_; + Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(ContrastiveLossLayerTest, TestDtypesAndDevices); + +TYPED_TEST(ContrastiveLossLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ContrastiveLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.contrastive_loss_param().margin(); + const int num = this->blob_bottom_data_i_->num(); + const int channels = this->blob_bottom_data_i_->channels(); + Dtype loss(0); + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff*diff; + } + if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs + loss += 
dist_sq; + } else { + Dtype dist = std::max(margin - sqrt(dist_sq), 0.0); + loss += dist*dist; + } + } + loss /= static_cast(num) * Dtype(2); + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); +} + +TYPED_TEST(ContrastiveLossLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ContrastiveLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + // check the gradient for the first two bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); +} + +TYPED_TEST(ContrastiveLossLayerTest, TestForwardLegacy) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_contrastive_loss_param()->set_legacy_version(true); + ContrastiveLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.contrastive_loss_param().margin(); + const int num = this->blob_bottom_data_i_->num(); + const int channels = this->blob_bottom_data_i_->channels(); + Dtype loss(0); + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff*diff; + } + if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs + loss += dist_sq; + } else { + loss += std::max(margin - dist_sq, Dtype(0.0)); + } + } + loss /= static_cast(num) * Dtype(2); + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); +} + +TYPED_TEST(ContrastiveLossLayerTest, TestGradientLegacy) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_contrastive_loss_param()->set_legacy_version(true); + ContrastiveLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + // check the gradient for the first two bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); +} + +} // namespace caffe diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp new file mode 100755 index 0000000..67d41ff --- /dev/null +++ b/src/caffe/test/test_convolution_layer.cpp @@ -0,0 +1,699 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +// Reference convolution for checking results: +// accumulate through explicit loops over input, output, and filters. 
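// Annotation (not part of the patch): in index form, within each group the
// reference computes, for every output position,
//   out[n, o, y, x] += in[n, k, y*stride_h - pad_h + p, x*stride_w - pad_w + q]
//                      * weight[o, k, p, q]
// summed over the group's input channels k and the kernel offsets (p, q),
// skipping positions that fall outside the padded input, and finally adds
// bias[o] when bias_term is set.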
+template +void caffe_conv(const Blob* in, ConvolutionParameter* conv_param, + const vector > >& weights, + Blob* out) { + // Kernel size, stride, and pad + int kernel_h, kernel_w; + if (conv_param->has_kernel_size()) { + kernel_h = kernel_w = conv_param->kernel_size(); + } else { + kernel_h = conv_param->kernel_h(); + kernel_w = conv_param->kernel_w(); + } + int pad_h, pad_w; + if (!conv_param->has_pad_h()) { + pad_h = pad_w = conv_param->pad(); + } else { + pad_h = conv_param->pad_h(); + pad_w = conv_param->pad_w(); + } + int stride_h, stride_w; + if (!conv_param->has_stride_h()) { + stride_h = stride_w = conv_param->stride(); + } else { + stride_h = conv_param->stride_h(); + stride_w = conv_param->stride_w(); + } + // Groups + int groups = conv_param->group(); + int o_g = out->channels() / groups; + int k_g = in->channels() / groups; + int o_head, k_head; + // Convolution + const Dtype* in_data = in->cpu_data(); + const Dtype* weight_data = weights[0]->cpu_data(); + Dtype* out_data = out->mutable_cpu_data(); + for (int n = 0; n < out->num(); n++) { + for (int g = 0; g < groups; g++) { + o_head = o_g * g; + k_head = k_g * g; + for (int o = 0; o < o_g; o++) { + for (int k = 0; k < k_g; k++) { + for (int y = 0; y < out->height(); y++) { + for (int x = 0; x < out->width(); x++) { + for (int p = 0; p < kernel_h; p++) { + for (int q = 0; q < kernel_w; q++) { + int in_y = y * stride_h - pad_h + p; + int in_x = x * stride_w - pad_w + q; + if (in_y >= 0 && in_y < in->height() + && in_x >= 0 && in_x < in->width()) { + out_data[out->offset(n, o + o_head, y, x)] += + in_data[in->offset(n, k + k_head, in_y, in_x)] + * weight_data[weights[0]->offset(o + o_head, k, p, q)]; + } + } + } + } + } + } + } + } + } + // Bias + if (conv_param->bias_term()) { + const Dtype* bias_data = weights[1]->cpu_data(); + for (int n = 0; n < out->num(); n++) { + for (int o = 0; o < out->channels(); o++) { + for (int y = 0; y < out->height(); y++) { + for (int x = 0; x < out->width(); x++) { + out_data[out->offset(n, o, y, x)] += bias_data[o]; + } + } + } + } + } +} + +template void caffe_conv(const Blob* in, + ConvolutionParameter* conv_param, + const vector > >& weights, + Blob* out); +template void caffe_conv(const Blob* in, + ConvolutionParameter* conv_param, + const vector > >& weights, + Blob* out); + +template +class ConvolutionLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + ConvolutionLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 4)), + blob_bottom_2_(new Blob(2, 3, 6, 4)), + blob_top_(new Blob()), + blob_top_2_(new Blob()) {} + virtual void SetUp() { + // fill the values + FillerParameter filler_param; + filler_param.set_value(1.); + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + filler.Fill(this->blob_bottom_2_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~ConvolutionLayerTest() { + delete blob_bottom_; + delete blob_bottom_2_; + delete blob_top_; + delete blob_top_2_; + } + + virtual Blob* MakeReferenceTop(Blob* top) { + this->ref_blob_top_.reset(new Blob()); + this->ref_blob_top_->ReshapeLike(*top); + return this->ref_blob_top_.get(); + } + + Blob* const blob_bottom_; + Blob* const blob_bottom_2_; + Blob* const blob_top_; + Blob* const blob_top_2_; + shared_ptr > ref_blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(ConvolutionLayerTest, TestDtypesAndDevices); + +TYPED_TEST(ConvolutionLayerTest, TestSetup) { + typedef typename 
TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(4); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + shared_ptr > layer( + new ConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 4); + EXPECT_EQ(this->blob_top_->height(), 2); + EXPECT_EQ(this->blob_top_->width(), 1); + EXPECT_EQ(this->blob_top_2_->num(), 2); + EXPECT_EQ(this->blob_top_2_->channels(), 4); + EXPECT_EQ(this->blob_top_2_->height(), 2); + EXPECT_EQ(this->blob_top_2_->width(), 1); + // setting group should not change the shape + convolution_param->set_num_output(3); + convolution_param->set_group(3); + layer.reset(new ConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 2); + EXPECT_EQ(this->blob_top_->width(), 1); + EXPECT_EQ(this->blob_top_2_->num(), 2); + EXPECT_EQ(this->blob_top_2_->channels(), 3); + EXPECT_EQ(this->blob_top_2_->height(), 2); + EXPECT_EQ(this->blob_top_2_->width(), 1); +} + +TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolution) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new ConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. 
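  // Annotation: caffe_conv (defined above) recomputes the expected output with
  // naive loops into the blob returned by MakeReferenceTop; the element-wise
  // EXPECT_NEAR checks below then compare it to the layer's Forward output
  // within 1e-4.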
+ const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } + caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_2_)); + top_data = this->blob_top_2_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +} + +TYPED_TEST(ConvolutionLayerTest, Test1x1Convolution) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(1); + convolution_param->set_stride(1); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new ConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. + const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +} + +TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolutionGroup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(3); + convolution_param->set_group(3); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new ConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. + const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +} + +TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { + // Test separable convolution by computing the Sobel operator + // as a single filter then comparing the result + // as the convolution of two rectangular filters. + typedef typename TypeParam::Dtype Dtype; + // Fill bottoms with identical Gaussian noise. 
+ shared_ptr > filler; + FillerParameter filler_param; + filler_param.set_value(1.); + filler.reset(new GaussianFiller(filler_param)); + filler->Fill(this->blob_bottom_); + this->blob_bottom_2_->CopyFrom(*this->blob_bottom_); + // Compute Sobel G_x operator as 3 x 3 convolution. + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(1); + convolution_param->set_bias_term(false); + shared_ptr > layer( + new ConvolutionLayer(layer_param)); + layer->blobs().resize(1); + layer->blobs()[0].reset(new Blob(1, 3, 3, 3)); + Dtype* weights = layer->blobs()[0]->mutable_cpu_data(); + for (int c = 0; c < 3; ++c) { + int i = c * 9; // 3 x 3 filter + weights[i + 0] = -1; + weights[i + 1] = 0; + weights[i + 2] = 1; + weights[i + 3] = -2; + weights[i + 4] = 0; + weights[i + 5] = 2; + weights[i + 6] = -1; + weights[i + 7] = 0; + weights[i + 8] = 1; + } + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Compute Sobel G_x operator as separable 3 x 1 and 1 x 3 convolutions. + // (1) the [1 2 1] column filter + vector*> sep_blob_bottom_vec; + vector*> sep_blob_top_vec; + shared_ptr > blob_sep(new Blob()); + sep_blob_bottom_vec.push_back(this->blob_bottom_2_); + sep_blob_top_vec.push_back(this->blob_top_2_); + convolution_param->clear_kernel_size(); + convolution_param->clear_stride(); + convolution_param->set_kernel_h(3); + convolution_param->set_kernel_w(1); + convolution_param->set_stride_h(2); + convolution_param->set_stride_w(1); + convolution_param->set_num_output(1); + convolution_param->set_bias_term(false); + layer.reset(new ConvolutionLayer(layer_param)); + layer->blobs().resize(1); + layer->blobs()[0].reset(new Blob(1, 3, 3, 1)); + Dtype* weights_1 = layer->blobs()[0]->mutable_cpu_data(); + for (int c = 0; c < 3; ++c) { + int i = c * 3; // 3 x 1 filter + weights_1[i + 0] = 1; + weights_1[i + 1] = 2; + weights_1[i + 2] = 1; + } + layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); + layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); + // (2) the [-1 0 1] row filter + blob_sep->CopyFrom(*this->blob_top_2_, false, true); + sep_blob_bottom_vec.clear(); + sep_blob_bottom_vec.push_back(blob_sep.get()); + convolution_param->set_kernel_h(1); + convolution_param->set_kernel_w(3); + convolution_param->set_stride_h(1); + convolution_param->set_stride_w(2); + convolution_param->set_num_output(1); + convolution_param->set_bias_term(false); + layer.reset(new ConvolutionLayer(layer_param)); + layer->blobs().resize(1); + layer->blobs()[0].reset(new Blob(1, 3, 1, 3)); + Dtype* weights_2 = layer->blobs()[0]->mutable_cpu_data(); + for (int c = 0; c < 3; ++c) { + int i = c * 3; // 1 x 3 filter + weights_2[i + 0] = -1; + weights_2[i + 1] = 0; + weights_2[i + 2] = 1; + } + layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); + layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); + // Test equivalence of full and separable filters. 
+ const Dtype* top_data = this->blob_top_->cpu_data(); + const Dtype* sep_top_data = this->blob_top_2_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], sep_top_data[i], 1e-4); + } +} + +TYPED_TEST(ConvolutionLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(2); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + ConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(ConvolutionLayerTest, Test1x1Gradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + convolution_param->set_kernel_size(1); + convolution_param->set_stride(1); + convolution_param->set_num_output(2); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + ConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(3); + convolution_param->set_group(3); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + ConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +#ifdef USE_CUDNN + +template +class CuDNNConvolutionLayerTest : public GPUDeviceTest { + protected: + CuDNNConvolutionLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 4)), + blob_bottom_2_(new Blob(2, 3, 6, 4)), + blob_top_(new Blob()), + blob_top_2_(new Blob()) {} + virtual void SetUp() { + // fill the values + FillerParameter filler_param; + filler_param.set_value(1.); + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + filler.Fill(this->blob_bottom_2_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~CuDNNConvolutionLayerTest() { + delete blob_bottom_; + delete blob_bottom_2_; + delete blob_top_; + delete blob_top_2_; + } + + virtual Blob* MakeReferenceTop(Blob* top) { + this->ref_blob_top_.reset(new Blob()); + this->ref_blob_top_->ReshapeLike(*top); + return this->ref_blob_top_.get(); + } + + Blob* const blob_bottom_; + Blob* const blob_bottom_2_; + Blob* const blob_top_; + Blob* const blob_top_2_; + shared_ptr > ref_blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(CuDNNConvolutionLayerTest, TestDtypes); + 
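The Sobel tests above (and the CuDNN variant further below) rely on the G_x kernel being separable: the 3 x 3 filter factors into a column smoothing filter followed by a row difference filter,

  [ -1  0  1 ]   [ 1 ]
  [ -2  0  2 ] = [ 2 ] * [ -1  0  1 ]
  [ -1  0  1 ]   [ 1 ]

so convolving with [1 2 1]^T (vertical stride 2) and then with [-1 0 1] (horizontal stride 2) must reproduce the single 3 x 3 convolution element-wise, which is exactly what the EXPECT_NEAR comparison checks.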
+TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) {
+  this->blob_bottom_vec_.push_back(this->blob_bottom_2_);
+  this->blob_top_vec_.push_back(this->blob_top_2_);
+  LayerParameter layer_param;
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
+  convolution_param->set_num_output(4);
+  this->blob_bottom_vec_.push_back(this->blob_bottom_2_);
+  this->blob_top_vec_.push_back(this->blob_top_2_);
+  shared_ptr<Layer<TypeParam> > layer(
+      new CuDNNConvolutionLayer<TypeParam>(layer_param));
+  layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+  EXPECT_EQ(this->blob_top_->num(), 2);
+  EXPECT_EQ(this->blob_top_->channels(), 4);
+  EXPECT_EQ(this->blob_top_->height(), 2);
+  EXPECT_EQ(this->blob_top_->width(), 1);
+  EXPECT_EQ(this->blob_top_2_->num(), 2);
+  EXPECT_EQ(this->blob_top_2_->channels(), 4);
+  EXPECT_EQ(this->blob_top_2_->height(), 2);
+  EXPECT_EQ(this->blob_top_2_->width(), 1);
+  // setting group should not change the shape
+  convolution_param->set_num_output(3);
+  convolution_param->set_group(3);
+  layer.reset(new CuDNNConvolutionLayer<TypeParam>(layer_param));
+  layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+  EXPECT_EQ(this->blob_top_->num(), 2);
+  EXPECT_EQ(this->blob_top_->channels(), 3);
+  EXPECT_EQ(this->blob_top_->height(), 2);
+  EXPECT_EQ(this->blob_top_->width(), 1);
+  EXPECT_EQ(this->blob_top_2_->num(), 2);
+  EXPECT_EQ(this->blob_top_2_->channels(), 3);
+  EXPECT_EQ(this->blob_top_2_->height(), 2);
+  EXPECT_EQ(this->blob_top_2_->width(), 1);
+}
+
+TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) {
+  this->blob_bottom_vec_.push_back(this->blob_bottom_2_);
+  this->blob_top_vec_.push_back(this->blob_top_2_);
+  LayerParameter layer_param;
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
+  convolution_param->set_num_output(4);
+  convolution_param->mutable_weight_filler()->set_type("gaussian");
+  convolution_param->mutable_bias_filler()->set_type("constant");
+  convolution_param->mutable_bias_filler()->set_value(0.1);
+  shared_ptr<Layer<TypeParam> > layer(
+      new CuDNNConvolutionLayer<TypeParam>(layer_param));
+  layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+  layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
+  // Check against reference convolution.
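+  // caffe_conv() is the plain loop-based reference convolution defined near
+  // the top of this file; MakeReferenceTop() gives it a scratch blob shaped
+  // like the real top, and the cuDNN output must agree to within 1e-4.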
+  const TypeParam* top_data;
+  const TypeParam* ref_top_data;
+  caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(),
+      this->MakeReferenceTop(this->blob_top_));
+  top_data = this->blob_top_->cpu_data();
+  ref_top_data = this->ref_blob_top_->cpu_data();
+  for (int i = 0; i < this->blob_top_->count(); ++i) {
+    EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4);
+  }
+  caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(),
+      this->MakeReferenceTop(this->blob_top_2_));
+  top_data = this->blob_top_2_->cpu_data();
+  ref_top_data = this->ref_blob_top_->cpu_data();
+  for (int i = 0; i < this->blob_top_->count(); ++i) {
+    EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4);
+  }
+}
+
+TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) {
+  LayerParameter layer_param;
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
+  convolution_param->set_num_output(3);
+  convolution_param->set_group(3);
+  convolution_param->mutable_weight_filler()->set_type("gaussian");
+  convolution_param->mutable_bias_filler()->set_type("constant");
+  convolution_param->mutable_bias_filler()->set_value(0.1);
+  shared_ptr<Layer<TypeParam> > layer(
+      new CuDNNConvolutionLayer<TypeParam>(layer_param));
+  layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+  layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
+  // Check against reference convolution.
+  const TypeParam* top_data;
+  const TypeParam* ref_top_data;
+  caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(),
+      this->MakeReferenceTop(this->blob_top_));
+  top_data = this->blob_top_->cpu_data();
+  ref_top_data = this->ref_blob_top_->cpu_data();
+  for (int i = 0; i < this->blob_top_->count(); ++i) {
+    EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4);
+  }
+}
+
+TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) {
+  // Test separable convolution by computing the Sobel operator
+  // as a single filter and then comparing the result with the
+  // composition of two rectangular filters.
+
+  // Fill bottoms with identical Gaussian noise.
+  shared_ptr<GaussianFiller<TypeParam> > filler;
+  FillerParameter filler_param;
+  filler_param.set_value(1.);
+  filler.reset(new GaussianFiller<TypeParam>(filler_param));
+  filler->Fill(this->blob_bottom_);
+  this->blob_bottom_2_->CopyFrom(*this->blob_bottom_);
+  // Compute Sobel G_x operator as 3 x 3 convolution.
+  LayerParameter layer_param;
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
+  convolution_param->set_num_output(1);
+  convolution_param->set_bias_term(false);
+  shared_ptr<Layer<TypeParam> > layer(
+      new CuDNNConvolutionLayer<TypeParam>(layer_param));
+  layer->blobs().resize(1);
+  layer->blobs()[0].reset(new Blob<TypeParam>(1, 3, 3, 3));
+  TypeParam* weights = layer->blobs()[0]->mutable_cpu_data();
+  for (int c = 0; c < 3; ++c) {
+    int i = c * 9;  // 3 x 3 filter
+    weights[i + 0] = -1;
+    weights[i + 1] = 0;
+    weights[i + 2] = 1;
+    weights[i + 3] = -2;
+    weights[i + 4] = 0;
+    weights[i + 5] = 2;
+    weights[i + 6] = -1;
+    weights[i + 7] = 0;
+    weights[i + 8] = 1;
+  }
+  layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+  layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
+  // Compute Sobel G_x operator as separable 3 x 1 and 1 x 3 convolutions.
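+  // The 3 x 3 Sobel G_x kernel used above factors into an outer product of a
+  // [1 2 1] column (smoothing) filter and a [-1 0 1] row (differencing)
+  // filter:
+  //   [ -1 0 1 ]   [ 1 ]
+  //   [ -2 0 2 ] = [ 2 ] * [ -1 0 1 ]
+  //   [ -1 0 1 ]   [ 1 ]
+  // so running the 3 x 1 filter and then the 1 x 3 filter below must
+  // reproduce the single 3 x 3 convolution computed above.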
+  // (1) the [1 2 1] column filter
+  vector<Blob<TypeParam>*> sep_blob_bottom_vec;
+  vector<Blob<TypeParam>*> sep_blob_top_vec;
+  shared_ptr<Blob<TypeParam> > blob_sep(new Blob<TypeParam>());
+  sep_blob_bottom_vec.push_back(this->blob_bottom_2_);
+  sep_blob_top_vec.push_back(this->blob_top_2_);
+  convolution_param->clear_kernel_size();
+  convolution_param->clear_stride();
+  convolution_param->set_kernel_h(3);
+  convolution_param->set_kernel_w(1);
+  convolution_param->set_stride_h(2);
+  convolution_param->set_stride_w(1);
+  convolution_param->set_num_output(1);
+  convolution_param->set_bias_term(false);
+  layer.reset(new CuDNNConvolutionLayer<TypeParam>(layer_param));
+  layer->blobs().resize(1);
+  layer->blobs()[0].reset(new Blob<TypeParam>(1, 3, 3, 1));
+  TypeParam* weights_1 = layer->blobs()[0]->mutable_cpu_data();
+  for (int c = 0; c < 3; ++c) {
+    int i = c * 3;  // 3 x 1 filter
+    weights_1[i + 0] = 1;
+    weights_1[i + 1] = 2;
+    weights_1[i + 2] = 1;
+  }
+  layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec);
+  layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec);
+  // (2) the [-1 0 1] row filter
+  blob_sep->CopyFrom(*this->blob_top_2_, false, true);
+  sep_blob_bottom_vec.clear();
+  sep_blob_bottom_vec.push_back(blob_sep.get());
+  convolution_param->set_kernel_h(1);
+  convolution_param->set_kernel_w(3);
+  convolution_param->set_stride_h(1);
+  convolution_param->set_stride_w(2);
+  convolution_param->set_num_output(1);
+  convolution_param->set_bias_term(false);
+  layer.reset(new CuDNNConvolutionLayer<TypeParam>(layer_param));
+  layer->blobs().resize(1);
+  layer->blobs()[0].reset(new Blob<TypeParam>(1, 3, 1, 3));
+  TypeParam* weights_2 = layer->blobs()[0]->mutable_cpu_data();
+  for (int c = 0; c < 3; ++c) {
+    int i = c * 3;  // 1 x 3 filter
+    weights_2[i + 0] = -1;
+    weights_2[i + 1] = 0;
+    weights_2[i + 2] = 1;
+  }
+  layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec);
+  layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec);
+  // Test equivalence of full and separable filters.
+  const TypeParam* top_data = this->blob_top_->cpu_data();
+  const TypeParam* sep_top_data = this->blob_top_2_->cpu_data();
+  for (int i = 0; i < this->blob_top_->count(); ++i) {
+    EXPECT_NEAR(top_data[i], sep_top_data[i], 1e-4);
+  }
+}
+
+TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) {
+  LayerParameter layer_param;
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  this->blob_bottom_vec_.push_back(this->blob_bottom_2_);
+  this->blob_top_vec_.push_back(this->blob_top_2_);
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
+  convolution_param->set_num_output(2);
+  convolution_param->mutable_weight_filler()->set_type("gaussian");
+  convolution_param->mutable_bias_filler()->set_type("gaussian");
+  CuDNNConvolutionLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-3);
+  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+      this->blob_top_vec_);
+}
+
+TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) {
+  LayerParameter layer_param;
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
+  convolution_param->set_num_output(3);
+  convolution_param->set_group(3);
+  convolution_param->mutable_weight_filler()->set_type("gaussian");
+  convolution_param->mutable_bias_filler()->set_type("gaussian");
+  CuDNNConvolutionLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-3);
+  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+      this->blob_top_vec_);
+}
+
+#endif
+
+}  // namespace caffe
diff --git a/src/caffe/test/test_data/generate_sample_data.py b/src/caffe/test/test_data/generate_sample_data.py
new file mode 100755
index 0000000..3703b41
--- /dev/null
+++ b/src/caffe/test/test_data/generate_sample_data.py
@@ -0,0 +1,79 @@
+"""
+Generate data used in the HDF5DataLayer and GradientBasedSolver tests.
+"""
+import os
+import numpy as np
+import h5py
+
+script_dir = os.path.dirname(os.path.abspath(__file__))
+
+# Generate HDF5DataLayer sample_data.h5
+
+num_cols = 8
+num_rows = 10
+height = 6
+width = 5
+total_size = num_cols * num_rows * height * width
+
+data = np.arange(total_size)
+data = data.reshape(num_rows, num_cols, height, width)
+data = data.astype('float32')
+
+# We had a bug where data was copied into label, but the tests weren't
+# catching it, so let's make label 1-indexed.
+label = 1 + np.arange(num_rows)[:, np.newaxis]
+label = label.astype('float32')
+
+# We add an extra label2 dataset to test HDF5 layer's ability
+# to handle arbitrary number of output ("top") Blobs.
+label2 = label + 1 + +print data +print label + +with h5py.File(script_dir + '/sample_data.h5', 'w') as f: + f['data'] = data + f['label'] = label + f['label2'] = label2 + +with h5py.File(script_dir + '/sample_data_2_gzip.h5', 'w') as f: + f.create_dataset( + 'data', data=data + total_size, + compression='gzip', compression_opts=1 + ) + f.create_dataset( + 'label', data=label, + compression='gzip', compression_opts=1 + ) + f.create_dataset( + 'label2', data=label2, + compression='gzip', compression_opts=1 + ) + +with open(script_dir + '/sample_data_list.txt', 'w') as f: + f.write(script_dir + '/sample_data.h5\n') + f.write(script_dir + '/sample_data_2_gzip.h5\n') + +# Generate GradientBasedSolver solver_data.h5 + +num_cols = 3 +num_rows = 8 +height = 10 +width = 10 + +data = np.random.randn(num_rows, num_cols, height, width) +data = data.reshape(num_rows, num_cols, height, width) +data = data.astype('float32') + +targets = np.random.randn(num_rows, 1) +targets = targets.astype('float32') + +print data +print targets + +with h5py.File(script_dir + '/solver_data.h5', 'w') as f: + f['data'] = data + f['targets'] = targets + +with open(script_dir + '/solver_data_list.txt', 'w') as f: + f.write(script_dir + '/solver_data.h5\n') diff --git a/src/caffe/test/test_data/sample_data.h5 b/src/caffe/test/test_data/sample_data.h5 new file mode 100755 index 0000000..236e66b Binary files /dev/null and b/src/caffe/test/test_data/sample_data.h5 differ diff --git a/src/caffe/test/test_data/sample_data_2_gzip.h5 b/src/caffe/test/test_data/sample_data_2_gzip.h5 new file mode 100755 index 0000000..a138e03 Binary files /dev/null and b/src/caffe/test/test_data/sample_data_2_gzip.h5 differ diff --git a/src/caffe/test/test_data/sample_data_list.txt b/src/caffe/test/test_data/sample_data_list.txt new file mode 100755 index 0000000..cdf343f --- /dev/null +++ b/src/caffe/test/test_data/sample_data_list.txt @@ -0,0 +1,2 @@ +src/caffe/test/test_data/sample_data.h5 +src/caffe/test/test_data/sample_data_2_gzip.h5 diff --git a/src/caffe/test/test_data/solver_data.h5 b/src/caffe/test/test_data/solver_data.h5 new file mode 100755 index 0000000..7ee05ea Binary files /dev/null and b/src/caffe/test/test_data/solver_data.h5 differ diff --git a/src/caffe/test/test_data/solver_data_list.txt b/src/caffe/test/test_data/solver_data_list.txt new file mode 100755 index 0000000..a6552f5 --- /dev/null +++ b/src/caffe/test/test_data/solver_data_list.txt @@ -0,0 +1 @@ +src/caffe/test/test_data/solver_data.h5 diff --git a/src/caffe/test/test_data_layer.cpp b/src/caffe/test/test_data_layer.cpp new file mode 100755 index 0000000..afe2a40 --- /dev/null +++ b/src/caffe/test/test_data_layer.cpp @@ -0,0 +1,427 @@ +#include +#include + +#include "boost/scoped_ptr.hpp" +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/data_layers.hpp" +#include "caffe/filler.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/db.hpp" +#include "caffe/util/io.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +using boost::scoped_ptr; + +template +class DataLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + DataLayerTest() + : backend_(DataParameter_DB_LEVELDB), + blob_top_data_(new Blob()), + blob_top_label_(new Blob()), + seed_(1701) {} + virtual void SetUp() { + filename_.reset(new string()); + MakeTempDir(filename_.get()); + *filename_ += "/db"; + blob_top_vec_.push_back(blob_top_data_); + blob_top_vec_.push_back(blob_top_label_); + } 
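+  // The tops are registered in the order a DataLayer produces them: the data
+  // blob first, then the label blob.  The tests below populate a temporary
+  // LevelDB or LMDB under filename_ (via Fill(), or inline in TestReshape)
+  // before exercising the layer against it.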
+ + // Fill the DB with data: if unique_pixels, each pixel is unique but + // all images are the same; else each image is unique but all pixels within + // an image are the same. + void Fill(const bool unique_pixels, DataParameter_DB backend) { + backend_ = backend; + LOG(INFO) << "Using temporary dataset " << *filename_; + scoped_ptr db(db::GetDB(backend)); + db->Open(*filename_, db::NEW); + scoped_ptr txn(db->NewTransaction()); + for (int i = 0; i < 5; ++i) { + Datum datum; + datum.set_label(i); + datum.set_channels(2); + datum.set_height(3); + datum.set_width(4); + std::string* data = datum.mutable_data(); + for (int j = 0; j < 24; ++j) { + int datum = unique_pixels ? j : i; + data->push_back(static_cast(datum)); + } + stringstream ss; + ss << i; + string out; + CHECK(datum.SerializeToString(&out)); + txn->Put(ss.str(), out); + } + txn->Commit(); + db->Close(); + } + + void TestRead() { + const Dtype scale = 3; + LayerParameter param; + param.set_phase(TRAIN); + DataParameter* data_param = param.mutable_data_param(); + data_param->set_batch_size(5); + data_param->set_source(filename_->c_str()); + data_param->set_backend(backend_); + + TransformationParameter* transform_param = + param.mutable_transform_param(); + transform_param->set_scale(scale); + + DataLayer layer(param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_data_->num(), 5); + EXPECT_EQ(blob_top_data_->channels(), 2); + EXPECT_EQ(blob_top_data_->height(), 3); + EXPECT_EQ(blob_top_data_->width(), 4); + EXPECT_EQ(blob_top_label_->num(), 5); + EXPECT_EQ(blob_top_label_->channels(), 1); + EXPECT_EQ(blob_top_label_->height(), 1); + EXPECT_EQ(blob_top_label_->width(), 1); + + for (int iter = 0; iter < 100; ++iter) { + layer.Forward(blob_bottom_vec_, blob_top_vec_); + for (int i = 0; i < 5; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 24; ++j) { + EXPECT_EQ(scale * i, blob_top_data_->cpu_data()[i * 24 + j]) + << "debug: iter " << iter << " i " << i << " j " << j; + } + } + } + } + + void TestReshape(DataParameter_DB backend) { + const int num_inputs = 5; + // Save data of varying shapes. + LOG(INFO) << "Using temporary dataset " << *filename_; + scoped_ptr db(db::GetDB(backend)); + db->Open(*filename_, db::NEW); + scoped_ptr txn(db->NewTransaction()); + for (int i = 0; i < num_inputs; ++i) { + Datum datum; + datum.set_label(i); + datum.set_channels(2); + datum.set_height(i % 2 + 1); + datum.set_width(i % 4 + 1); + std::string* data = datum.mutable_data(); + const int data_size = datum.channels() * datum.height() * datum.width(); + for (int j = 0; j < data_size; ++j) { + data->push_back(static_cast(j)); + } + stringstream ss; + ss << i; + string out; + CHECK(datum.SerializeToString(&out)); + txn->Put(ss.str(), out); + } + txn->Commit(); + db->Close(); + + // Load and check data of various shapes. 
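+    // Each datum i above was written with height i % 2 + 1 and width
+    // i % 4 + 1, and every pixel equals its flat index within the datum, so
+    // with batch_size 1 the layer must reshape its top blob on every Forward.
+    // The loop below checks both the per-iteration shape and the contents.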
+ LayerParameter param; + param.set_phase(TEST); + DataParameter* data_param = param.mutable_data_param(); + data_param->set_batch_size(1); + data_param->set_source(filename_->c_str()); + data_param->set_backend(backend); + + DataLayer layer(param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_data_->num(), 1); + EXPECT_EQ(blob_top_data_->channels(), 2); + EXPECT_EQ(blob_top_label_->num(), 1); + EXPECT_EQ(blob_top_label_->channels(), 1); + EXPECT_EQ(blob_top_label_->height(), 1); + EXPECT_EQ(blob_top_label_->width(), 1); + + for (int iter = 0; iter < num_inputs; ++iter) { + layer.Forward(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_data_->height(), iter % 2 + 1); + EXPECT_EQ(blob_top_data_->width(), iter % 4 + 1); + EXPECT_EQ(iter, blob_top_label_->cpu_data()[0]); + const int channels = blob_top_data_->channels(); + const int height = blob_top_data_->height(); + const int width = blob_top_data_->width(); + for (int c = 0; c < channels; ++c) { + for (int h = 0; h < height; ++h) { + for (int w = 0; w < width; ++w) { + const int idx = (c * height + h) * width + w; + EXPECT_EQ(idx, static_cast(blob_top_data_->cpu_data()[idx])) + << "debug: iter " << iter << " c " << c + << " h " << h << " w " << w; + } + } + } + } + } + + void TestReadCrop(Phase phase) { + const Dtype scale = 3; + LayerParameter param; + param.set_phase(phase); + Caffe::set_random_seed(1701); + + DataParameter* data_param = param.mutable_data_param(); + data_param->set_batch_size(5); + data_param->set_source(filename_->c_str()); + data_param->set_backend(backend_); + + TransformationParameter* transform_param = + param.mutable_transform_param(); + transform_param->set_scale(scale); + transform_param->set_crop_size(1); + + DataLayer layer(param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_data_->num(), 5); + EXPECT_EQ(blob_top_data_->channels(), 2); + EXPECT_EQ(blob_top_data_->height(), 1); + EXPECT_EQ(blob_top_data_->width(), 1); + EXPECT_EQ(blob_top_label_->num(), 5); + EXPECT_EQ(blob_top_label_->channels(), 1); + EXPECT_EQ(blob_top_label_->height(), 1); + EXPECT_EQ(blob_top_label_->width(), 1); + + for (int iter = 0; iter < 2; ++iter) { + layer.Forward(blob_bottom_vec_, blob_top_vec_); + for (int i = 0; i < 5; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + int num_with_center_value = 0; + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 2; ++j) { + const Dtype center_value = scale * (j ? 17 : 5); + num_with_center_value += + (center_value == blob_top_data_->cpu_data()[i * 2 + j]); + // At TEST time, check that we always get center value. + if (phase == caffe::TEST) { + EXPECT_EQ(center_value, this->blob_top_data_->cpu_data()[i * 2 + j]) + << "debug: iter " << iter << " i " << i << " j " << j; + } + } + } + // At TRAIN time, check that we did not get the center crop all 10 times. + // (This check fails with probability 1-1/12^10 in a correct + // implementation, so we call set_random_seed.) + if (phase == caffe::TRAIN) { + EXPECT_LT(num_with_center_value, 10); + } + } + } + + void TestReadCropTrainSequenceSeeded() { + LayerParameter param; + param.set_phase(TRAIN); + DataParameter* data_param = param.mutable_data_param(); + data_param->set_batch_size(5); + data_param->set_source(filename_->c_str()); + data_param->set_backend(backend_); + + TransformationParameter* transform_param = + param.mutable_transform_param(); + transform_param->set_crop_size(1); + transform_param->set_mirror(true); + + // Get crop sequence with Caffe seed 1701. 
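+    // With 2 channels and a 1 x 1 crop, each of the 5 images contributes two
+    // values per Forward, hence the i * 2 + j indexing below.  Re-seeding
+    // Caffe's RNG before building the second layer must reproduce exactly the
+    // same crop and mirror choices, so every value is compared for equality.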
+ Caffe::set_random_seed(seed_); + vector > crop_sequence; + { + DataLayer layer1(param); + layer1.SetUp(blob_bottom_vec_, blob_top_vec_); + for (int iter = 0; iter < 2; ++iter) { + layer1.Forward(blob_bottom_vec_, blob_top_vec_); + for (int i = 0; i < 5; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + vector iter_crop_sequence; + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 2; ++j) { + iter_crop_sequence.push_back( + blob_top_data_->cpu_data()[i * 2 + j]); + } + } + crop_sequence.push_back(iter_crop_sequence); + } + } // destroy 1st data layer and unlock the db + + // Get crop sequence after reseeding Caffe with 1701. + // Check that the sequence is the same as the original. + Caffe::set_random_seed(seed_); + DataLayer layer2(param); + layer2.SetUp(blob_bottom_vec_, blob_top_vec_); + for (int iter = 0; iter < 2; ++iter) { + layer2.Forward(blob_bottom_vec_, blob_top_vec_); + for (int i = 0; i < 5; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 2; ++j) { + EXPECT_EQ(crop_sequence[iter][i * 2 + j], + blob_top_data_->cpu_data()[i * 2 + j]) + << "debug: iter " << iter << " i " << i << " j " << j; + } + } + } + } + + void TestReadCropTrainSequenceUnseeded() { + LayerParameter param; + param.set_phase(TRAIN); + DataParameter* data_param = param.mutable_data_param(); + data_param->set_batch_size(5); + data_param->set_source(filename_->c_str()); + data_param->set_backend(backend_); + + TransformationParameter* transform_param = + param.mutable_transform_param(); + transform_param->set_crop_size(1); + transform_param->set_mirror(true); + + // Get crop sequence with Caffe seed 1701, srand seed 1701. + Caffe::set_random_seed(seed_); + srand(seed_); + vector > crop_sequence; + { + DataLayer layer1(param); + layer1.SetUp(blob_bottom_vec_, blob_top_vec_); + for (int iter = 0; iter < 2; ++iter) { + layer1.Forward(blob_bottom_vec_, blob_top_vec_); + for (int i = 0; i < 5; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + vector iter_crop_sequence; + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 2; ++j) { + iter_crop_sequence.push_back( + blob_top_data_->cpu_data()[i * 2 + j]); + } + } + crop_sequence.push_back(iter_crop_sequence); + } + } // destroy 1st data layer and unlock the db + + // Get crop sequence continuing from previous Caffe RNG state; reseed + // srand with 1701. Check that the sequence differs from the original. 
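+    // Only srand is re-seeded below; Caffe's own RNG continues from its
+    // previous state, so the second layer should make different crop/mirror
+    // choices.  EXPECT_LT(num_sequence_matches, 10) only requires one of the
+    // 10 values per pass to differ, which fails with negligible probability.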
+ srand(seed_); + DataLayer layer2(param); + layer2.SetUp(blob_bottom_vec_, blob_top_vec_); + for (int iter = 0; iter < 2; ++iter) { + layer2.Forward(blob_bottom_vec_, blob_top_vec_); + for (int i = 0; i < 5; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + int num_sequence_matches = 0; + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 2; ++j) { + num_sequence_matches += (crop_sequence[iter][i * 2 + j] == + blob_top_data_->cpu_data()[i * 2 + j]); + } + } + EXPECT_LT(num_sequence_matches, 10); + } + } + + virtual ~DataLayerTest() { delete blob_top_data_; delete blob_top_label_; } + + DataParameter_DB backend_; + shared_ptr filename_; + Blob* const blob_top_data_; + Blob* const blob_top_label_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; + int seed_; +}; + +TYPED_TEST_CASE(DataLayerTest, TestDtypesAndDevices); + +TYPED_TEST(DataLayerTest, TestReadLevelDB) { + const bool unique_pixels = false; // all pixels the same; images different + this->Fill(unique_pixels, DataParameter_DB_LEVELDB); + this->TestRead(); +} + +TYPED_TEST(DataLayerTest, TestReshapeLevelDB) { + this->TestReshape(DataParameter_DB_LEVELDB); +} + +TYPED_TEST(DataLayerTest, TestReadCropTrainLevelDB) { + const bool unique_pixels = true; // all images the same; pixels different + this->Fill(unique_pixels, DataParameter_DB_LEVELDB); + this->TestReadCrop(TRAIN); +} + +// Test that the sequence of random crops is consistent when using +// Caffe::set_random_seed. +TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLevelDB) { + const bool unique_pixels = true; // all images the same; pixels different + this->Fill(unique_pixels, DataParameter_DB_LEVELDB); + this->TestReadCropTrainSequenceSeeded(); +} + +// Test that the sequence of random crops differs across iterations when +// Caffe::set_random_seed isn't called (and seeds from srand are ignored). +TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLevelDB) { + const bool unique_pixels = true; // all images the same; pixels different + this->Fill(unique_pixels, DataParameter_DB_LEVELDB); + this->TestReadCropTrainSequenceUnseeded(); +} + +TYPED_TEST(DataLayerTest, TestReadCropTestLevelDB) { + const bool unique_pixels = true; // all images the same; pixels different + this->Fill(unique_pixels, DataParameter_DB_LEVELDB); + this->TestReadCrop(TEST); +} + +TYPED_TEST(DataLayerTest, TestReadLMDB) { + const bool unique_pixels = false; // all pixels the same; images different + this->Fill(unique_pixels, DataParameter_DB_LMDB); + this->TestRead(); +} + +TYPED_TEST(DataLayerTest, TestReshapeLMDB) { + this->TestReshape(DataParameter_DB_LMDB); +} + +TYPED_TEST(DataLayerTest, TestReadCropTrainLMDB) { + const bool unique_pixels = true; // all images the same; pixels different + this->Fill(unique_pixels, DataParameter_DB_LMDB); + this->TestReadCrop(TRAIN); +} + +// Test that the sequence of random crops is consistent when using +// Caffe::set_random_seed. +TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLMDB) { + const bool unique_pixels = true; // all images the same; pixels different + this->Fill(unique_pixels, DataParameter_DB_LMDB); + this->TestReadCropTrainSequenceSeeded(); +} + +// Test that the sequence of random crops differs across iterations when +// Caffe::set_random_seed isn't called (and seeds from srand are ignored). 
+TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLMDB) { + const bool unique_pixels = true; // all images the same; pixels different + this->Fill(unique_pixels, DataParameter_DB_LMDB); + this->TestReadCropTrainSequenceUnseeded(); +} + +TYPED_TEST(DataLayerTest, TestReadCropTestLMDB) { + const bool unique_pixels = true; // all images the same; pixels different + this->Fill(unique_pixels, DataParameter_DB_LMDB); + this->TestReadCrop(TEST); +} + +} // namespace caffe diff --git a/src/caffe/test/test_data_transformer.cpp b/src/caffe/test/test_data_transformer.cpp new file mode 100755 index 0000000..16570e2 --- /dev/null +++ b/src/caffe/test/test_data_transformer.cpp @@ -0,0 +1,355 @@ +#include +#include + +#include "gtest/gtest.h" +#include "leveldb/db.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/data_transformer.hpp" +#include "caffe/filler.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/io.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +void FillDatum(const int label, const int channels, const int height, + const int width, const bool unique_pixels, Datum * datum) { + datum->set_label(label); + datum->set_channels(channels); + datum->set_height(height); + datum->set_width(width); + int size = channels * height * width; + std::string* data = datum->mutable_data(); + for (int j = 0; j < size; ++j) { + int datum = unique_pixels ? j : label; + data->push_back(static_cast(datum)); + } +} + +template +class DataTransformTest : public ::testing::Test { + protected: + DataTransformTest() + : seed_(1701), + num_iter_(10) {} + + int NumSequenceMatches(const TransformationParameter transform_param, + const Datum& datum, Phase phase) { + // Get crop sequence with Caffe seed 1701. 
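+    // This helper applies the transformer num_iter_ times to record a
+    // reference sequence, applies it num_iter_ more times, and returns how
+    // many individual output values match the reference.  Deterministic
+    // settings (TEST-phase center crop, no mirror) should match on every
+    // value, while randomized settings (TRAIN-phase crops, or mirroring in
+    // either phase) should match on strictly fewer, which is what the tests
+    // below assert.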
+ DataTransformer* transformer = + new DataTransformer(transform_param, phase); + const int crop_size = transform_param.crop_size(); + Caffe::set_random_seed(seed_); + transformer->InitRand(); + Blob* blob = + new Blob(1, datum.channels(), datum.height(), datum.width()); + if (transform_param.crop_size() > 0) { + blob->Reshape(1, datum.channels(), crop_size, crop_size); + } + + vector > crop_sequence; + for (int iter = 0; iter < this->num_iter_; ++iter) { + vector iter_crop_sequence; + transformer->Transform(datum, blob); + for (int j = 0; j < blob->count(); ++j) { + iter_crop_sequence.push_back(blob->cpu_data()[j]); + } + crop_sequence.push_back(iter_crop_sequence); + } + // Check if the sequence differs from the previous + int num_sequence_matches = 0; + for (int iter = 0; iter < this->num_iter_; ++iter) { + vector iter_crop_sequence = crop_sequence[iter]; + transformer->Transform(datum, blob); + for (int j = 0; j < blob->count(); ++j) { + num_sequence_matches += + (crop_sequence[iter][j] == blob->cpu_data()[j]); + } + } + return num_sequence_matches; + } + + virtual ~DataTransformTest() { } + + int seed_; + int num_iter_; +}; + +TYPED_TEST_CASE(DataTransformTest, TestDtypes); + +TYPED_TEST(DataTransformTest, TestEmptyTransform) { + TransformationParameter transform_param; + const bool unique_pixels = false; // all pixels the same equal to label + const int label = 0; + const int channels = 3; + const int height = 4; + const int width = 5; + + Datum datum; + FillDatum(label, channels, height, width, unique_pixels, &datum); + Blob* blob = new Blob(1, channels, height, width); + DataTransformer* transformer = + new DataTransformer(transform_param, TEST); + transformer->InitRand(); + transformer->Transform(datum, blob); + EXPECT_EQ(blob->num(), 1); + EXPECT_EQ(blob->channels(), datum.channels()); + EXPECT_EQ(blob->height(), datum.height()); + EXPECT_EQ(blob->width(), datum.width()); + for (int j = 0; j < blob->count(); ++j) { + EXPECT_EQ(blob->cpu_data()[j], label); + } +} + +TYPED_TEST(DataTransformTest, TestEmptyTransformUniquePixels) { + TransformationParameter transform_param; + const bool unique_pixels = true; // pixels are consecutive ints [0,size] + const int label = 0; + const int channels = 3; + const int height = 4; + const int width = 5; + + Datum datum; + FillDatum(label, channels, height, width, unique_pixels, &datum); + Blob* blob = new Blob(1, 3, 4, 5); + DataTransformer* transformer = + new DataTransformer(transform_param, TEST); + transformer->InitRand(); + transformer->Transform(datum, blob); + EXPECT_EQ(blob->num(), 1); + EXPECT_EQ(blob->channels(), datum.channels()); + EXPECT_EQ(blob->height(), datum.height()); + EXPECT_EQ(blob->width(), datum.width()); + for (int j = 0; j < blob->count(); ++j) { + EXPECT_EQ(blob->cpu_data()[j], j); + } +} + +TYPED_TEST(DataTransformTest, TestCropSize) { + TransformationParameter transform_param; + const bool unique_pixels = false; // all pixels the same equal to label + const int label = 0; + const int channels = 3; + const int height = 4; + const int width = 5; + const int crop_size = 2; + + transform_param.set_crop_size(crop_size); + Datum datum; + FillDatum(label, channels, height, width, unique_pixels, &datum); + DataTransformer* transformer = + new DataTransformer(transform_param, TEST); + transformer->InitRand(); + Blob* blob = + new Blob(1, channels, crop_size, crop_size); + for (int iter = 0; iter < this->num_iter_; ++iter) { + transformer->Transform(datum, blob); + EXPECT_EQ(blob->num(), 1); + EXPECT_EQ(blob->channels(), 
datum.channels()); + EXPECT_EQ(blob->height(), crop_size); + EXPECT_EQ(blob->width(), crop_size); + for (int j = 0; j < blob->count(); ++j) { + EXPECT_EQ(blob->cpu_data()[j], label); + } + } +} + +TYPED_TEST(DataTransformTest, TestCropTrain) { + TransformationParameter transform_param; + const bool unique_pixels = true; // pixels are consecutive ints [0,size] + const int label = 0; + const int channels = 3; + const int height = 4; + const int width = 5; + const int crop_size = 2; + const int size = channels * crop_size * crop_size; + + transform_param.set_crop_size(crop_size); + Datum datum; + FillDatum(label, channels, height, width, unique_pixels, &datum); + int num_matches = this->NumSequenceMatches(transform_param, datum, TRAIN); + EXPECT_LT(num_matches, size * this->num_iter_); +} + +TYPED_TEST(DataTransformTest, TestCropTest) { + TransformationParameter transform_param; + const bool unique_pixels = true; // pixels are consecutive ints [0,size] + const int label = 0; + const int channels = 3; + const int height = 4; + const int width = 5; + const int crop_size = 2; + const int size = channels * crop_size * crop_size; + + transform_param.set_crop_size(crop_size); + Datum datum; + FillDatum(label, channels, height, width, unique_pixels, &datum); + int num_matches = this->NumSequenceMatches(transform_param, datum, TEST); + EXPECT_EQ(num_matches, size * this->num_iter_); +} + +TYPED_TEST(DataTransformTest, TestMirrorTrain) { + TransformationParameter transform_param; + const bool unique_pixels = true; // pixels are consecutive ints [0,size] + const int label = 0; + const int channels = 3; + const int height = 4; + const int width = 5; + const int size = channels * height * width; + + transform_param.set_mirror(true); + Datum datum; + FillDatum(label, channels, height, width, unique_pixels, &datum); + int num_matches = this->NumSequenceMatches(transform_param, datum, TRAIN); + EXPECT_LT(num_matches, size * this->num_iter_); +} + +TYPED_TEST(DataTransformTest, TestMirrorTest) { + TransformationParameter transform_param; + const bool unique_pixels = true; // pixels are consecutive ints [0,size] + const int label = 0; + const int channels = 3; + const int height = 4; + const int width = 5; + const int size = channels * height * width; + + transform_param.set_mirror(true); + Datum datum; + FillDatum(label, channels, height, width, unique_pixels, &datum); + int num_matches = this->NumSequenceMatches(transform_param, datum, TEST); + EXPECT_LT(num_matches, size * this->num_iter_); +} + +TYPED_TEST(DataTransformTest, TestCropMirrorTrain) { + TransformationParameter transform_param; + const bool unique_pixels = true; // pixels are consecutive ints [0,size] + const int label = 0; + const int channels = 3; + const int height = 4; + const int width = 5; + const int crop_size = 2; + + Datum datum; + FillDatum(label, channels, height, width, unique_pixels, &datum); + transform_param.set_crop_size(crop_size); + int num_matches_crop = this->NumSequenceMatches( + transform_param, datum, TRAIN); + + transform_param.set_mirror(true); + int num_matches_crop_mirror = + this->NumSequenceMatches(transform_param, datum, TRAIN); + // When doing crop and mirror we expect less num_matches than just crop + EXPECT_LE(num_matches_crop_mirror, num_matches_crop); +} + +TYPED_TEST(DataTransformTest, TestCropMirrorTest) { + TransformationParameter transform_param; + const bool unique_pixels = true; // pixels are consecutive ints [0,size] + const int label = 0; + const int channels = 3; + const int height = 4; + const int 
width = 5; + const int crop_size = 2; + + Datum datum; + FillDatum(label, channels, height, width, unique_pixels, &datum); + transform_param.set_crop_size(crop_size); + int num_matches_crop = this->NumSequenceMatches(transform_param, datum, TEST); + + transform_param.set_mirror(true); + int num_matches_crop_mirror = + this->NumSequenceMatches(transform_param, datum, TEST); + // When doing crop and mirror we expect less num_matches than just crop + EXPECT_LT(num_matches_crop_mirror, num_matches_crop); +} + + +TYPED_TEST(DataTransformTest, TestMeanValue) { + TransformationParameter transform_param; + const bool unique_pixels = false; // pixels are equal to label + const int label = 0; + const int channels = 3; + const int height = 4; + const int width = 5; + const int mean_value = 2; + + transform_param.add_mean_value(mean_value); + Datum datum; + FillDatum(label, channels, height, width, unique_pixels, &datum); + Blob* blob = new Blob(1, channels, height, width); + DataTransformer* transformer = + new DataTransformer(transform_param, TEST); + transformer->InitRand(); + transformer->Transform(datum, blob); + for (int j = 0; j < blob->count(); ++j) { + EXPECT_EQ(blob->cpu_data()[j], label - mean_value); + } +} + +TYPED_TEST(DataTransformTest, TestMeanValues) { + TransformationParameter transform_param; + const bool unique_pixels = false; // pixels are equal to label + const int label = 0; + const int channels = 3; + const int height = 4; + const int width = 5; + + transform_param.add_mean_value(0); + transform_param.add_mean_value(1); + transform_param.add_mean_value(2); + Datum datum; + FillDatum(label, channels, height, width, unique_pixels, &datum); + Blob* blob = new Blob(1, channels, height, width); + DataTransformer* transformer = + new DataTransformer(transform_param, TEST); + transformer->InitRand(); + transformer->Transform(datum, blob); + for (int c = 0; c < channels; ++c) { + for (int j = 0; j < height * width; ++j) { + EXPECT_EQ(blob->cpu_data()[blob->offset(0, c) + j], label - c); + } + } +} + +TYPED_TEST(DataTransformTest, TestMeanFile) { + TransformationParameter transform_param; + const bool unique_pixels = true; // pixels are consecutive ints [0,size] + const int label = 0; + const int channels = 3; + const int height = 4; + const int width = 5; + const int size = channels * height * width; + + // Create a mean file + string* mean_file = new string(); + MakeTempFilename(mean_file); + BlobProto blob_mean; + blob_mean.set_num(1); + blob_mean.set_channels(channels); + blob_mean.set_height(height); + blob_mean.set_width(width); + + for (int j = 0; j < size; ++j) { + blob_mean.add_data(j); + } + + LOG(INFO) << "Using temporary mean_file " << *mean_file; + WriteProtoToBinaryFile(blob_mean, *mean_file); + + transform_param.set_mean_file(*mean_file); + Datum datum; + FillDatum(label, channels, height, width, unique_pixels, &datum); + Blob* blob = new Blob(1, channels, height, width); + DataTransformer* transformer = + new DataTransformer(transform_param, TEST); + transformer->InitRand(); + transformer->Transform(datum, blob); + for (int j = 0; j < blob->count(); ++j) { + EXPECT_EQ(blob->cpu_data()[j], 0); + } +} + +} // namespace caffe diff --git a/src/caffe/test/test_db.cpp b/src/caffe/test/test_db.cpp new file mode 100755 index 0000000..5b2ac23 --- /dev/null +++ b/src/caffe/test/test_db.cpp @@ -0,0 +1,134 @@ +#include + +#include "boost/scoped_ptr.hpp" +#include "gtest/gtest.h" + +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/db.hpp" +#include 
"caffe/util/io.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +using boost::scoped_ptr; + +template +class DBTest : public ::testing::Test { + protected: + DBTest() + : backend_(TypeParam::backend), + root_images_(string(EXAMPLES_SOURCE_DIR) + string("images/")) {} + + virtual void SetUp() { + MakeTempDir(&source_); + source_ += "/db"; + string keys[] = {"cat.jpg", "fish-bike.jpg"}; + LOG(INFO) << "Using temporary db " << source_; + scoped_ptr db(db::GetDB(TypeParam::backend)); + db->Open(this->source_, db::NEW); + scoped_ptr txn(db->NewTransaction()); + for (int i = 0; i < 2; ++i) { + Datum datum; + ReadImageToDatum(root_images_ + keys[i], i, &datum); + string out; + CHECK(datum.SerializeToString(&out)); + txn->Put(keys[i], out); + } + txn->Commit(); + } + + virtual ~DBTest() { } + + DataParameter_DB backend_; + string source_; + string root_images_; +}; + +struct TypeLevelDB { + static DataParameter_DB backend; +}; +DataParameter_DB TypeLevelDB::backend = DataParameter_DB_LEVELDB; + +struct TypeLMDB { + static DataParameter_DB backend; +}; +DataParameter_DB TypeLMDB::backend = DataParameter_DB_LMDB; + +// typedef ::testing::Types TestTypes; +typedef ::testing::Types TestTypes; + +TYPED_TEST_CASE(DBTest, TestTypes); + +TYPED_TEST(DBTest, TestGetDB) { + scoped_ptr db(db::GetDB(TypeParam::backend)); +} + +TYPED_TEST(DBTest, TestNext) { + scoped_ptr db(db::GetDB(TypeParam::backend)); + db->Open(this->source_, db::READ); + scoped_ptr cursor(db->NewCursor()); + EXPECT_TRUE(cursor->valid()); + cursor->Next(); + EXPECT_TRUE(cursor->valid()); + cursor->Next(); + EXPECT_FALSE(cursor->valid()); +} + +TYPED_TEST(DBTest, TestSeekToFirst) { + scoped_ptr db(db::GetDB(TypeParam::backend)); + db->Open(this->source_, db::READ); + scoped_ptr cursor(db->NewCursor()); + cursor->Next(); + cursor->SeekToFirst(); + EXPECT_TRUE(cursor->valid()); + string key = cursor->key(); + Datum datum; + datum.ParseFromString(cursor->value()); + EXPECT_EQ(key, "cat.jpg"); + EXPECT_EQ(datum.channels(), 3); + EXPECT_EQ(datum.height(), 360); + EXPECT_EQ(datum.width(), 480); +} + +TYPED_TEST(DBTest, TestKeyValue) { + scoped_ptr db(db::GetDB(TypeParam::backend)); + db->Open(this->source_, db::READ); + scoped_ptr cursor(db->NewCursor()); + EXPECT_TRUE(cursor->valid()); + string key = cursor->key(); + Datum datum; + datum.ParseFromString(cursor->value()); + EXPECT_EQ(key, "cat.jpg"); + EXPECT_EQ(datum.channels(), 3); + EXPECT_EQ(datum.height(), 360); + EXPECT_EQ(datum.width(), 480); + cursor->Next(); + EXPECT_TRUE(cursor->valid()); + key = cursor->key(); + datum.ParseFromString(cursor->value()); + EXPECT_EQ(key, "fish-bike.jpg"); + EXPECT_EQ(datum.channels(), 3); + EXPECT_EQ(datum.height(), 323); + EXPECT_EQ(datum.width(), 481); + cursor->Next(); + EXPECT_FALSE(cursor->valid()); +} + +TYPED_TEST(DBTest, TestWrite) { + scoped_ptr db(db::GetDB(TypeParam::backend)); + db->Open(this->source_, db::WRITE); + scoped_ptr txn(db->NewTransaction()); + Datum datum; + ReadFileToDatum(this->root_images_ + "cat.jpg", 0, &datum); + string out; + CHECK(datum.SerializeToString(&out)); + txn->Put("cat.jpg", out); + ReadFileToDatum(this->root_images_ + "fish-bike.jpg", 1, &datum); + CHECK(datum.SerializeToString(&out)); + txn->Put("fish-bike.jpg", out); + txn->Commit(); +} + +} // namespace caffe diff --git a/src/caffe/test/test_deconvolution_layer.cpp b/src/caffe/test/test_deconvolution_layer.cpp new file mode 100755 index 0000000..fc63d5e --- /dev/null +++ b/src/caffe/test/test_deconvolution_layer.cpp @@ -0,0 +1,158 @@ 
+#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +// Since ConvolutionLayerTest checks the shared conv/deconv code in detail, +// we'll just do a simple forward test and a gradient check. +template +class DeconvolutionLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + DeconvolutionLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 4)), + blob_bottom_2_(new Blob(2, 3, 6, 4)), + blob_top_(new Blob()), + blob_top_2_(new Blob()) {} + virtual void SetUp() { + // fill the values + FillerParameter filler_param; + filler_param.set_value(1.); + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + filler.Fill(this->blob_bottom_2_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~DeconvolutionLayerTest() { + delete blob_bottom_; + delete blob_bottom_2_; + delete blob_top_; + delete blob_top_2_; + } + + Blob* const blob_bottom_; + Blob* const blob_bottom_2_; + Blob* const blob_top_; + Blob* const blob_top_2_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(DeconvolutionLayerTest, TestDtypesAndDevices); + +TYPED_TEST(DeconvolutionLayerTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(4); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + shared_ptr > layer( + new DeconvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 4); + EXPECT_EQ(this->blob_top_->height(), 13); + EXPECT_EQ(this->blob_top_->width(), 9); + EXPECT_EQ(this->blob_top_2_->num(), 2); + EXPECT_EQ(this->blob_top_2_->channels(), 4); + EXPECT_EQ(this->blob_top_2_->height(), 13); + EXPECT_EQ(this->blob_top_2_->width(), 9); + // setting group should not change the shape + convolution_param->set_num_output(3); + convolution_param->set_group(3); + layer.reset(new DeconvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 13); + EXPECT_EQ(this->blob_top_->width(), 9); + EXPECT_EQ(this->blob_top_2_->num(), 2); + EXPECT_EQ(this->blob_top_2_->channels(), 3); + EXPECT_EQ(this->blob_top_2_->height(), 13); + EXPECT_EQ(this->blob_top_2_->width(), 9); +} + +TYPED_TEST(DeconvolutionLayerTest, TestSimpleDeconvolution) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("constant"); + convolution_param->mutable_weight_filler()->set_value(1); + convolution_param->mutable_bias_filler()->set_type("constant"); + 
convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new DeconvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + // constant-fill the bottom blobs + FillerParameter filler_param; + filler_param.set_value(1.); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + filler.Fill(this->blob_bottom_2_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // simply check that accumulation works with overlapping filters + const Dtype* top_data = this->blob_top_->cpu_data(); + for (int n = 0; n < this->blob_top_->num(); ++n) { + for (int c = 0; c < this->blob_top_->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < this->blob_top_->width(); ++w) { + Dtype expected = 3.1; + bool h_overlap = h % 2 == 0 && h > 0 + && h < this->blob_top_->height() - 1; + bool w_overlap = w % 2 == 0 && w > 0 + && w < this->blob_top_->width() - 1; + if (h_overlap && w_overlap) { + expected += 9; + } else if (h_overlap || w_overlap) { + expected += 3; + } + EXPECT_NEAR(top_data[this->blob_top_->offset(n, c, h, w)], + expected, 1e-4); + } + } + } + } +} + +TYPED_TEST(DeconvolutionLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + convolution_param->set_kernel_size(2); + convolution_param->set_stride(1); + convolution_param->set_num_output(1); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + DeconvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +} // namespace caffe diff --git a/src/caffe/test/test_dummy_data_layer.cpp b/src/caffe/test/test_dummy_data_layer.cpp new file mode 100755 index 0000000..c9ed38d --- /dev/null +++ b/src/caffe/test/test_dummy_data_layer.cpp @@ -0,0 +1,193 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class DummyDataLayerTest : public CPUDeviceTest { + protected: + DummyDataLayerTest() + : blob_top_a_(new Blob()), + blob_top_b_(new Blob()), + blob_top_c_(new Blob()) {} + + virtual void SetUp() { + blob_bottom_vec_.clear(); + blob_top_vec_.clear(); + blob_top_vec_.push_back(blob_top_a_); + blob_top_vec_.push_back(blob_top_b_); + blob_top_vec_.push_back(blob_top_c_); + } + + virtual ~DummyDataLayerTest() { + delete blob_top_a_; + delete blob_top_b_; + delete blob_top_c_; + } + + Blob* const blob_top_a_; + Blob* const blob_top_b_; + Blob* const blob_top_c_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(DummyDataLayerTest, TestDtypes); + +TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { + LayerParameter param; + DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); + dummy_data_param->add_num(5); + dummy_data_param->add_channels(3); + dummy_data_param->add_height(2); + dummy_data_param->add_width(4); + this->blob_top_vec_.resize(1); + DummyDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + 
EXPECT_EQ(this->blob_top_a_->num(), 5); + EXPECT_EQ(this->blob_top_a_->channels(), 3); + EXPECT_EQ(this->blob_top_a_->height(), 2); + EXPECT_EQ(this->blob_top_a_->width(), 4); + EXPECT_EQ(this->blob_top_b_->count(), 0); + EXPECT_EQ(this->blob_top_c_->count(), 0); + for (int i = 0; i < this->blob_top_vec_.size(); ++i) { + for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) { + EXPECT_EQ(0, this->blob_top_vec_[i]->cpu_data()[j]); + } + } + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_top_vec_.size(); ++i) { + for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) { + EXPECT_EQ(0, this->blob_top_vec_[i]->cpu_data()[j]); + } + } +} + +TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { + LayerParameter param; + DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); + dummy_data_param->add_num(5); + dummy_data_param->add_channels(3); + dummy_data_param->add_height(2); + dummy_data_param->add_width(4); + dummy_data_param->add_num(5); + // Don't explicitly set number of channels or height for 2nd top blob; should + // default to first channels and height (as we check later). + dummy_data_param->add_height(1); + FillerParameter* data_filler_param = dummy_data_param->add_data_filler(); + data_filler_param->set_value(7); + this->blob_top_vec_.resize(2); + DummyDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_a_->num(), 5); + EXPECT_EQ(this->blob_top_a_->channels(), 3); + EXPECT_EQ(this->blob_top_a_->height(), 2); + EXPECT_EQ(this->blob_top_a_->width(), 4); + EXPECT_EQ(this->blob_top_b_->num(), 5); + EXPECT_EQ(this->blob_top_b_->channels(), 3); + EXPECT_EQ(this->blob_top_b_->height(), 1); + EXPECT_EQ(this->blob_top_b_->width(), 4); + EXPECT_EQ(this->blob_top_c_->count(), 0); + for (int i = 0; i < this->blob_top_vec_.size(); ++i) { + for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) { + EXPECT_EQ(7, this->blob_top_vec_[i]->cpu_data()[j]); + } + } + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_top_vec_.size(); ++i) { + for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) { + EXPECT_EQ(7, this->blob_top_vec_[i]->cpu_data()[j]); + } + } +} + +TYPED_TEST(DummyDataLayerTest, TestThreeTopConstantGaussianConstant) { + LayerParameter param; + DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); + dummy_data_param->add_num(5); + dummy_data_param->add_channels(3); + dummy_data_param->add_height(2); + dummy_data_param->add_width(4); + FillerParameter* data_filler_param_a = dummy_data_param->add_data_filler(); + data_filler_param_a->set_value(7); + FillerParameter* data_filler_param_b = dummy_data_param->add_data_filler(); + data_filler_param_b->set_type("gaussian"); + TypeParam gaussian_mean = 3.0; + TypeParam gaussian_std = 0.01; + data_filler_param_b->set_mean(gaussian_mean); + data_filler_param_b->set_std(gaussian_std); + FillerParameter* data_filler_param_c = dummy_data_param->add_data_filler(); + data_filler_param_c->set_value(9); + DummyDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_a_->num(), 5); + EXPECT_EQ(this->blob_top_a_->channels(), 3); + EXPECT_EQ(this->blob_top_a_->height(), 2); + EXPECT_EQ(this->blob_top_a_->width(), 4); + EXPECT_EQ(this->blob_top_b_->num(), 5); + EXPECT_EQ(this->blob_top_b_->channels(), 3); + EXPECT_EQ(this->blob_top_b_->height(), 2); + EXPECT_EQ(this->blob_top_b_->width(), 4); + 
EXPECT_EQ(this->blob_top_c_->num(), 5); + EXPECT_EQ(this->blob_top_c_->channels(), 3); + EXPECT_EQ(this->blob_top_c_->height(), 2); + EXPECT_EQ(this->blob_top_c_->width(), 4); + for (int i = 0; i < this->blob_top_a_->count(); ++i) { + EXPECT_EQ(7, this->blob_top_a_->cpu_data()[i]); + } + // Blob b uses a Gaussian filler, so SetUp should not have initialized it. + // Blob b's data should therefore be the default Blob data value: 0. + for (int i = 0; i < this->blob_top_b_->count(); ++i) { + EXPECT_EQ(0, this->blob_top_b_->cpu_data()[i]); + } + for (int i = 0; i < this->blob_top_c_->count(); ++i) { + EXPECT_EQ(9, this->blob_top_c_->cpu_data()[i]); + } + + // Do a Forward pass to fill in Blob b with Gaussian data. + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_top_a_->count(); ++i) { + EXPECT_EQ(7, this->blob_top_a_->cpu_data()[i]); + } + // Check that the Gaussian's data has been filled in with values within + // 10 standard deviations of the mean. Record the first and last sample. + // to check that they're different after the next Forward pass. + for (int i = 0; i < this->blob_top_b_->count(); ++i) { + EXPECT_NEAR(gaussian_mean, this->blob_top_b_->cpu_data()[i], + gaussian_std * 10); + } + const TypeParam first_gaussian_sample = this->blob_top_b_->cpu_data()[0]; + const TypeParam last_gaussian_sample = + this->blob_top_b_->cpu_data()[this->blob_top_b_->count() - 1]; + for (int i = 0; i < this->blob_top_c_->count(); ++i) { + EXPECT_EQ(9, this->blob_top_c_->cpu_data()[i]); + } + + // Do another Forward pass to fill in Blob b with Gaussian data again, + // checking that we get different values. + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_top_a_->count(); ++i) { + EXPECT_EQ(7, this->blob_top_a_->cpu_data()[i]); + } + for (int i = 0; i < this->blob_top_b_->count(); ++i) { + EXPECT_NEAR(gaussian_mean, this->blob_top_b_->cpu_data()[i], + gaussian_std * 10); + } + EXPECT_NE(first_gaussian_sample, this->blob_top_b_->cpu_data()[0]); + EXPECT_NE(last_gaussian_sample, + this->blob_top_b_->cpu_data()[this->blob_top_b_->count() - 1]); + for (int i = 0; i < this->blob_top_c_->count(); ++i) { + EXPECT_EQ(9, this->blob_top_c_->cpu_data()[i]); + } +} + +} // namespace caffe diff --git a/src/caffe/test/test_eltwise_layer.cpp b/src/caffe/test/test_eltwise_layer.cpp new file mode 100755 index 0000000..be0c134 --- /dev/null +++ b/src/caffe/test/test_eltwise_layer.cpp @@ -0,0 +1,209 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class EltwiseLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + EltwiseLayerTest() + : blob_bottom_a_(new Blob(2, 3, 4, 5)), + blob_bottom_b_(new Blob(2, 3, 4, 5)), + blob_bottom_c_(new Blob(2, 3, 4, 5)), + blob_top_(new Blob()) { + // fill the values + Caffe::set_random_seed(1701); + FillerParameter filler_param; + UniformFiller filler(filler_param); + filler.Fill(this->blob_bottom_a_); + filler.Fill(this->blob_bottom_b_); + filler.Fill(this->blob_bottom_c_); + blob_bottom_vec_.push_back(blob_bottom_a_); + blob_bottom_vec_.push_back(blob_bottom_b_); + blob_bottom_vec_.push_back(blob_bottom_c_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~EltwiseLayerTest() { + delete blob_bottom_a_; 
+ delete blob_bottom_b_; + delete blob_bottom_c_; + delete blob_top_; + } + Blob* const blob_bottom_a_; + Blob* const blob_bottom_b_; + Blob* const blob_bottom_c_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(EltwiseLayerTest, TestDtypesAndDevices); + +TYPED_TEST(EltwiseLayerTest, TestSetUp) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); + shared_ptr > layer( + new EltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 4); + EXPECT_EQ(this->blob_top_->width(), 5); +} + +TYPED_TEST(EltwiseLayerTest, TestProd) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); + shared_ptr > layer( + new EltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); + const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); + const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_EQ(data[i], in_data_a[i] * in_data_b[i] * in_data_c[i]); + } +} + +TYPED_TEST(EltwiseLayerTest, TestSum) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + shared_ptr > layer( + new EltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); + const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); + const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_EQ(data[i], in_data_a[i] + in_data_b[i] + in_data_c[i]); + } +} + +TYPED_TEST(EltwiseLayerTest, TestSumCoeff) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + eltwise_param->add_coeff(1); + eltwise_param->add_coeff(-0.5); + eltwise_param->add_coeff(2); + shared_ptr > layer( + new EltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); + const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); + const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_NEAR(data[i], in_data_a[i] - 0.5*in_data_b[i] + 2*in_data_c[i], + 1e-4); + } +} + +TYPED_TEST(EltwiseLayerTest, TestStableProdGradient) { + typedef typename TypeParam::Dtype Dtype; + 
LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); + eltwise_param->set_stable_prod_grad(true); + EltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(EltwiseLayerTest, TestUnstableProdGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); + eltwise_param->set_stable_prod_grad(false); + EltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(EltwiseLayerTest, TestSumGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + EltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(EltwiseLayerTest, TestSumCoeffGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + eltwise_param->add_coeff(1); + eltwise_param->add_coeff(-0.5); + eltwise_param->add_coeff(2); + EltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(EltwiseLayerTest, TestMax) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_MAX); + shared_ptr > layer( + new EltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); + const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); + const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_EQ(data[i], + std::max(in_data_a[i], std::max(in_data_b[i], in_data_c[i]))); + } +} + +TYPED_TEST(EltwiseLayerTest, TestMaxGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_MAX); + EltwiseLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +} // namespace caffe diff --git a/src/caffe/test/test_euclidean_loss_layer.cpp b/src/caffe/test/test_euclidean_loss_layer.cpp new file mode 100755 index 0000000..1949742 --- /dev/null +++ b/src/caffe/test/test_euclidean_loss_layer.cpp @@ -0,0 +1,91 @@ +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include 
"caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class EuclideanLossLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + EuclideanLossLayerTest() + : blob_bottom_data_(new Blob(10, 5, 1, 1)), + blob_bottom_label_(new Blob(10, 5, 1, 1)), + blob_top_loss_(new Blob()) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); + filler.Fill(this->blob_bottom_label_); + blob_bottom_vec_.push_back(blob_bottom_label_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~EuclideanLossLayerTest() { + delete blob_bottom_data_; + delete blob_bottom_label_; + delete blob_top_loss_; + } + + void TestForward() { + // Get the loss without a specified objective weight -- should be + // equivalent to explicitly specifiying a weight of 1. + LayerParameter layer_param; + EuclideanLossLayer layer_weight_1(layer_param); + layer_weight_1.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype loss_weight_1 = + layer_weight_1.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + // Get the loss again with a different objective weight; check that it is + // scaled appropriately. + const Dtype kLossWeight = 3.7; + layer_param.add_loss_weight(kLossWeight); + EuclideanLossLayer layer_weight_2(layer_param); + layer_weight_2.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype loss_weight_2 = + layer_weight_2.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype kErrorMargin = 1e-5; + EXPECT_NEAR(loss_weight_1 * kLossWeight, loss_weight_2, kErrorMargin); + // Make sure the loss is non-trivial. + const Dtype kNonTrivialAbsThresh = 1e-1; + EXPECT_GE(fabs(loss_weight_1), kNonTrivialAbsThresh); + } + + Blob* const blob_bottom_data_; + Blob* const blob_bottom_label_; + Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(EuclideanLossLayerTest, TestDtypesAndDevices); + +TYPED_TEST(EuclideanLossLayerTest, TestForward) { + this->TestForward(); +} + +TYPED_TEST(EuclideanLossLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + const Dtype kLossWeight = 3.7; + layer_param.add_loss_weight(kLossWeight); + EuclideanLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +} // namespace caffe diff --git a/src/caffe/test/test_filler.cpp b/src/caffe/test/test_filler.cpp new file mode 100755 index 0000000..728b8dc --- /dev/null +++ b/src/caffe/test/test_filler.cpp @@ -0,0 +1,243 @@ +#include + +#include "gtest/gtest.h" + +#include "caffe/filler.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class ConstantFillerTest : public ::testing::Test { + protected: + ConstantFillerTest() + : blob_(new Blob(2, 3, 4, 5)), + filler_param_() { + filler_param_.set_value(10.); + filler_.reset(new ConstantFiller(filler_param_)); + filler_->Fill(blob_); + } + virtual ~ConstantFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(ConstantFillerTest, TestDtypes); + +TYPED_TEST(ConstantFillerTest, TestFill) { + EXPECT_TRUE(this->blob_); + const int count = this->blob_->count(); + const 
TypeParam* data = this->blob_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_GE(data[i], this->filler_param_.value()); + } +} + + +template +class UniformFillerTest : public ::testing::Test { + protected: + UniformFillerTest() + : blob_(new Blob(2, 3, 4, 5)), + filler_param_() { + filler_param_.set_min(1.); + filler_param_.set_max(2.); + filler_.reset(new UniformFiller(filler_param_)); + filler_->Fill(blob_); + } + virtual ~UniformFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(UniformFillerTest, TestDtypes); + +TYPED_TEST(UniformFillerTest, TestFill) { + EXPECT_TRUE(this->blob_); + const int count = this->blob_->count(); + const TypeParam* data = this->blob_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_GE(data[i], this->filler_param_.min()); + EXPECT_LE(data[i], this->filler_param_.max()); + } +} + +template +class PositiveUnitballFillerTest : public ::testing::Test { + protected: + PositiveUnitballFillerTest() + : blob_(new Blob(2, 3, 4, 5)), + filler_param_() { + filler_.reset(new PositiveUnitballFiller(filler_param_)); + filler_->Fill(blob_); + } + virtual ~PositiveUnitballFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(PositiveUnitballFillerTest, TestDtypes); + +TYPED_TEST(PositiveUnitballFillerTest, TestFill) { + EXPECT_TRUE(this->blob_); + const int num = this->blob_->num(); + const int count = this->blob_->count(); + const int dim = count / num; + const TypeParam* data = this->blob_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_GE(data[i], 0); + EXPECT_LE(data[i], 1); + } + for (int i = 0; i < num; ++i) { + TypeParam sum = 0; + for (int j = 0; j < dim; ++j) { + sum += data[i * dim + j]; + } + EXPECT_GE(sum, 0.999); + EXPECT_LE(sum, 1.001); + } +} + +template +class GaussianFillerTest : public ::testing::Test { + protected: + GaussianFillerTest() + : blob_(new Blob(2, 3, 4, 5)), + filler_param_() { + filler_param_.set_mean(10.); + filler_param_.set_std(0.1); + filler_.reset(new GaussianFiller(filler_param_)); + filler_->Fill(blob_); + } + virtual ~GaussianFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(GaussianFillerTest, TestDtypes); + +TYPED_TEST(GaussianFillerTest, TestFill) { + EXPECT_TRUE(this->blob_); + const int count = this->blob_->count(); + const TypeParam* data = this->blob_->cpu_data(); + TypeParam mean = 0.; + TypeParam var = 0.; + for (int i = 0; i < count; ++i) { + mean += data[i]; + var += (data[i] - this->filler_param_.mean()) * + (data[i] - this->filler_param_.mean()); + } + mean /= count; + var /= count; + // Very loose test. 
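+  // For reference (illustrative arithmetic only): the blob holds
+  // count = 2 * 3 * 4 * 5 = 120 samples, so the standard error of the sample
+  // mean is roughly std / sqrt(120), i.e. about 0.009 for std = 0.1.
+  // Accepting any mean within 5 * std of the target and any variance within
+  // a factor of 5 of std^2 is therefore far looser than statistical noise
+  // requires; the bounds below only guard against gross filler errors.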
+ EXPECT_GE(mean, this->filler_param_.mean() - this->filler_param_.std() * 5); + EXPECT_LE(mean, this->filler_param_.mean() + this->filler_param_.std() * 5); + TypeParam target_var = this->filler_param_.std() * this->filler_param_.std(); + EXPECT_GE(var, target_var / 5.); + EXPECT_LE(var, target_var * 5.); +} + +template +class XavierFillerTest : public ::testing::Test { + protected: + XavierFillerTest() + : blob_(new Blob(1000, 2, 4, 5)), + filler_param_() { + } + virtual void test_params(FillerParameter_VarianceNorm variance_norm, + Dtype n) { + this->filler_param_.set_variance_norm(variance_norm); + this->filler_.reset(new XavierFiller(this->filler_param_)); + this->filler_->Fill(blob_); + EXPECT_TRUE(this->blob_); + const int count = this->blob_->count(); + const Dtype* data = this->blob_->cpu_data(); + Dtype mean = 0.; + Dtype ex2 = 0.; + for (int i = 0; i < count; ++i) { + mean += data[i]; + ex2 += data[i] * data[i]; + } + mean /= count; + ex2 /= count; + Dtype std = sqrt(ex2 - mean*mean); + Dtype target_std = sqrt(2.0 / n); + EXPECT_NEAR(mean, 0.0, 0.1); + EXPECT_NEAR(std, target_std, 0.1); + } + virtual ~XavierFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(XavierFillerTest, TestDtypes); + +TYPED_TEST(XavierFillerTest, TestFillFanIn) { + TypeParam n = 2*4*5; + this->test_params(FillerParameter_VarianceNorm_FAN_IN, n); +} +TYPED_TEST(XavierFillerTest, TestFillFanOut) { + TypeParam n = 1000*4*5; + this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n); +} +TYPED_TEST(XavierFillerTest, TestFillAverage) { + TypeParam n = (2*4*5 + 1000*4*5) / 2.0; + this->test_params(FillerParameter_VarianceNorm_AVERAGE, n); +} + +template +class MSRAFillerTest : public ::testing::Test { + protected: + MSRAFillerTest() + : blob_(new Blob(1000, 2, 4, 5)), + filler_param_() { + } + virtual void test_params(FillerParameter_VarianceNorm variance_norm, + Dtype n) { + this->filler_param_.set_variance_norm(variance_norm); + this->filler_.reset(new MSRAFiller(this->filler_param_)); + this->filler_->Fill(blob_); + EXPECT_TRUE(this->blob_); + const int count = this->blob_->count(); + const Dtype* data = this->blob_->cpu_data(); + Dtype mean = 0.; + Dtype ex2 = 0.; + for (int i = 0; i < count; ++i) { + mean += data[i]; + ex2 += data[i] * data[i]; + } + mean /= count; + ex2 /= count; + Dtype std = sqrt(ex2 - mean*mean); + Dtype target_std = sqrt(2.0 / n); + EXPECT_NEAR(mean, 0.0, 0.1); + EXPECT_NEAR(std, target_std, 0.1); + } + virtual ~MSRAFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(MSRAFillerTest, TestDtypes); + +TYPED_TEST(MSRAFillerTest, TestFillFanIn) { + TypeParam n = 2*4*5; + this->test_params(FillerParameter_VarianceNorm_FAN_IN, n); +} +TYPED_TEST(MSRAFillerTest, TestFillFanOut) { + TypeParam n = 1000*4*5; + this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n); +} +TYPED_TEST(MSRAFillerTest, TestFillAverage) { + TypeParam n = (2*4*5 + 1000*4*5) / 2.0; + this->test_params(FillerParameter_VarianceNorm_AVERAGE, n); +} + +} // namespace caffe diff --git a/src/caffe/test/test_filter_layer.cpp b/src/caffe/test/test_filter_layer.cpp new file mode 100755 index 0000000..c641b6e --- /dev/null +++ b/src/caffe/test/test_filter_layer.cpp @@ -0,0 +1,128 @@ +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + 
+#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class FilterLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + FilterLayerTest() + : blob_bottom_data_(new Blob(4, 3, 6, 4)), + blob_bottom_labels_(new Blob(4, 1, 1, 1)), + blob_bottom_selector_(new Blob(4, 1, 1, 1)), + blob_top_data_(new Blob()), + blob_top_labels_(new Blob()) {} + virtual void SetUp() { + // fill the values + Caffe::set_random_seed(1890); + FillerParameter filler_param; + GaussianFiller filler(filler_param); + // fill the selector blob + Dtype* bottom_data_selector_ = blob_bottom_selector_->mutable_cpu_data(); + bottom_data_selector_[0] = 0; + bottom_data_selector_[1] = 1; + bottom_data_selector_[2] = 1; + bottom_data_selector_[3] = 0; + // fill the other bottom blobs + filler.Fill(blob_bottom_data_); + for (int i = 0; i < blob_bottom_labels_->count(); ++i) { + blob_bottom_labels_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; + } + blob_bottom_vec_.push_back(blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_labels_); + blob_bottom_vec_.push_back(blob_bottom_selector_); + blob_top_vec_.push_back(blob_top_data_); + blob_top_vec_.push_back(blob_top_labels_); + } + virtual ~FilterLayerTest() { + delete blob_bottom_data_; + delete blob_bottom_labels_; + delete blob_bottom_selector_; + delete blob_top_data_; + delete blob_top_labels_; + } + Blob* const blob_bottom_data_; + Blob* const blob_bottom_labels_; + Blob* const blob_bottom_selector_; + // blobs for the top of FilterLayer + Blob* const blob_top_data_; + Blob* const blob_top_labels_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(FilterLayerTest, TestDtypesAndDevices); + +TYPED_TEST(FilterLayerTest, TestReshape) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + FilterLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Reshape(this->blob_bottom_vec_, this->blob_top_vec_); + // In the test first and last items should have been filtered + // so we just expect 2 remaining items + EXPECT_EQ(this->blob_top_data_->shape(0), 2); + EXPECT_EQ(this->blob_top_labels_->shape(0), 2); + EXPECT_GT(this->blob_bottom_data_->shape(0), + this->blob_top_data_->shape(0)); + EXPECT_GT(this->blob_bottom_labels_->shape(0), + this->blob_top_labels_->shape(0)); + for (int i = 1; i < this->blob_bottom_labels_->num_axes(); i++) { + EXPECT_EQ(this->blob_bottom_labels_->shape(i), + this->blob_top_labels_->shape(i)); + } +} + +TYPED_TEST(FilterLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + FilterLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Reshape(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_labels_->data_at(0, 0, 0, 0), + this->blob_bottom_labels_->data_at(1, 0, 0, 0)); + EXPECT_EQ(this->blob_top_labels_->data_at(1, 0, 0, 0), + this->blob_bottom_labels_->data_at(2, 0, 0, 0)); + + int dim = this->blob_top_data_->count() / + this->blob_top_data_->shape(0); + const Dtype* top_data = this->blob_top_data_->cpu_data(); + const Dtype* bottom_data = this->blob_bottom_data_->cpu_data(); + // selector is 0 1 1 0, so we need to compare bottom(1,c,h,w) + // with top(0,c,h,w) and bottom(2,c,h,w) with top(1,c,h,w) + bottom_data += dim; // bottom(1,c,h,w) + for (size_t n = 0; n < dim; n++) + 
EXPECT_EQ(top_data[n], bottom_data[n]); + + bottom_data += dim; // bottom(2,c,h,w) + top_data += dim; // top(1,c,h,w) + for (size_t n = 0; n < dim; n++) + EXPECT_EQ(top_data[n], bottom_data[n]); +} + +TYPED_TEST(FilterLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + FilterLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + // check only input 0 (data) because labels and selector + // don't need backpropagation + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); +} + +} // namespace caffe diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp new file mode 100755 index 0000000..7b6757c --- /dev/null +++ b/src/caffe/test/test_flatten_layer.cpp @@ -0,0 +1,109 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class FlattenLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: + FlattenLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 5)), + blob_top_(new Blob()) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~FlattenLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(FlattenLayerTest, TestDtypesAndDevices); + +TYPED_TEST(FlattenLayerTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + FlattenLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(this->blob_top_->num_axes(), 2); + EXPECT_EQ(this->blob_top_->shape(0), 2); + EXPECT_EQ(this->blob_top_->shape(1), 3 * 6 * 5); +} + +TYPED_TEST(FlattenLayerTest, TestSetupWithAxis) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_flatten_param()->set_axis(2); + FlattenLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(this->blob_top_->num_axes(), 3); + EXPECT_EQ(this->blob_top_->shape(0), 2); + EXPECT_EQ(this->blob_top_->shape(1), 3); + EXPECT_EQ(this->blob_top_->shape(2), 6 * 5); +} + +TYPED_TEST(FlattenLayerTest, TestSetupWithEndAxis) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_flatten_param()->set_end_axis(-2); + FlattenLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(this->blob_top_->num_axes(), 3); + EXPECT_EQ(this->blob_top_->shape(0), 2); + EXPECT_EQ(this->blob_top_->shape(1), 3 * 6); + EXPECT_EQ(this->blob_top_->shape(2), 5); +} + +TYPED_TEST(FlattenLayerTest, TestSetupWithStartAndEndAxis) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_flatten_param()->set_axis(0); + layer_param.mutable_flatten_param()->set_end_axis(-2); + FlattenLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(this->blob_top_->num_axes(), 2); + EXPECT_EQ(this->blob_top_->shape(0), 2 * 3 * 6); + EXPECT_EQ(this->blob_top_->shape(1), 5); +} + 
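+// Note on the forward-pass check below (illustrative arithmetic only): with
+// the default FlattenLayer settings the 2 x 3 x 6 x 5 bottom blob becomes a
+// 2 x 90 top blob, and flat index c maps back to channel c / (6 * 5),
+// height (c / 5) % 6, and width c % 5.  For example, c = 47 corresponds to
+// channel 1, height 3, width 2.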
+TYPED_TEST(FlattenLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + FlattenLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int c = 0; c < 3 * 6 * 5; ++c) { + EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0), + this->blob_bottom_->data_at(0, c / (6 * 5), (c / 5) % 6, c % 5)); + EXPECT_EQ(this->blob_top_->data_at(1, c, 0, 0), + this->blob_bottom_->data_at(1, c / (6 * 5), (c / 5) % 6, c % 5)); + } +} + +TYPED_TEST(FlattenLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + FlattenLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +} // namespace caffe diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp new file mode 100755 index 0000000..7ad7467 --- /dev/null +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -0,0 +1,1299 @@ +#include +#include +#include +#include + +#include "google/protobuf/text_format.h" + +#include "gtest/gtest.h" + +#include "caffe/common.hpp" +#include "caffe/parallel.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/solver.hpp" +#include "caffe/util/io.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +using std::ostringstream; + +namespace caffe { + +template +class GradientBasedSolverTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + GradientBasedSolverTest() : + seed_(1701), num_(4), channels_(3), height_(10), width_(10), + share_(false) { + input_file_ = new string( + CMAKE_SOURCE_DIR "caffe/test/test_data/solver_data_list.txt" CMAKE_EXT); + } + ~GradientBasedSolverTest() { + delete input_file_; + } + + string snapshot_prefix_; + shared_ptr > solver_; + shared_ptr > sync_; + int seed_; + // Dimensions are determined by generate_sample_data.py + // TODO this is brittle and the hdf5 file should be checked instead. + int num_, channels_, height_, width_; + bool share_; + Dtype delta_; // Stability constant for RMSProp, AdaGrad, AdaDelta and Adam + + // Test data: check out generate_sample_data.py in the same directory. + string* input_file_; + + virtual SolverParameter_SolverType solver_type() = 0; + virtual void InitSolver(const SolverParameter& param) = 0; + + virtual void InitSolverFromProtoString(const string& proto) { + SolverParameter param; + CHECK(google::protobuf::TextFormat::ParseFromString(proto, ¶m)); + // Set the solver_mode according to current Caffe::mode. 
+ switch (Caffe::mode()) { + case Caffe::CPU: + param.set_solver_mode(SolverParameter_SolverMode_CPU); + break; + case Caffe::GPU: + param.set_solver_mode(SolverParameter_SolverMode_GPU); + break; + default: + LOG(FATAL) << "Unknown Caffe mode: " << Caffe::mode(); + } + InitSolver(param); + delta_ = param.delta(); + } + + string RunLeastSquaresSolver(const Dtype learning_rate, + const Dtype weight_decay, const Dtype momentum, const int num_iters, + const int iter_size = 1, const int devices = 1, + const bool snapshot = false, const char* from_snapshot = NULL) { + ostringstream proto; + int device_id = 0; +#ifndef CPU_ONLY + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaGetDevice(&device_id)); + } +#endif + proto << + "snapshot_after_train: " << snapshot << " " + "max_iter: " << num_iters << " " + "base_lr: " << learning_rate << " " + "lr_policy: 'fixed' " + "iter_size: " << iter_size << " " + "device_id: " << device_id << " " + "net_param { " + " name: 'TestNetwork' " + " layer { " + " name: 'data' " + " type: 'HDF5Data' " + " hdf5_data_param { " + " source: '" << *(this->input_file_) << "' " + " batch_size: " << num_ / iter_size << " " + " } " + " top: 'data' " + " top: 'targets' " + " } "; + if (share_) { + proto << + " layer { " + " name: 'slice' " + " type: 'Slice' " + " bottom: 'data' " + " top: 'data1' " + " top: 'data2' " + " slice_param { " + " axis: 0 " + " } " + " } "; + } + proto << + " layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " param { name: 'weights' } " + " param { name: 'bias' } " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 1.0 " + " } " + " bias_filler { " + " type: 'gaussian' " + " std: 1.0 " + " } " + " } " + " bottom: '" << string(share_ ? "data1": "data") << "' " + " top: '" << string(share_ ? 
"innerprod1": "innerprod") << "' " + " } "; + if (share_) { + proto << + " layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " param { name: 'weights' } " + " param { name: 'bias' } " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 1.0 " + " } " + " bias_filler { " + " type: 'gaussian' " + " std: 1.0 " + " } " + " } " + " bottom: 'data2' " + " top: 'innerprod2' " + " } " + " layer { " + " name: 'concat' " + " type: 'Concat' " + " bottom: 'innerprod1' " + " bottom: 'innerprod2' " + " top: 'innerprod' " + " concat_param { " + " axis: 0 " + " } " + " } "; + } + proto << + " layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod' " + " bottom: 'targets' " + " } " + "} "; + if (weight_decay != 0) { + proto << "weight_decay: " << weight_decay << " "; + } + if (momentum != 0) { + proto << "momentum: " << momentum << " "; + } + MakeTempDir(&snapshot_prefix_); + proto << "snapshot_prefix: '" << snapshot_prefix_ << "/' "; + if (snapshot) { + proto << "snapshot: " << num_iters << " "; + } + Caffe::set_random_seed(this->seed_); + this->InitSolverFromProtoString(proto.str()); + if (from_snapshot != NULL) { + this->solver_->Restore(from_snapshot); + vector*> empty_bottom_vec; + for (int i = 0; i < this->solver_->iter(); ++i) { + this->solver_->net()->Forward(empty_bottom_vec); + } + } + if (devices == 1) { + this->solver_->Solve(); + } else { + LOG(INFO) << "Multi-GPU test on " << devices << " devices"; + vector gpus; + // put current device at the beginning + int device_id = solver_->param().device_id(); + gpus.push_back(device_id); + for (int i = 0; gpus.size() < devices; ++i) { + if (i != device_id) + gpus.push_back(i); + } + Caffe::set_solver_count(gpus.size()); + this->sync_.reset(new P2PSync( + this->solver_, NULL, this->solver_->param())); + this->sync_->run(gpus); + Caffe::set_solver_count(1); + } + if (snapshot) { + ostringstream resume_file; + resume_file << snapshot_prefix_ << "/_iter_" << num_iters + << ".solverstate"; + string resume_filename = resume_file.str(); + return resume_filename; + } + return string(); + } + + // Compute an update value given the current state of the train net, + // using the analytical formula for the least squares gradient. + // updated_params will store the updated weight and bias results, + // using the blobs' diffs to hold the update values themselves. + void ComputeLeastSquaresUpdate(const Dtype learning_rate, + const Dtype weight_decay, const Dtype momentum, const int num_iters, + vector > >* updated_params) { + const int N = num_; + const int D = channels_ * height_ * width_; + + // Run a forward pass, and manually compute the update values from the + // result. 
+ Net& net = *this->solver_->net(); + vector*> empty_bottom_vec; + net.Forward(empty_bottom_vec); + ASSERT_TRUE(net.has_blob("data")); + const Blob& data = *net.blob_by_name("data"); + ASSERT_TRUE(net.has_blob("targets")); + const Blob& targets = *net.blob_by_name("targets"); + ASSERT_TRUE(net.has_layer("innerprod")); + const vector > >& param_blobs = + net.layer_by_name("innerprod")->blobs(); + const int num_param_blobs = 2; + ASSERT_EQ(num_param_blobs, param_blobs.size()); + const Blob& weights = *param_blobs[0]; + const Blob& bias = *param_blobs[1]; + ASSERT_EQ(D * N, data.count()); + ASSERT_EQ(N, targets.count()); + ASSERT_EQ(D, weights.count()); + ASSERT_EQ(1, bias.count()); + + updated_params->clear(); + updated_params->resize(num_param_blobs); + for (int i = 0; i < num_param_blobs; ++i) { + (*updated_params)[i].reset(new Blob()); + } + Blob& updated_weights = *(*updated_params)[0]; + updated_weights.ReshapeLike(weights); + Blob& updated_bias = *(*updated_params)[1]; + updated_bias.ReshapeLike(bias); + + for (int i = 0; i <= D; ++i) { + // Compute the derivative with respect to the ith weight (i.e., the ith + // element of the gradient). + Dtype grad = 0; + for (int j = 0; j <= D; ++j) { + // Compute element (i, j) of X^T * X. + Dtype element = 0; + for (int k = 0; k < N; ++k) { + // (i, k) in X^T (== (k, i) in X) times (k, j) in X. + const Dtype element_i = (i == D) ? 1 : data.cpu_data()[k * D + i]; + const Dtype element_j = (j == D) ? 1 : data.cpu_data()[k * D + j]; + element += element_i * element_j; + } + if (j == D) { + grad += element * bias.cpu_data()[0]; + } else { + grad += element * weights.cpu_data()[j]; + } + } + for (int k = 0; k < N; ++k) { + const Dtype element_i = (i == D) ? 1 : data.cpu_data()[k * D + i]; + grad -= element_i * targets.cpu_data()[k]; + } + // Scale the gradient over the N samples. + grad /= N; + // Add the weight decay to the gradient. + grad += weight_decay * + ((i == D) ? bias.cpu_data()[0] : weights.cpu_data()[i]); + // Finally, compute update. + const vector > >& history = solver_->history(); + if (solver_type() != SolverParameter_SolverType_ADADELTA + && solver_type() != SolverParameter_SolverType_ADAM) { + ASSERT_EQ(2, history.size()); // 1 blob for weights, 1 for bias + } else { + ASSERT_EQ(4, history.size()); // additional blobs for update history + } + Dtype update_value = learning_rate * grad; + const Dtype history_value = (i == D) ? + history[1]->cpu_data()[0] : history[0]->cpu_data()[i]; + const Dtype temp = momentum * history_value; + switch (solver_type()) { + case SolverParameter_SolverType_SGD: + update_value += temp; + break; + case SolverParameter_SolverType_NESTEROV: + update_value += temp; + // step back then over-step + update_value = (1 + momentum) * update_value - temp; + break; + case SolverParameter_SolverType_ADAGRAD: + update_value /= std::sqrt(history_value + grad * grad) + delta_; + break; + case SolverParameter_SolverType_RMSPROP: { + const Dtype rms_decay = 0.95; + update_value /= std::sqrt(rms_decay*history_value + + grad * grad * (1 - rms_decay)) + delta_; + } + break; + case SolverParameter_SolverType_ADADELTA: + { + const Dtype update_history_value = (i == D) ? 
+ history[1 + num_param_blobs]->cpu_data()[0] : + history[0 + num_param_blobs]->cpu_data()[i]; + const Dtype weighted_gradient_average = + momentum * history_value + (1 - momentum) * (grad * grad); + update_value = grad * std::sqrt((update_history_value + delta_) / + (weighted_gradient_average + delta_)) * learning_rate; + // not actually needed, just here for illustrative purposes + // const Dtype weighted_update_average = + // momentum * update_history_value + (1 - momentum) * (update_value); + break; + } + case SolverParameter_SolverType_ADAM: { + const Dtype momentum2 = 0.999; + const Dtype m = history_value; + const Dtype v = (i == D) ? + history[1 + num_param_blobs]->cpu_data()[0] : + history[0 + num_param_blobs]->cpu_data()[i]; + const Dtype val_m = (1 - momentum) * grad + momentum * m; + const Dtype val_v = (1 - momentum2) * grad * grad + momentum2 * v; + Dtype alpha_t = learning_rate * + std::sqrt(Dtype(1) - pow(momentum2, num_iters)) / + (Dtype(1.) - pow(momentum, num_iters)); + update_value = alpha_t * val_m / (std::sqrt(val_v) + delta_); + break; + } + default: + LOG(FATAL) << "Unknown solver type: " << solver_type(); + } + if (i == D) { + updated_bias.mutable_cpu_diff()[0] = update_value; + updated_bias.mutable_cpu_data()[0] = bias.cpu_data()[0] - update_value; + } else { + updated_weights.mutable_cpu_diff()[i] = update_value; + updated_weights.mutable_cpu_data()[i] = + weights.cpu_data()[i] - update_value; + } + } + } + + void CheckLeastSquaresUpdate( + const vector > >& updated_params) { + const int D = channels_ * height_ * width_; + + const Blob& updated_weights = *updated_params[0]; + const Blob& updated_bias = *updated_params[1]; + + Net& net = *this->solver_->net(); + ASSERT_TRUE(net.has_layer("innerprod")); + const vector > >& param_blobs = + net.layer_by_name("innerprod")->blobs(); + ASSERT_EQ(2, param_blobs.size()); + const Blob& solver_updated_weights = *param_blobs[0]; + ASSERT_EQ(D, solver_updated_weights.count()); + const double kPrecision = 1e-2; + const double kMinPrecision = 1e-7; + for (int i = 0; i < D; ++i) { + const Dtype expected_updated_weight = updated_weights.cpu_data()[i]; + const Dtype solver_updated_weight = solver_updated_weights.cpu_data()[i]; + const Dtype error_margin = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_updated_weight), fabs(solver_updated_weight))); + EXPECT_NEAR(expected_updated_weight, solver_updated_weight, error_margin); + } + const Blob& solver_updated_bias_blob = *param_blobs[1]; + ASSERT_EQ(1, solver_updated_bias_blob.count()); + const Dtype expected_updated_bias = updated_bias.cpu_data()[0]; + const Dtype solver_updated_bias = solver_updated_bias_blob.cpu_data()[0]; + const Dtype error_margin = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_updated_bias), fabs(solver_updated_bias))); + EXPECT_NEAR(expected_updated_bias, solver_updated_bias, error_margin); + + // Check the solver's history -- should contain the previous update value. 
+ if (solver_type() == SolverParameter_SolverType_SGD) { + const vector > >& history = solver_->history(); + ASSERT_EQ(2, history.size()); + for (int i = 0; i < D; ++i) { + const Dtype expected_history = updated_weights.cpu_diff()[i]; + const Dtype solver_history = history[0]->cpu_data()[i]; + const Dtype error_margin_hist = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_history), fabs(solver_history))); + EXPECT_NEAR(expected_history, solver_history, error_margin_hist); + } + const Dtype expected_history = updated_bias.cpu_diff()[0]; + const Dtype solver_history = history[1]->cpu_data()[0]; + const Dtype error_margin_hist = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_history), fabs(solver_history))); + EXPECT_NEAR(expected_history, solver_history, error_margin_hist); + } + } + + void CheckAccumulation(const Dtype kLearningRate, const Dtype kWeightDecay, + const Dtype kMomentum, const int kNumIters, const int kIterSize) { + const double kPrecision = 1e-2; + const double kMinPrecision = 1e-7; + // Solve without accumulation and save parameters. + this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, + kNumIters); + // Save parameters for comparison. + Net& net = *this->solver_->net(); + const vector > >& param_blobs = + net.layer_by_name("innerprod")->blobs(); + vector > > noaccum_params(param_blobs.size()); + for (int i = 0; i < param_blobs.size(); ++i) { + noaccum_params[i].reset(new Blob()); + noaccum_params[i]->CopyFrom(*param_blobs[i], false, true); + } + // Solve by equivalent accumulation of gradients over divided batches. + this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, + kNumIters, kIterSize); + Net& net_accum = *this->solver_->net(); + const vector > >& accum_params = + net_accum.layer_by_name("innerprod")->blobs(); + // Compare accumulated parameters against no accumulation standard. + const int D = this->channels_ * this->height_ * this->width_; + for (int i = 0; i < D; ++i) { + const Dtype expected_param = noaccum_params[0]->cpu_data()[i]; + const Dtype accum_param = accum_params[0]->cpu_data()[i]; + const Dtype error_margin = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_param), fabs(accum_param))); + EXPECT_NEAR(expected_param, accum_param, error_margin); + } + ASSERT_EQ(1, accum_params[1]->count()); + const Dtype expected_bias = noaccum_params[1]->cpu_data()[0]; + const Dtype accum_bias = accum_params[1]->cpu_data()[0]; + const Dtype error_margin = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_bias), fabs(accum_bias))); + EXPECT_NEAR(expected_bias, accum_bias, error_margin); + } + + // Test that the correct update is computed for a regularized least squares + // problem: + // + // E = (1/(2n)) || X w - y ||^2 + (lambda / 2) || w ||^2 + // \nabla_w E = (1/n) (X^T X w - X^T y) + lambda * w + // + // X \in R^{n x (d+1)} (each example is a row, (d+1)th element is always 1) + // w \in R^{(d+1) x 1} ((d+1)th element is the bias) + // y \in R^{n x 1} + // lambda is weight_decay + // + // TestLeastSquaresUpdate works "inductively", assuming that the solver + // correctly updates the net K (= iter_to_check) times, then given the history + // from the Kth update, we compute the (K+1)th update and check that it + // matches the solver's (K+1)th update. 
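+  // As a concrete sketch of one inductive step for plain SGD (the simplest
+  // case in ComputeLeastSquaresUpdate above):
+  //   v_{K+1} = momentum * v_K + learning_rate * (grad_K + weight_decay * w_K)
+  //   w_{K+1} = w_K - v_{K+1}
+  // where v_K is the previous update read back from solver_->history(), so
+  // only the single (K+1)-th step needs to be reproduced analytically; the
+  // first K iterations are simply run by the solver itself.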
+ void TestLeastSquaresUpdate(const Dtype learning_rate = 1.0, + const Dtype weight_decay = 0.0, const Dtype momentum = 0.0, + const int iter_to_check = 0) { + const int kNum = num_; + const int kIterSize = 1; + // Test over all numbers of devices. + int available_devices = 1; +#ifndef CPU_ONLY + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaGetDeviceCount(&available_devices)); + } +#endif + for (int devices = 1; devices <= available_devices; ++devices) { + // Configure batch size for single / multi device equivalence. + // Constant data is needed for multi device as for accumulation. + num_ = kNum * devices; + + // Initialize the solver and run K (= iter_to_check) solver iterations + // (on single device). + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, + iter_to_check, kIterSize, 1); + + // Compute the (K+1)th update using the analytic least squares gradient. + vector > > updated_params; + ComputeLeastSquaresUpdate(learning_rate, weight_decay, momentum, + iter_to_check + 1, &updated_params); + + // Reinitialize the solver and run K+1 solver iterations. + num_ = kNum; + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, + iter_to_check + 1, kIterSize, devices); + + // Check that the solver's solution matches ours. + CheckLeastSquaresUpdate(updated_params); + } + } + + void TestSnapshot(const Dtype learning_rate = 1.0, + const Dtype weight_decay = 0.0, const Dtype momentum = 0.0, + const int num_iters = 1) { + // Run the solver for num_iters * 2 iterations. + const int total_num_iters = num_iters * 2; + bool snapshot = false; + const int kIterSize = 1; + const int kDevices = 1; + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, + total_num_iters, kIterSize, kDevices, snapshot); + + // Save the resulting param values. + vector > > param_copies; + const vector*>& orig_params = + solver_->net()->learnable_params(); + param_copies.resize(orig_params.size()); + for (int i = 0; i < orig_params.size(); ++i) { + param_copies[i].reset(new Blob()); + const bool kReshape = true; + for (int copy_diff = false; copy_diff <= true; ++copy_diff) { + param_copies[i]->CopyFrom(*orig_params[i], copy_diff, kReshape); + } + } + + // Save the solver history + vector > > history_copies; + const vector > >& orig_history = solver_->history(); + history_copies.resize(orig_history.size()); + for (int i = 0; i < orig_history.size(); ++i) { + history_copies[i].reset(new Blob()); + const bool kReshape = true; + for (int copy_diff = false; copy_diff <= true; ++copy_diff) { + history_copies[i]->CopyFrom(*orig_history[i], copy_diff, kReshape); + } + } + + // Run the solver for num_iters iterations and snapshot. + snapshot = true; + string snapshot_name = RunLeastSquaresSolver(learning_rate, weight_decay, + momentum, num_iters, kIterSize, kDevices, snapshot); + + // Reinitialize the solver and run for num_iters more iterations. + snapshot = false; + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, + total_num_iters, kIterSize, kDevices, + snapshot, snapshot_name.c_str()); + + // Check that params now match. + const vector*>& params = solver_->net()->learnable_params(); + for (int i = 0; i < params.size(); ++i) { + for (int j = 0; j < params[i]->count(); ++j) { + EXPECT_EQ(param_copies[i]->cpu_data()[j], params[i]->cpu_data()[j]) + << "param " << i << " data differed at dim " << j; + EXPECT_EQ(param_copies[i]->cpu_diff()[j], params[i]->cpu_diff()[j]) + << "param " << i << " diff differed at dim " << j; + } + } + + // Check that history now matches. 
+ const vector > >& history = solver_->history(); + for (int i = 0; i < history.size(); ++i) { + for (int j = 0; j < history[i]->count(); ++j) { + EXPECT_EQ(history_copies[i]->cpu_data()[j], history[i]->cpu_data()[j]) + << "history blob " << i << " data differed at dim " << j; + EXPECT_EQ(history_copies[i]->cpu_diff()[j], history[i]->cpu_diff()[j]) + << "history blob " << i << " diff differed at dim " << j; + } + } + } +}; + + +template +class SGDSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + this->solver_.reset(new SGDSolver(param)); + } + + virtual SolverParameter_SolverType solver_type() { + return SolverParameter_SolverType_SGD; + } +}; + +TYPED_TEST_CASE(SGDSolverTest, TestDtypesAndDevices); + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdate) { + this->TestLeastSquaresUpdate(); +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateLROneHundredth) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + this->TestLeastSquaresUpdate(kLearningRate); +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecayMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; + const Dtype kMomentum = 0.5; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; + const Dtype kMomentum = 0.5; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.5; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.5; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const 
int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(SGDSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + + +template +class AdaGradSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + this->solver_.reset(new AdaGradSolver(param)); + } + virtual SolverParameter_SolverType solver_type() { + return SolverParameter_SolverType_ADAGRAD; + } +}; + +TYPED_TEST_CASE(AdaGradSolverTest, TestDtypesAndDevices); + +TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdate) { + this->TestLeastSquaresUpdate(); +} + +TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateLROneHundredth) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + this->TestLeastSquaresUpdate(kLearningRate); +} + +TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); +} + +TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaGradSolverTest, + TestAdaGradLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype 
kMomentum = 0; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdaGradSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaGradSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + + +template +class NesterovSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + this->solver_.reset(new NesterovSolver(param)); + } + virtual SolverParameter_SolverType solver_type() { + return SolverParameter_SolverType_NESTEROV; + } +}; + +TYPED_TEST_CASE(NesterovSolverTest, TestDtypesAndDevices); + +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdate) { + this->TestLeastSquaresUpdate(); +} + +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateLROneHundredth) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + this->TestLeastSquaresUpdate(kLearningRate); +} + +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); +} + +TYPED_TEST(NesterovSolverTest, + TestNesterovLeastSquaresUpdateWithWeightDecayMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithMomentum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; + const Dtype kMomentum = 0.5; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; + const Dtype kMomentum = 0.5; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, + TestNesterovLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype 
kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(NesterovSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +template +class AdaDeltaSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + this->solver_.reset(new AdaDeltaSolver(param)); + } + + virtual SolverParameter_SolverType solver_type() { + return SolverParameter_SolverType_ADADELTA; + } +}; + +TYPED_TEST_CASE(AdaDeltaSolverTest, TestDtypesAndDevices); + +TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdate) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + this->TestLeastSquaresUpdate(kLearningRate); +} + +TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.95; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); +} + +TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithHalfMomentum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.5; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); + } +} + +TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithMomentum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.95; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); + } +} + +TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.0; + const Dtype 
kMomentum = 0.95; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaDeltaSolverTest, + TestAdaDeltaLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdaDeltaSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaDeltaSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +template +class AdamSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + SolverParameter new_param = param; + const Dtype momentum = 0.9; + new_param.set_momentum(momentum); + const Dtype momentum2 = 0.999; + new_param.set_momentum2(momentum2); + this->solver_.reset(new AdamSolver(new_param)); + } + virtual SolverParameter_SolverType solver_type() { + return SolverParameter_SolverType_ADAM; + } +}; + +TYPED_TEST_CASE(AdamSolverTest, TestDtypesAndDevices); + +TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdate) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; + const Dtype kMomentum = 0.9; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); +} + +TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, 
kMomentum); +} + +TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdamSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdamSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +template +class RMSPropSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + const Dtype rms_decay = 0.95; + SolverParameter new_param = param; + new_param.set_rms_decay(rms_decay); + this->solver_.reset(new RMSPropSolver(new_param)); + } + virtual SolverParameter_SolverType solver_type() { + return SolverParameter_SolverType_RMSPROP; + } +}; + +TYPED_TEST_CASE(RMSPropSolverTest, TestDtypesAndDevices); + +TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 1.0; + const Dtype kWeightDecay = 0.5; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); +} + +TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithRmsDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype 
kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(RMSPropSolverTest, + TestRMSPropLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(RMSPropSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(RMSPropSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +} // namespace caffe diff --git a/src/caffe/test/test_hdf5_output_layer.cpp b/src/caffe/test/test_hdf5_output_layer.cpp new file mode 100755 index 0000000..b56277b --- /dev/null +++ b/src/caffe/test/test_hdf5_output_layer.cpp @@ -0,0 +1,121 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/hdf5.hpp" +#include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class HDF5OutputLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + HDF5OutputLayerTest() + : input_file_name_( + CMAKE_SOURCE_DIR "caffe/test/test_data/sample_data.h5"), + blob_data_(new Blob()), + blob_label_(new Blob()), + num_(5), + channels_(8), + height_(5), + width_(5) { + MakeTempFilename(&output_file_name_); + } + + virtual ~HDF5OutputLayerTest() { + delete blob_data_; + delete blob_label_; + } + + void CheckBlobEqual(const Blob& b1, const Blob& b2); + + string output_file_name_; + string input_file_name_; + Blob* const blob_data_; + Blob* const blob_label_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; + int num_; + int channels_; + int height_; + int width_; +}; + +template +void HDF5OutputLayerTest::CheckBlobEqual(const Blob& b1, + const Blob& b2) { + EXPECT_EQ(b1.num(), b2.num()); + EXPECT_EQ(b1.channels(), b2.channels()); + EXPECT_EQ(b1.height(), 
b2.height());
+  EXPECT_EQ(b1.width(), b2.width());
+  for (int n = 0; n < b1.num(); ++n) {
+    for (int c = 0; c < b1.channels(); ++c) {
+      for (int h = 0; h < b1.height(); ++h) {
+        for (int w = 0; w < b1.width(); ++w) {
+          EXPECT_EQ(b1.data_at(n, c, h, w), b2.data_at(n, c, h, w));
+        }
+      }
+    }
+  }
+}
+
+TYPED_TEST_CASE(HDF5OutputLayerTest, TestDtypesAndDevices);
+
+TYPED_TEST(HDF5OutputLayerTest, TestForward) {
+  typedef typename TypeParam::Dtype Dtype;
+  LOG(INFO) << "Loading HDF5 file " << this->input_file_name_;
+  hid_t file_id = H5Fopen(this->input_file_name_.c_str(), H5F_ACC_RDONLY,
+                          H5P_DEFAULT);
+  ASSERT_GE(file_id, 0)<< "Failed to open HDF5 file" <<
+      this->input_file_name_;
+  hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4,
+      this->blob_data_);
+  hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4,
+      this->blob_label_);
+  herr_t status = H5Fclose(file_id);
+  EXPECT_GE(status, 0)<< "Failed to close HDF5 file " <<
+      this->input_file_name_;
+  this->blob_bottom_vec_.push_back(this->blob_data_);
+  this->blob_bottom_vec_.push_back(this->blob_label_);
+
+  LayerParameter param;
+  param.mutable_hdf5_output_param()->set_file_name(this->output_file_name_);
+  // This code block ensures that the layer is deconstructed and
+  // the output hdf5 file is closed.
+  {
+    HDF5OutputLayer<Dtype> layer(param);
+    layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+    EXPECT_EQ(layer.file_name(), this->output_file_name_);
+    layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
+  }
+  file_id = H5Fopen(this->output_file_name_.c_str(), H5F_ACC_RDONLY,
+                    H5P_DEFAULT);
+  ASSERT_GE(
+      file_id, 0)<< "Failed to open HDF5 file" <<
+      this->input_file_name_;
+
+  Blob<Dtype>* blob_data = new Blob<Dtype>();
+  hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4,
+      blob_data);
+  this->CheckBlobEqual(*(this->blob_data_), *blob_data);
+
+  Blob<Dtype>* blob_label = new Blob<Dtype>();
+  hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4,
+      blob_label);
+  this->CheckBlobEqual(*(this->blob_label_), *blob_label);
+
+  status = H5Fclose(file_id);
+  EXPECT_GE(status, 0) << "Failed to close HDF5 file " <<
+      this->output_file_name_;
+}
+
+}  // namespace caffe
diff --git a/src/caffe/test/test_hdf5data_layer.cpp b/src/caffe/test/test_hdf5data_layer.cpp
new file mode 100755
index 0000000..c9b027f
--- /dev/null
+++ b/src/caffe/test/test_hdf5data_layer.cpp
@@ -0,0 +1,135 @@
+#include <string>
+#include <vector>
+
+#include "gtest/gtest.h"
+
+#include "caffe/blob.hpp"
+#include "caffe/common.hpp"
+#include "caffe/filler.hpp"
+#include "caffe/proto/caffe.pb.h"
+#include "caffe/vision_layers.hpp"
+
+#include "caffe/test/test_caffe_main.hpp"
+
+namespace caffe {
+
+template <typename TypeParam>
+class HDF5DataLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
+ protected:
+  HDF5DataLayerTest()
+      : filename(NULL),
+        blob_top_data_(new Blob<Dtype>()),
+        blob_top_label_(new Blob<Dtype>()),
+        blob_top_label2_(new Blob<Dtype>()) {}
+  virtual void SetUp() {
+    blob_top_vec_.push_back(blob_top_data_);
+    blob_top_vec_.push_back(blob_top_label_);
+    blob_top_vec_.push_back(blob_top_label2_);
+
+    // Check out generate_sample_data.py in the same directory.
+ filename = new string( + CMAKE_SOURCE_DIR "caffe/test/test_data/sample_data_list.txt" CMAKE_EXT); + LOG(INFO)<< "Using sample HDF5 data file " << filename; + } + + virtual ~HDF5DataLayerTest() { + delete blob_top_data_; + delete blob_top_label_; + delete blob_top_label2_; + delete filename; + } + + string* filename; + Blob* const blob_top_data_; + Blob* const blob_top_label_; + Blob* const blob_top_label2_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(HDF5DataLayerTest, TestDtypesAndDevices); + +TYPED_TEST(HDF5DataLayerTest, TestRead) { + typedef typename TypeParam::Dtype Dtype; + // Create LayerParameter with the known parameters. + // The data file we are reading has 10 rows and 8 columns, + // with values from 0 to 10*8 reshaped in row-major order. + LayerParameter param; + param.add_top("data"); + param.add_top("label"); + param.add_top("label2"); + + HDF5DataParameter* hdf5_data_param = param.mutable_hdf5_data_param(); + int batch_size = 5; + hdf5_data_param->set_batch_size(batch_size); + hdf5_data_param->set_source(*(this->filename)); + int num_cols = 8; + int height = 6; + int width = 5; + + // Test that the layer setup got the correct parameters. + HDF5DataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_data_->num(), batch_size); + EXPECT_EQ(this->blob_top_data_->channels(), num_cols); + EXPECT_EQ(this->blob_top_data_->height(), height); + EXPECT_EQ(this->blob_top_data_->width(), width); + + EXPECT_EQ(this->blob_top_label_->num_axes(), 2); + EXPECT_EQ(this->blob_top_label_->shape(0), batch_size); + EXPECT_EQ(this->blob_top_label_->shape(1), 1); + + EXPECT_EQ(this->blob_top_label2_->num_axes(), 2); + EXPECT_EQ(this->blob_top_label2_->shape(0), batch_size); + EXPECT_EQ(this->blob_top_label2_->shape(1), 1); + + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + // Go through the data 10 times (5 batches). + const int data_size = num_cols * height * width; + for (int iter = 0; iter < 10; ++iter) { + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + // On even iterations, we're reading the first half of the data. + // On odd iterations, we're reading the second half of the data. + // NB: label is 1-indexed + int label_offset = 1 + ((iter % 2 == 0) ? 0 : batch_size); + int label2_offset = 1 + label_offset; + int data_offset = (iter % 2 == 0) ? 0 : batch_size * data_size; + + // Every two iterations we are reading the second file, + // which has the same labels, but data is offset by total data size, + // which is 2400 (see generate_sample_data). + int file_offset = (iter % 4 < 2) ? 
0 : 2400; + + for (int i = 0; i < batch_size; ++i) { + EXPECT_EQ( + label_offset + i, + this->blob_top_label_->cpu_data()[i]); + EXPECT_EQ( + label2_offset + i, + this->blob_top_label2_->cpu_data()[i]); + } + for (int i = 0; i < batch_size; ++i) { + for (int j = 0; j < num_cols; ++j) { + for (int h = 0; h < height; ++h) { + for (int w = 0; w < width; ++w) { + int idx = ( + i * num_cols * height * width + + j * height * width + + h * width + w); + EXPECT_EQ( + file_offset + data_offset + idx, + this->blob_top_data_->cpu_data()[idx]) + << "debug: i " << i << " j " << j + << " iter " << iter; + } + } + } + } + } +} + +} // namespace caffe diff --git a/src/caffe/test/test_hinge_loss_layer.cpp b/src/caffe/test/test_hinge_loss_layer.cpp new file mode 100755 index 0000000..b6a9902 --- /dev/null +++ b/src/caffe/test/test_hinge_loss_layer.cpp @@ -0,0 +1,76 @@ +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class HingeLossLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + HingeLossLayerTest() + : blob_bottom_data_(new Blob(10, 5, 1, 1)), + blob_bottom_label_(new Blob(10, 1, 1, 1)), + blob_top_loss_(new Blob()) { + // fill the values + Caffe::set_random_seed(1701); + FillerParameter filler_param; + filler_param.set_std(10); + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); + for (int i = 0; i < blob_bottom_label_->count(); ++i) { + blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; + } + blob_bottom_vec_.push_back(blob_bottom_label_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~HingeLossLayerTest() { + delete blob_bottom_data_; + delete blob_bottom_label_; + delete blob_top_loss_; + } + Blob* const blob_bottom_data_; + Blob* const blob_bottom_label_; + Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(HingeLossLayerTest, TestDtypesAndDevices); + + +TYPED_TEST(HingeLossLayerTest, TestGradientL1) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + HingeLossLayer layer(layer_param); + GradientChecker checker(1e-2, 2e-3, 1701, 1, 0.01); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); +} + +TYPED_TEST(HingeLossLayerTest, TestGradientL2) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + // Set norm to L2 + HingeLossParameter* hinge_loss_param = layer_param.mutable_hinge_loss_param(); + hinge_loss_param->set_norm(HingeLossParameter_Norm_L2); + HingeLossLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2, 1701); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); +} + +} // namespace caffe diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu new file mode 100755 index 0000000..0017ac2 --- /dev/null +++ b/src/caffe/test/test_im2col_kernel.cu @@ -0,0 +1,125 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +// Forward declare kernel 
functions
+template <typename Dtype>
+__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
+    const int height, const int width, const int kernel_h, const int kernel_w,
+    const int pad_h, const int pad_w,
+    const int stride_h, const int stride_w,
+    const int height_col, const int width_col,
+    Dtype* data_col);
+
+extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
+
+template <typename Dtype>
+class Im2colKernelTest : public GPUDeviceTest<Dtype> {
+ protected:
+  Im2colKernelTest()
+        // big so launches > 1024 threads
+      : blob_bottom_(new Blob<Dtype>(5, 500, 10, 10)),
+        blob_top_(new Blob<Dtype>()),
+        blob_top_cpu_(new Blob<Dtype>()) {
+    FillerParameter filler_param;
+    GaussianFiller<Dtype> filler(filler_param);
+    filler.Fill(this->blob_bottom_);
+
+    height_ = blob_bottom_->height();
+    width_ = blob_bottom_->width();
+    channels_ = blob_bottom_->channels();
+    pad_ = 0;
+    stride_ = 2;
+    kernel_size_ = 3;
+    height_col_ = (height_ + 2 * pad_ - kernel_size_) / stride_ + 1;
+    width_col_ = (width_ + 2 * pad_ - kernel_size_) / stride_ + 1;
+  }
+
+  virtual ~Im2colKernelTest() {
+    delete blob_bottom_;
+    delete blob_top_;
+    delete blob_top_cpu_;
+  }
+
+  Blob<Dtype>* const blob_bottom_;
+  Blob<Dtype>* const blob_top_;
+  Blob<Dtype>* const blob_top_cpu_;
+  int height_;
+  int width_;
+  int channels_;
+  int pad_;
+  int stride_;
+  int kernel_size_;
+  int height_col_;
+  int width_col_;
+};
+
+TYPED_TEST_CASE(Im2colKernelTest, TestDtypes);
+
+TYPED_TEST(Im2colKernelTest, TestGPU) {
+  // Reshape the blobs to correct size for im2col output
+  this->blob_top_->Reshape(this->blob_bottom_->num(),
+      this->channels_ * this->kernel_size_ * this->kernel_size_,
+      this->height_col_,
+      this->width_col_);
+
+  this->blob_top_cpu_->Reshape(this->blob_bottom_->num(),
+      this->channels_ * this->kernel_size_ * this->kernel_size_,
+      this->height_col_,
+      this->width_col_);
+
+  const TypeParam* bottom_data = this->blob_bottom_->gpu_data();
+  TypeParam* top_data = this->blob_top_->mutable_gpu_data();
+  TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data();
+
+  // CPU Version
+  for (int n = 0; n < this->blob_bottom_->num(); ++n) {
+    im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n),
+        this->channels_, this->height_, this->width_,
+        this->kernel_size_, this->kernel_size_, this->pad_, this->pad_,
+        this->stride_, this->stride_,
+        cpu_data + this->blob_top_cpu_->offset(n));
+  }
+
+  // GPU version
+  int num_kernels = this->channels_ * this->height_col_ * this->width_col_;
+  int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels);
+
+  // Launch with different grid sizes
+  for (int grid_div = 2; grid_div <= 8; grid_div++) {
+    for (int n = 0; n < this->blob_bottom_->num(); ++n) {
+      int grid_dim = default_grid_dim/grid_div;
+      // NOLINT_NEXT_LINE(whitespace/operators)
+      im2col_gpu_kernel<TypeParam><<<grid_dim, CAFFE_CUDA_NUM_THREADS>>>(
+          num_kernels, bottom_data + this->blob_bottom_->offset(n),
+          this->height_, this->width_, this->kernel_size_, this->kernel_size_,
+          this->pad_, this->pad_, this->stride_, this->stride_,
+          this->height_col_, this->width_col_,
+          top_data + this->blob_top_->offset(n));
+      CUDA_POST_KERNEL_CHECK;
+    }
+
+    // Compare results against CPU version
+    for (int i = 0; i < this->blob_top_->count(); ++i) {
+      TypeParam cpuval = cpu_data[i];
+      TypeParam gpuval = this->blob_top_->cpu_data()[i];
+      EXPECT_EQ(cpuval, gpuval);
+      if (cpuval != gpuval) {
+        break;
+      }
+    }
+  }
+}
+
+}  // namespace caffe
diff --git a/src/caffe/test/test_im2col_layer.cpp b/src/caffe/test/test_im2col_layer.cpp
new file mode 100755
index 0000000..f50abe1
--- /dev/null
+++ b/src/caffe/test/test_im2col_layer.cpp
@@ -0,0 +1,118 @@
+#include <cstring>
+#include <vector>
+
+#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class Im2colLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: + Im2colLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 5)), + blob_top_(new Blob()) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~Im2colLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(Im2colLayerTest, TestDtypesAndDevices); + +TYPED_TEST(Im2colLayerTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + Im2colLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 27); + EXPECT_EQ(this->blob_top_->height(), 2); + EXPECT_EQ(this->blob_top_->width(), 2); +} + +TYPED_TEST(Im2colLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + Im2colLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // We are lazy and will only check the top left block + for (int c = 0; c < 27; ++c) { + EXPECT_EQ(this->blob_bottom_->data_at(0, (c / 9), (c / 3) % 3, c % 3), + this->blob_top_->data_at(0, c, 0, 0)); + } +} + +TYPED_TEST(Im2colLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + Im2colLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + + +TYPED_TEST(Im2colLayerTest, TestRect) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_h(5); + convolution_param->set_kernel_w(3); + convolution_param->set_stride(2); + Im2colLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // We are lazy and will only check the top left block + for (int c = 0; c < 45; ++c) { + EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0), + this->blob_bottom_->data_at(0, (c / 15), (c / 3) % 5, c % 3)); + } +} + + +TYPED_TEST(Im2colLayerTest, TestRectGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_h(5); + convolution_param->set_kernel_w(3); + 
convolution_param->set_stride(2); + Im2colLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +} // namespace caffe diff --git a/src/caffe/test/test_image_data_layer.cpp b/src/caffe/test/test_image_data_layer.cpp new file mode 100755 index 0000000..931a5eb --- /dev/null +++ b/src/caffe/test/test_image_data_layer.cpp @@ -0,0 +1,179 @@ +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class ImageDataLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + ImageDataLayerTest() + : seed_(1701), + blob_top_data_(new Blob()), + blob_top_label_(new Blob()) {} + virtual void SetUp() { + blob_top_vec_.push_back(blob_top_data_); + blob_top_vec_.push_back(blob_top_label_); + Caffe::set_random_seed(seed_); + // Create test input file. + MakeTempFilename(&filename_); + std::ofstream outfile(filename_.c_str(), std::ofstream::out); + LOG(INFO) << "Using temporary file " << filename_; + for (int i = 0; i < 5; ++i) { + outfile << EXAMPLES_SOURCE_DIR "images/cat.jpg " << i; + } + outfile.close(); + // Create test input file for images of distinct sizes. + MakeTempFilename(&filename_reshape_); + std::ofstream reshapefile(filename_reshape_.c_str(), std::ofstream::out); + LOG(INFO) << "Using temporary file " << filename_reshape_; + reshapefile << EXAMPLES_SOURCE_DIR "images/cat.jpg " << 0; + reshapefile << EXAMPLES_SOURCE_DIR "images/fish-bike.jpg " << 1; + reshapefile.close(); + } + + virtual ~ImageDataLayerTest() { + delete blob_top_data_; + delete blob_top_label_; + } + + int seed_; + string filename_; + string filename_reshape_; + Blob* const blob_top_data_; + Blob* const blob_top_label_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(ImageDataLayerTest, TestDtypesAndDevices); + +TYPED_TEST(ImageDataLayerTest, TestRead) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter param; + ImageDataParameter* image_data_param = param.mutable_image_data_param(); + image_data_param->set_batch_size(5); + image_data_param->set_source(this->filename_.c_str()); + image_data_param->set_shuffle(false); + ImageDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_data_->num(), 5); + EXPECT_EQ(this->blob_top_data_->channels(), 3); + EXPECT_EQ(this->blob_top_data_->height(), 360); + EXPECT_EQ(this->blob_top_data_->width(), 480); + EXPECT_EQ(this->blob_top_label_->num(), 5); + EXPECT_EQ(this->blob_top_label_->channels(), 1); + EXPECT_EQ(this->blob_top_label_->height(), 1); + EXPECT_EQ(this->blob_top_label_->width(), 1); + // Go through the data twice + for (int iter = 0; iter < 2; ++iter) { + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < 5; ++i) { + EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]); + } + } +} + +TYPED_TEST(ImageDataLayerTest, TestResize) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter param; + ImageDataParameter* image_data_param = param.mutable_image_data_param(); + image_data_param->set_batch_size(5); + image_data_param->set_source(this->filename_.c_str()); + image_data_param->set_new_height(256); + image_data_param->set_new_width(256); + 
image_data_param->set_shuffle(false); + ImageDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_data_->num(), 5); + EXPECT_EQ(this->blob_top_data_->channels(), 3); + EXPECT_EQ(this->blob_top_data_->height(), 256); + EXPECT_EQ(this->blob_top_data_->width(), 256); + EXPECT_EQ(this->blob_top_label_->num(), 5); + EXPECT_EQ(this->blob_top_label_->channels(), 1); + EXPECT_EQ(this->blob_top_label_->height(), 1); + EXPECT_EQ(this->blob_top_label_->width(), 1); + // Go through the data twice + for (int iter = 0; iter < 2; ++iter) { + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < 5; ++i) { + EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]); + } + } +} + +TYPED_TEST(ImageDataLayerTest, TestReshape) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter param; + ImageDataParameter* image_data_param = param.mutable_image_data_param(); + image_data_param->set_batch_size(1); + image_data_param->set_source(this->filename_reshape_.c_str()); + image_data_param->set_shuffle(false); + ImageDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_label_->num(), 1); + EXPECT_EQ(this->blob_top_label_->channels(), 1); + EXPECT_EQ(this->blob_top_label_->height(), 1); + EXPECT_EQ(this->blob_top_label_->width(), 1); + // cat.jpg + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_data_->num(), 1); + EXPECT_EQ(this->blob_top_data_->channels(), 3); + EXPECT_EQ(this->blob_top_data_->height(), 360); + EXPECT_EQ(this->blob_top_data_->width(), 480); + // fish-bike.jpg + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_data_->num(), 1); + EXPECT_EQ(this->blob_top_data_->channels(), 3); + EXPECT_EQ(this->blob_top_data_->height(), 323); + EXPECT_EQ(this->blob_top_data_->width(), 481); +} + +TYPED_TEST(ImageDataLayerTest, TestShuffle) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter param; + ImageDataParameter* image_data_param = param.mutable_image_data_param(); + image_data_param->set_batch_size(5); + image_data_param->set_source(this->filename_.c_str()); + image_data_param->set_shuffle(true); + ImageDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_data_->num(), 5); + EXPECT_EQ(this->blob_top_data_->channels(), 3); + EXPECT_EQ(this->blob_top_data_->height(), 360); + EXPECT_EQ(this->blob_top_data_->width(), 480); + EXPECT_EQ(this->blob_top_label_->num(), 5); + EXPECT_EQ(this->blob_top_label_->channels(), 1); + EXPECT_EQ(this->blob_top_label_->height(), 1); + EXPECT_EQ(this->blob_top_label_->width(), 1); + // Go through the data twice + for (int iter = 0; iter < 2; ++iter) { + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + map values_to_indices; + int num_in_order = 0; + for (int i = 0; i < 5; ++i) { + Dtype value = this->blob_top_label_->cpu_data()[i]; + // Check that the value has not been seen already (no duplicates). 
+ EXPECT_EQ(values_to_indices.find(value), values_to_indices.end()); + values_to_indices[value] = i; + num_in_order += (value == Dtype(i)); + } + EXPECT_EQ(5, values_to_indices.size()); + EXPECT_GT(5, num_in_order); + } +} + +} // namespace caffe diff --git a/src/caffe/test/test_infogain_loss_layer.cpp b/src/caffe/test/test_infogain_loss_layer.cpp new file mode 100755 index 0000000..7ec2f80 --- /dev/null +++ b/src/caffe/test/test_infogain_loss_layer.cpp @@ -0,0 +1,70 @@ +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/loss_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class InfogainLossLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + InfogainLossLayerTest() + : blob_bottom_data_(new Blob(10, 5, 1, 1)), + blob_bottom_label_(new Blob(10, 1, 1, 1)), + blob_bottom_infogain_(new Blob(1, 1, 5, 5)), + blob_top_loss_(new Blob()) { + Caffe::set_random_seed(1701); + FillerParameter filler_param; + PositiveUnitballFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); + for (int i = 0; i < blob_bottom_label_->count(); ++i) { + blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; + } + blob_bottom_vec_.push_back(blob_bottom_label_); + filler_param.set_min(0.1); + filler_param.set_max(2.0); + UniformFiller infogain_filler(filler_param); + infogain_filler.Fill(this->blob_bottom_infogain_); + blob_bottom_vec_.push_back(blob_bottom_infogain_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~InfogainLossLayerTest() { + delete blob_bottom_data_; + delete blob_bottom_label_; + delete blob_bottom_infogain_; + delete blob_top_loss_; + } + Blob* const blob_bottom_data_; + Blob* const blob_bottom_label_; + Blob* const blob_bottom_infogain_; + Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(InfogainLossLayerTest, TestDtypesAndDevices); + + +TYPED_TEST(InfogainLossLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + InfogainLossLayer layer(layer_param); + GradientChecker checker(1e-4, 2e-2, 1701, 1, 0.01); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); +} + +} // namespace caffe diff --git a/src/caffe/test/test_inner_product_layer.cpp b/src/caffe/test/test_inner_product_layer.cpp new file mode 100755 index 0000000..fbf0c85 --- /dev/null +++ b/src/caffe/test/test_inner_product_layer.cpp @@ -0,0 +1,152 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +#ifndef CPU_ONLY +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; +#endif + +template +class InnerProductLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: + InnerProductLayerTest() + : blob_bottom_(new Blob(2, 3, 4, 5)), + blob_bottom_nobatch_(new Blob(1, 2, 3, 4)), + blob_top_(new Blob()) { + // fill the values + FillerParameter filler_param; + UniformFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~InnerProductLayerTest() { 
+ delete blob_bottom_; + delete blob_bottom_nobatch_; + delete blob_top_; + } + Blob* const blob_bottom_; + Blob* const blob_bottom_nobatch_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(InnerProductLayerTest, TestDtypesAndDevices); + +TYPED_TEST(InnerProductLayerTest, TestSetUp) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_); + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(10); + shared_ptr > layer( + new InnerProductLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); + EXPECT_EQ(this->blob_top_->channels(), 10); +} + +TYPED_TEST(InnerProductLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_); + bool IS_VALID_CUDA = false; +#ifndef CPU_ONLY + IS_VALID_CUDA = CAFFE_TEST_CUDA_PROP.major >= 2; +#endif + if (Caffe::mode() == Caffe::CPU || + sizeof(Dtype) == 4 || IS_VALID_CUDA) { + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(10); + inner_product_param->mutable_weight_filler()->set_type("uniform"); + inner_product_param->mutable_bias_filler()->set_type("uniform"); + inner_product_param->mutable_bias_filler()->set_min(1); + inner_product_param->mutable_bias_filler()->set_max(2); + shared_ptr > layer( + new InnerProductLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + for (int i = 0; i < count; ++i) { + EXPECT_GE(data[i], 1.); + } + } else { + LOG(ERROR) << "Skipping test due to old architecture."; + } +} + +TYPED_TEST(InnerProductLayerTest, TestForwardNoBatch) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_nobatch_); + bool IS_VALID_CUDA = false; +#ifndef CPU_ONLY + IS_VALID_CUDA = CAFFE_TEST_CUDA_PROP.major >= 2; +#endif + if (Caffe::mode() == Caffe::CPU || + sizeof(Dtype) == 4 || IS_VALID_CUDA) { + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(10); + inner_product_param->mutable_weight_filler()->set_type("uniform"); + inner_product_param->mutable_bias_filler()->set_type("uniform"); + inner_product_param->mutable_bias_filler()->set_min(1); + inner_product_param->mutable_bias_filler()->set_max(2); + shared_ptr > layer( + new InnerProductLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + for (int i = 0; i < count; ++i) { + EXPECT_GE(data[i], 1.); + } + } else { + LOG(ERROR) << "Skipping test due to old architecture."; + } +} + +TYPED_TEST(InnerProductLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_); + bool IS_VALID_CUDA = false; +#ifndef CPU_ONLY + IS_VALID_CUDA = CAFFE_TEST_CUDA_PROP.major >= 2; +#endif + if (Caffe::mode() == 
Caffe::CPU || + sizeof(Dtype) == 4 || IS_VALID_CUDA) { + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(10); + inner_product_param->mutable_weight_filler()->set_type("gaussian"); + inner_product_param->mutable_bias_filler()->set_type("gaussian"); + inner_product_param->mutable_bias_filler()->set_min(1); + inner_product_param->mutable_bias_filler()->set_max(2); + InnerProductLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } else { + LOG(ERROR) << "Skipping test due to old architecture."; + } +} + +} // namespace caffe diff --git a/src/caffe/test/test_internal_thread.cpp b/src/caffe/test/test_internal_thread.cpp new file mode 100755 index 0000000..93f1cc5 --- /dev/null +++ b/src/caffe/test/test_internal_thread.cpp @@ -0,0 +1,53 @@ +#include "glog/logging.h" +#include "gtest/gtest.h" + +#include "caffe/internal_thread.hpp" +#include "caffe/util/math_functions.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + + +class InternalThreadTest : public ::testing::Test {}; + +TEST_F(InternalThreadTest, TestStartAndExit) { + InternalThread thread; + EXPECT_FALSE(thread.is_started()); + thread.StartInternalThread(); + EXPECT_TRUE(thread.is_started()); + thread.StopInternalThread(); + EXPECT_FALSE(thread.is_started()); +} + +class TestThreadA : public InternalThread { + void InternalThreadEntry() { + EXPECT_EQ(4244559767, caffe_rng_rand()); + } +}; + +class TestThreadB : public InternalThread { + void InternalThreadEntry() { + EXPECT_EQ(1726478280, caffe_rng_rand()); + } +}; + +TEST_F(InternalThreadTest, TestRandomSeed) { + TestThreadA t1; + Caffe::set_random_seed(9658361); + t1.StartInternalThread(); + t1.StopInternalThread(); + + TestThreadA t2; + Caffe::set_random_seed(9658361); + t2.StartInternalThread(); + t2.StopInternalThread(); + + TestThreadB t3; + Caffe::set_random_seed(3435563); + t3.StartInternalThread(); + t3.StopInternalThread(); +} + +} // namespace caffe + diff --git a/src/caffe/test/test_io.cpp b/src/caffe/test/test_io.cpp new file mode 100755 index 0000000..4ab9631 --- /dev/null +++ b/src/caffe/test/test_io.cpp @@ -0,0 +1,422 @@ +#include +#include +#include +#include + +#include + +#include "gtest/gtest.h" + +#include "caffe/common.hpp" +#include "caffe/util/io.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +class IOTest : public ::testing::Test {}; + +bool ReadImageToDatumReference(const string& filename, const int label, + const int height, const int width, const bool is_color, Datum* datum) { + cv::Mat cv_img; + int cv_read_flag = (is_color ? CV_LOAD_IMAGE_COLOR : + CV_LOAD_IMAGE_GRAYSCALE); + + cv::Mat cv_img_origin = cv::imread(filename, cv_read_flag); + if (!cv_img_origin.data) { + LOG(ERROR) << "Could not open or find file " << filename; + return false; + } + if (height > 0 && width > 0) { + cv::resize(cv_img_origin, cv_img, cv::Size(width, height)); + } else { + cv_img = cv_img_origin; + } + + int num_channels = (is_color ? 
3 : 1); + datum->set_channels(num_channels); + datum->set_height(cv_img.rows); + datum->set_width(cv_img.cols); + datum->set_label(label); + datum->clear_data(); + datum->clear_float_data(); + string* datum_string = datum->mutable_data(); + if (is_color) { + for (int c = 0; c < num_channels; ++c) { + for (int h = 0; h < cv_img.rows; ++h) { + for (int w = 0; w < cv_img.cols; ++w) { + datum_string->push_back( + static_cast(cv_img.at(h, w)[c])); + } + } + } + } else { // Faster than repeatedly testing is_color for each pixel w/i loop + for (int h = 0; h < cv_img.rows; ++h) { + for (int w = 0; w < cv_img.cols; ++w) { + datum_string->push_back( + static_cast(cv_img.at(h, w))); + } + } + } + return true; +} + +TEST_F(IOTest, TestReadImageToDatum) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + Datum datum; + ReadImageToDatum(filename, 0, &datum); + EXPECT_EQ(datum.channels(), 3); + EXPECT_EQ(datum.height(), 360); + EXPECT_EQ(datum.width(), 480); +} + +TEST_F(IOTest, TestReadImageToDatumReference) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + Datum datum, datum_ref; + ReadImageToDatum(filename, 0, 0, 0, true, &datum); + ReadImageToDatumReference(filename, 0, 0, 0, true, &datum_ref); + EXPECT_EQ(datum.channels(), datum_ref.channels()); + EXPECT_EQ(datum.height(), datum_ref.height()); + EXPECT_EQ(datum.width(), datum_ref.width()); + EXPECT_EQ(datum.data().size(), datum_ref.data().size()); + + const string& data = datum.data(); + const string& data_ref = datum.data(); + + for (int i = 0; i < datum.data().size(); ++i) { + EXPECT_TRUE(data[i] == data_ref[i]); + } +} + + +TEST_F(IOTest, TestReadImageToDatumReferenceResized) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + Datum datum, datum_ref; + ReadImageToDatum(filename, 0, 100, 200, true, &datum); + ReadImageToDatumReference(filename, 0, 100, 200, true, &datum_ref); + EXPECT_EQ(datum.channels(), datum_ref.channels()); + EXPECT_EQ(datum.height(), datum_ref.height()); + EXPECT_EQ(datum.width(), datum_ref.width()); + EXPECT_EQ(datum.data().size(), datum_ref.data().size()); + + const string& data = datum.data(); + const string& data_ref = datum.data(); + + for (int i = 0; i < datum.data().size(); ++i) { + EXPECT_TRUE(data[i] == data_ref[i]); + } +} + +TEST_F(IOTest, TestReadImageToDatumContent) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + Datum datum; + ReadImageToDatum(filename, 0, &datum); + cv::Mat cv_img = ReadImageToCVMat(filename); + EXPECT_EQ(datum.channels(), cv_img.channels()); + EXPECT_EQ(datum.height(), cv_img.rows); + EXPECT_EQ(datum.width(), cv_img.cols); + + const string& data = datum.data(); + int index = 0; + for (int c = 0; c < datum.channels(); ++c) { + for (int h = 0; h < datum.height(); ++h) { + for (int w = 0; w < datum.width(); ++w) { + EXPECT_TRUE(data[index++] == + static_cast(cv_img.at(h, w)[c])); + } + } + } +} + +TEST_F(IOTest, TestReadImageToDatumContentGray) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + Datum datum; + const bool is_color = false; + ReadImageToDatum(filename, 0, is_color, &datum); + cv::Mat cv_img = ReadImageToCVMat(filename, is_color); + EXPECT_EQ(datum.channels(), cv_img.channels()); + EXPECT_EQ(datum.height(), cv_img.rows); + EXPECT_EQ(datum.width(), cv_img.cols); + + const string& data = datum.data(); + int index = 0; + for (int h = 0; h < datum.height(); ++h) { + for (int w = 0; w < datum.width(); ++w) { + EXPECT_TRUE(data[index++] == static_cast(cv_img.at(h, w))); + } + } +} + +TEST_F(IOTest, TestReadImageToDatumResized) { + 
string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + Datum datum; + ReadImageToDatum(filename, 0, 100, 200, &datum); + EXPECT_EQ(datum.channels(), 3); + EXPECT_EQ(datum.height(), 100); + EXPECT_EQ(datum.width(), 200); +} + + +TEST_F(IOTest, TestReadImageToDatumResizedSquare) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + Datum datum; + ReadImageToDatum(filename, 0, 256, 256, &datum); + EXPECT_EQ(datum.channels(), 3); + EXPECT_EQ(datum.height(), 256); + EXPECT_EQ(datum.width(), 256); +} + +TEST_F(IOTest, TestReadImageToDatumGray) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + Datum datum; + const bool is_color = false; + ReadImageToDatum(filename, 0, is_color, &datum); + EXPECT_EQ(datum.channels(), 1); + EXPECT_EQ(datum.height(), 360); + EXPECT_EQ(datum.width(), 480); +} + +TEST_F(IOTest, TestReadImageToDatumResizedGray) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + Datum datum; + const bool is_color = false; + ReadImageToDatum(filename, 0, 256, 256, is_color, &datum); + EXPECT_EQ(datum.channels(), 1); + EXPECT_EQ(datum.height(), 256); + EXPECT_EQ(datum.width(), 256); +} + +TEST_F(IOTest, TestReadImageToCVMat) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + cv::Mat cv_img = ReadImageToCVMat(filename); + EXPECT_EQ(cv_img.channels(), 3); + EXPECT_EQ(cv_img.rows, 360); + EXPECT_EQ(cv_img.cols, 480); +} + +TEST_F(IOTest, TestReadImageToCVMatResized) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + cv::Mat cv_img = ReadImageToCVMat(filename, 100, 200); + EXPECT_EQ(cv_img.channels(), 3); + EXPECT_EQ(cv_img.rows, 100); + EXPECT_EQ(cv_img.cols, 200); +} + +TEST_F(IOTest, TestReadImageToCVMatResizedSquare) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + cv::Mat cv_img = ReadImageToCVMat(filename, 256, 256); + EXPECT_EQ(cv_img.channels(), 3); + EXPECT_EQ(cv_img.rows, 256); + EXPECT_EQ(cv_img.cols, 256); +} + +TEST_F(IOTest, TestReadImageToCVMatGray) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + const bool is_color = false; + cv::Mat cv_img = ReadImageToCVMat(filename, is_color); + EXPECT_EQ(cv_img.channels(), 1); + EXPECT_EQ(cv_img.rows, 360); + EXPECT_EQ(cv_img.cols, 480); +} + +TEST_F(IOTest, TestReadImageToCVMatResizedGray) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + const bool is_color = false; + cv::Mat cv_img = ReadImageToCVMat(filename, 256, 256, is_color); + EXPECT_EQ(cv_img.channels(), 1); + EXPECT_EQ(cv_img.rows, 256); + EXPECT_EQ(cv_img.cols, 256); +} + +TEST_F(IOTest, TestCVMatToDatum) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + cv::Mat cv_img = ReadImageToCVMat(filename); + Datum datum; + CVMatToDatum(cv_img, &datum); + EXPECT_EQ(datum.channels(), 3); + EXPECT_EQ(datum.height(), 360); + EXPECT_EQ(datum.width(), 480); +} + +TEST_F(IOTest, TestCVMatToDatumContent) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + cv::Mat cv_img = ReadImageToCVMat(filename); + Datum datum; + CVMatToDatum(cv_img, &datum); + Datum datum_ref; + ReadImageToDatum(filename, 0, &datum_ref); + EXPECT_EQ(datum.channels(), datum_ref.channels()); + EXPECT_EQ(datum.height(), datum_ref.height()); + EXPECT_EQ(datum.width(), datum_ref.width()); + EXPECT_EQ(datum.data().size(), datum_ref.data().size()); + + const string& data = datum.data(); + const string& data_ref = datum_ref.data(); + for (int i = 0; i < datum.data().size(); ++i) { + EXPECT_TRUE(data[i] == data_ref[i]); + } +} + +TEST_F(IOTest, TestCVMatToDatumReference) { + string filename = EXAMPLES_SOURCE_DIR 
"images/cat.jpg"; + cv::Mat cv_img = ReadImageToCVMat(filename); + Datum datum; + CVMatToDatum(cv_img, &datum); + Datum datum_ref; + ReadImageToDatumReference(filename, 0, 0, 0, true, &datum_ref); + EXPECT_EQ(datum.channels(), datum_ref.channels()); + EXPECT_EQ(datum.height(), datum_ref.height()); + EXPECT_EQ(datum.width(), datum_ref.width()); + EXPECT_EQ(datum.data().size(), datum_ref.data().size()); + + const string& data = datum.data(); + const string& data_ref = datum_ref.data(); + for (int i = 0; i < datum.data().size(); ++i) { + EXPECT_TRUE(data[i] == data_ref[i]); + } +} + +TEST_F(IOTest, TestReadFileToDatum) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + Datum datum; + EXPECT_TRUE(ReadFileToDatum(filename, &datum)); + EXPECT_TRUE(datum.encoded()); + EXPECT_EQ(datum.label(), -1); + EXPECT_EQ(datum.data().size(), 140391); +} + +TEST_F(IOTest, TestDecodeDatum) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + Datum datum; + EXPECT_TRUE(ReadFileToDatum(filename, &datum)); + EXPECT_TRUE(DecodeDatum(&datum, true)); + EXPECT_FALSE(DecodeDatum(&datum, true)); + Datum datum_ref; + ReadImageToDatumReference(filename, 0, 0, 0, true, &datum_ref); + EXPECT_EQ(datum.channels(), datum_ref.channels()); + EXPECT_EQ(datum.height(), datum_ref.height()); + EXPECT_EQ(datum.width(), datum_ref.width()); + EXPECT_EQ(datum.data().size(), datum_ref.data().size()); + + const string& data = datum.data(); + const string& data_ref = datum_ref.data(); + for (int i = 0; i < datum.data().size(); ++i) { + EXPECT_TRUE(data[i] == data_ref[i]); + } +} + +TEST_F(IOTest, TestDecodeDatumToCVMat) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + Datum datum; + EXPECT_TRUE(ReadFileToDatum(filename, &datum)); + cv::Mat cv_img = DecodeDatumToCVMat(datum, true); + EXPECT_EQ(cv_img.channels(), 3); + EXPECT_EQ(cv_img.rows, 360); + EXPECT_EQ(cv_img.cols, 480); + cv_img = DecodeDatumToCVMat(datum, false); + EXPECT_EQ(cv_img.channels(), 1); + EXPECT_EQ(cv_img.rows, 360); + EXPECT_EQ(cv_img.cols, 480); +} + +TEST_F(IOTest, TestDecodeDatumToCVMatContent) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + Datum datum; + EXPECT_TRUE(ReadImageToDatum(filename, 0, std::string("jpg"), &datum)); + cv::Mat cv_img = DecodeDatumToCVMat(datum, true); + cv::Mat cv_img_ref = ReadImageToCVMat(filename); + EXPECT_EQ(cv_img_ref.channels(), cv_img.channels()); + EXPECT_EQ(cv_img_ref.rows, cv_img.rows); + EXPECT_EQ(cv_img_ref.cols, cv_img.cols); + + for (int c = 0; c < datum.channels(); ++c) { + for (int h = 0; h < datum.height(); ++h) { + for (int w = 0; w < datum.width(); ++w) { + EXPECT_TRUE(cv_img.at(h, w)[c]== + cv_img_ref.at(h, w)[c]); + } + } + } +} + +TEST_F(IOTest, TestDecodeDatumNative) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + Datum datum; + EXPECT_TRUE(ReadFileToDatum(filename, &datum)); + EXPECT_TRUE(DecodeDatumNative(&datum)); + EXPECT_FALSE(DecodeDatumNative(&datum)); + Datum datum_ref; + ReadImageToDatumReference(filename, 0, 0, 0, true, &datum_ref); + EXPECT_EQ(datum.channels(), datum_ref.channels()); + EXPECT_EQ(datum.height(), datum_ref.height()); + EXPECT_EQ(datum.width(), datum_ref.width()); + EXPECT_EQ(datum.data().size(), datum_ref.data().size()); + + const string& data = datum.data(); + const string& data_ref = datum_ref.data(); + for (int i = 0; i < datum.data().size(); ++i) { + EXPECT_TRUE(data[i] == data_ref[i]); + } +} + +TEST_F(IOTest, TestDecodeDatumToCVMatNative) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + Datum datum; + 
EXPECT_TRUE(ReadFileToDatum(filename, &datum)); + cv::Mat cv_img = DecodeDatumToCVMatNative(datum); + EXPECT_EQ(cv_img.channels(), 3); + EXPECT_EQ(cv_img.rows, 360); + EXPECT_EQ(cv_img.cols, 480); +} + +TEST_F(IOTest, TestDecodeDatumNativeGray) { + string filename = EXAMPLES_SOURCE_DIR "images/cat_gray.jpg"; + Datum datum; + EXPECT_TRUE(ReadFileToDatum(filename, &datum)); + EXPECT_TRUE(DecodeDatumNative(&datum)); + EXPECT_FALSE(DecodeDatumNative(&datum)); + Datum datum_ref; + ReadImageToDatumReference(filename, 0, 0, 0, false, &datum_ref); + EXPECT_EQ(datum.channels(), datum_ref.channels()); + EXPECT_EQ(datum.height(), datum_ref.height()); + EXPECT_EQ(datum.width(), datum_ref.width()); + EXPECT_EQ(datum.data().size(), datum_ref.data().size()); + + const string& data = datum.data(); + const string& data_ref = datum_ref.data(); + for (int i = 0; i < datum.data().size(); ++i) { + EXPECT_TRUE(data[i] == data_ref[i]); + } +} + +TEST_F(IOTest, TestDecodeDatumToCVMatNativeGray) { + string filename = EXAMPLES_SOURCE_DIR "images/cat_gray.jpg"; + Datum datum; + EXPECT_TRUE(ReadFileToDatum(filename, &datum)); + cv::Mat cv_img = DecodeDatumToCVMatNative(datum); + EXPECT_EQ(cv_img.channels(), 1); + EXPECT_EQ(cv_img.rows, 360); + EXPECT_EQ(cv_img.cols, 480); +} + +TEST_F(IOTest, TestDecodeDatumToCVMatContentNative) { + string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg"; + Datum datum; + EXPECT_TRUE(ReadImageToDatum(filename, 0, std::string("jpg"), &datum)); + cv::Mat cv_img = DecodeDatumToCVMatNative(datum); + cv::Mat cv_img_ref = ReadImageToCVMat(filename); + EXPECT_EQ(cv_img_ref.channels(), cv_img.channels()); + EXPECT_EQ(cv_img_ref.rows, cv_img.rows); + EXPECT_EQ(cv_img_ref.cols, cv_img.cols); + + for (int c = 0; c < datum.channels(); ++c) { + for (int h = 0; h < datum.height(); ++h) { + for (int w = 0; w < datum.width(); ++w) { + EXPECT_TRUE(cv_img.at(h, w)[c]== + cv_img_ref.at(h, w)[c]); + } + } + } +} + +} // namespace caffe diff --git a/src/caffe/test/test_layer_factory.cpp b/src/caffe/test/test_layer_factory.cpp new file mode 100755 index 0000000..c86fafd --- /dev/null +++ b/src/caffe/test/test_layer_factory.cpp @@ -0,0 +1,47 @@ +#include +#include + +#include "boost/scoped_ptr.hpp" +#include "gtest/gtest.h" + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/layer_factory.hpp" +#include "caffe/util/db.hpp" +#include "caffe/util/io.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class LayerFactoryTest : public MultiDeviceTest {}; + +TYPED_TEST_CASE(LayerFactoryTest, TestDtypesAndDevices); + +TYPED_TEST(LayerFactoryTest, TestCreateLayer) { + typedef typename TypeParam::Dtype Dtype; + typename LayerRegistry::CreatorRegistry& registry = + LayerRegistry::Registry(); + shared_ptr > layer; + for (typename LayerRegistry::CreatorRegistry::iterator iter = + registry.begin(); iter != registry.end(); ++iter) { + // Special case: PythonLayer is checked by pytest + if (iter->first == "Python") { continue; } + LayerParameter layer_param; + // Data layers expect a DB + if (iter->first == "Data") { + string tmp; + MakeTempDir(&tmp); + boost::scoped_ptr db(db::GetDB(DataParameter_DB_LEVELDB)); + db->Open(tmp, db::NEW); + db->Close(); + layer_param.mutable_data_param()->set_source(tmp); + } + layer_param.set_type(iter->first); + layer = LayerRegistry::CreateLayer(layer_param); + EXPECT_EQ(iter->first, layer->type()); + } +} + +} // namespace caffe diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp new file mode 100755 
index 0000000..c4e2f8e --- /dev/null +++ b/src/caffe/test/test_lrn_layer.cpp @@ -0,0 +1,250 @@ +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +using std::min; +using std::max; + +namespace caffe { + +template +class LRNLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + LRNLayerTest() + : epsilon_(Dtype(1e-5)), + blob_bottom_(new Blob()), + blob_top_(new Blob()) {} + virtual void SetUp() { + Caffe::set_random_seed(1701); + blob_bottom_->Reshape(2, 7, 3, 3); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~LRNLayerTest() { delete blob_bottom_; delete blob_top_; } + void ReferenceLRNForward(const Blob& blob_bottom, + const LayerParameter& layer_param, Blob* blob_top); + + Dtype epsilon_; + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +template +void LRNLayerTest::ReferenceLRNForward( + const Blob& blob_bottom, const LayerParameter& layer_param, + Blob* blob_top) { + typedef typename TypeParam::Dtype Dtype; + blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(), + blob_bottom.height(), blob_bottom.width()); + Dtype* top_data = blob_top->mutable_cpu_data(); + LRNParameter lrn_param = layer_param.lrn_param(); + Dtype alpha = lrn_param.alpha(); + Dtype beta = lrn_param.beta(); + int size = lrn_param.local_size(); + switch (lrn_param.norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + for (int n = 0; n < blob_bottom.num(); ++n) { + for (int c = 0; c < blob_bottom.channels(); ++c) { + for (int h = 0; h < blob_bottom.height(); ++h) { + for (int w = 0; w < blob_bottom.width(); ++w) { + int c_start = c - (size - 1) / 2; + int c_end = min(c_start + size, blob_bottom.channels()); + c_start = max(c_start, 0); + Dtype scale = 1.; + for (int i = c_start; i < c_end; ++i) { + Dtype value = blob_bottom.data_at(n, i, h, w); + scale += value * value * alpha / size; + } + *(top_data + blob_top->offset(n, c, h, w)) = + blob_bottom.data_at(n, c, h, w) / pow(scale, beta); + } + } + } + } + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + for (int n = 0; n < blob_bottom.num(); ++n) { + for (int c = 0; c < blob_bottom.channels(); ++c) { + for (int h = 0; h < blob_bottom.height(); ++h) { + int h_start = h - (size - 1) / 2; + int h_end = min(h_start + size, blob_bottom.height()); + h_start = max(h_start, 0); + for (int w = 0; w < blob_bottom.width(); ++w) { + Dtype scale = 1.; + int w_start = w - (size - 1) / 2; + int w_end = min(w_start + size, blob_bottom.width()); + w_start = max(w_start, 0); + for (int nh = h_start; nh < h_end; ++nh) { + for (int nw = w_start; nw < w_end; ++nw) { + Dtype value = blob_bottom.data_at(n, c, nh, nw); + scale += value * value * alpha / (size * size); + } + } + *(top_data + blob_top->offset(n, c, h, w)) = + blob_bottom.data_at(n, c, h, w) / pow(scale, beta); + } + } + } + } + break; + default: + LOG(FATAL) << "Unknown normalization region."; + } +} + +TYPED_TEST_CASE(LRNLayerTest, TestDtypesAndDevices); + +TYPED_TEST(LRNLayerTest, TestSetupAcrossChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + LRNLayer 
layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 7); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); +} + +TYPED_TEST(LRNLayerTest, TestForwardAcrossChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + LRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } +} + +TYPED_TEST(LRNLayerTest, TestForwardAcrossChannelsLargeRegion) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_local_size(15); + LRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } +} + +TYPED_TEST(LRNLayerTest, TestGradientAcrossChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + LRNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + vector propagate_down(this->blob_bottom_vec_.size(), true); + layer.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + // for (int i = 0; i < this->blob_bottom_->count(); ++i) { + // std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i] + // << std::endl; + // } + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(LRNLayerTest, TestGradientAcrossChannelsLargeRegion) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_local_size(15); + LRNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + vector propagate_down(this->blob_bottom_vec_.size(), true); + layer.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + // for (int i = 0; i < this->blob_bottom_->count(); ++i) { + // std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i] + // << std::endl; + // } + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(LRNLayerTest, TestSetupWithinChannel) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_norm_region( + LRNParameter_NormRegion_WITHIN_CHANNEL); + layer_param.mutable_lrn_param()->set_local_size(3); + LRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + 
EXPECT_EQ(this->blob_top_->channels(), 7); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); +} + +TYPED_TEST(LRNLayerTest, TestForwardWithinChannel) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_norm_region( + LRNParameter_NormRegion_WITHIN_CHANNEL); + layer_param.mutable_lrn_param()->set_local_size(3); + LRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } +} + +TYPED_TEST(LRNLayerTest, TestGradientWithinChannel) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_norm_region( + LRNParameter_NormRegion_WITHIN_CHANNEL); + layer_param.mutable_lrn_param()->set_local_size(3); + LRNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + + +} // namespace caffe diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp new file mode 100755 index 0000000..a095b54 --- /dev/null +++ b/src/caffe/test/test_math_functions.cpp @@ -0,0 +1,246 @@ +#include // for uint32_t & uint64_t +#include +#include +#include // for std::fabs +#include // for rand_r + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/util/math_functions.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class MathFunctionsTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MathFunctionsTest() + : blob_bottom_(new Blob()), + blob_top_(new Blob()) { + } + + virtual void SetUp() { + Caffe::set_random_seed(1701); + this->blob_bottom_->Reshape(11, 17, 19, 23); + this->blob_top_->Reshape(11, 17, 19, 23); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + filler.Fill(this->blob_top_); + } + + virtual ~MathFunctionsTest() { + delete blob_bottom_; + delete blob_top_; + } + + // http://en.wikipedia.org/wiki/Hamming_distance + int ReferenceHammingDistance(const int n, const Dtype* x, const Dtype* y) { + int dist = 0; + uint64_t val; + for (int i = 0; i < n; ++i) { + if (sizeof(Dtype) == 8) { + val = static_cast(x[i]) ^ static_cast(y[i]); + } else if (sizeof(Dtype) == 4) { + val = static_cast(x[i]) ^ static_cast(y[i]); + } else { + LOG(FATAL) << "Unrecognized Dtype size: " << sizeof(Dtype); + } + // Count the number of set bits + while (val) { + ++dist; + val &= val - 1; + } + } + return dist; + } + + Blob* const blob_bottom_; + Blob* const blob_top_; +}; + +template +class CPUMathFunctionsTest + : public MathFunctionsTest > { +}; + +TYPED_TEST_CASE(CPUMathFunctionsTest, TestDtypes); + +TYPED_TEST(CPUMathFunctionsTest, TestNothing) { + // The first test case of a test suite takes the longest time + // due to the set up overhead. 
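
Note: the ReferenceLRNForward helper earlier in this file spells out the normalization the LRN tests check against. Restating it (nothing here beyond what the loops above already do): for ACROSS_CHANNELS each output equals the input divided by (1 + alpha/local_size * sum of squared inputs over a window of up to local_size channels centred on c) raised to beta; WITHIN_CHANNEL uses a local_size x local_size spatial window with the sum scaled by alpha/(local_size * local_size). A minimal scalar sketch of the across-channels case, illustrative only and not part of the patch:

// Compact restatement of the across-channels reference used by these tests.
// `window` holds the input values in the local channel window around `center`.
#include <cmath>
#include <vector>

double lrn_across_channels(double center, const std::vector<double>& window,
                           double alpha, double beta, int local_size) {
  double scale = 1.0;
  for (size_t i = 0; i < window.size(); ++i) {
    scale += window[i] * window[i] * alpha / local_size;  // alpha is divided by the window size
  }
  return center / std::pow(scale, beta);
}
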
+} + +TYPED_TEST(CPUMathFunctionsTest, TestHammingDistance) { + int n = this->blob_bottom_->count(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + const TypeParam* y = this->blob_top_->cpu_data(); + EXPECT_EQ(this->ReferenceHammingDistance(n, x, y), + caffe_cpu_hamming_distance(n, x, y)); +} + +TYPED_TEST(CPUMathFunctionsTest, TestAsum) { + int n = this->blob_bottom_->count(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + TypeParam std_asum = 0; + for (int i = 0; i < n; ++i) { + std_asum += std::fabs(x[i]); + } + TypeParam cpu_asum = caffe_cpu_asum(n, x); + EXPECT_LT((cpu_asum - std_asum) / std_asum, 1e-2); +} + +TYPED_TEST(CPUMathFunctionsTest, TestSign) { + int n = this->blob_bottom_->count(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + caffe_cpu_sign(n, x, this->blob_bottom_->mutable_cpu_diff()); + const TypeParam* signs = this->blob_bottom_->cpu_diff(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(signs[i], x[i] > 0 ? 1 : (x[i] < 0 ? -1 : 0)); + } +} + +TYPED_TEST(CPUMathFunctionsTest, TestSgnbit) { + int n = this->blob_bottom_->count(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + caffe_cpu_sgnbit(n, x, this->blob_bottom_->mutable_cpu_diff()); + const TypeParam* signbits = this->blob_bottom_->cpu_diff(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(signbits[i], x[i] < 0 ? 1 : 0); + } +} + +TYPED_TEST(CPUMathFunctionsTest, TestFabs) { + int n = this->blob_bottom_->count(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + caffe_abs(n, x, this->blob_bottom_->mutable_cpu_diff()); + const TypeParam* abs_val = this->blob_bottom_->cpu_diff(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(abs_val[i], x[i] > 0 ? x[i] : -x[i]); + } +} + +TYPED_TEST(CPUMathFunctionsTest, TestScale) { + int n = this->blob_bottom_->count(); + TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % + this->blob_bottom_->count()]; + caffe_cpu_scale(n, alpha, this->blob_bottom_->cpu_data(), + this->blob_bottom_->mutable_cpu_diff()); + const TypeParam* scaled = this->blob_bottom_->cpu_diff(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(scaled[i], x[i] * alpha); + } +} + +TYPED_TEST(CPUMathFunctionsTest, TestCopy) { + const int n = this->blob_bottom_->count(); + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + TypeParam* top_data = this->blob_top_->mutable_cpu_data(); + caffe_copy(n, bottom_data, top_data); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(bottom_data[i], top_data[i]); + } +} + +#ifndef CPU_ONLY + +template +class GPUMathFunctionsTest : public MathFunctionsTest > { +}; + +TYPED_TEST_CASE(GPUMathFunctionsTest, TestDtypes); + +// TODO: Fix caffe_gpu_hamming_distance and re-enable this test. 
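
Note: TestHammingDistance compares caffe_cpu_hamming_distance against the fixture's ReferenceHammingDistance, which casts each element to an unsigned integer (presumably static_cast<uint32_t> for 4-byte types and static_cast<uint64_t> for 8-byte types), XORs the pair, and counts set bits with the val &= val - 1 trick. The GPU variant that follows carries gtest's DISABLED_ prefix, so it is compiled but skipped at run time, matching the TODO above. A self-contained sketch of the same counting idea:

// Standalone sketch mirroring the reference popcount loop (illustrative only).
#include <stdint.h>

int hamming_distance(float a, float b) {
  uint32_t val = static_cast<uint32_t>(a) ^ static_cast<uint32_t>(b);
  int dist = 0;
  while (val) {
    ++dist;
    val &= val - 1;  // clears the lowest set bit (Kernighan's trick)
  }
  return dist;
}
// e.g. hamming_distance(3.0f, 5.0f) == 2  (binary 011 vs 101 differ in two bits)
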
+TYPED_TEST(GPUMathFunctionsTest, DISABLED_TestHammingDistance) { + int n = this->blob_bottom_->count(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + const TypeParam* y = this->blob_top_->cpu_data(); + int reference_distance = this->ReferenceHammingDistance(n, x, y); + x = this->blob_bottom_->gpu_data(); + y = this->blob_top_->gpu_data(); + int computed_distance = caffe_gpu_hamming_distance(n, x, y); + EXPECT_EQ(reference_distance, computed_distance); +} + +TYPED_TEST(GPUMathFunctionsTest, TestAsum) { + int n = this->blob_bottom_->count(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + TypeParam std_asum = 0; + for (int i = 0; i < n; ++i) { + std_asum += std::fabs(x[i]); + } + TypeParam gpu_asum; + caffe_gpu_asum(n, this->blob_bottom_->gpu_data(), &gpu_asum); + EXPECT_LT((gpu_asum - std_asum) / std_asum, 1e-2); +} + +TYPED_TEST(GPUMathFunctionsTest, TestSign) { + int n = this->blob_bottom_->count(); + caffe_gpu_sign(n, this->blob_bottom_->gpu_data(), + this->blob_bottom_->mutable_gpu_diff()); + const TypeParam* signs = this->blob_bottom_->cpu_diff(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(signs[i], x[i] > 0 ? 1 : (x[i] < 0 ? -1 : 0)); + } +} + +TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { + int n = this->blob_bottom_->count(); + caffe_gpu_sgnbit(n, this->blob_bottom_->gpu_data(), + this->blob_bottom_->mutable_gpu_diff()); + const TypeParam* signbits = this->blob_bottom_->cpu_diff(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(signbits[i], x[i] < 0 ? 1 : 0); + } +} + +TYPED_TEST(GPUMathFunctionsTest, TestFabs) { + int n = this->blob_bottom_->count(); + caffe_gpu_abs(n, this->blob_bottom_->gpu_data(), + this->blob_bottom_->mutable_gpu_diff()); + const TypeParam* abs_val = this->blob_bottom_->cpu_diff(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(abs_val[i], x[i] > 0 ? 
x[i] : -x[i]); + } +} + +TYPED_TEST(GPUMathFunctionsTest, TestScale) { + int n = this->blob_bottom_->count(); + TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % + this->blob_bottom_->count()]; + caffe_gpu_scale(n, alpha, this->blob_bottom_->gpu_data(), + this->blob_bottom_->mutable_gpu_diff()); + const TypeParam* scaled = this->blob_bottom_->cpu_diff(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(scaled[i], x[i] * alpha); + } +} + +TYPED_TEST(GPUMathFunctionsTest, TestCopy) { + const int n = this->blob_bottom_->count(); + const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); + TypeParam* top_data = this->blob_top_->mutable_gpu_data(); + caffe_copy(n, bottom_data, top_data); + bottom_data = this->blob_bottom_->cpu_data(); + top_data = this->blob_top_->mutable_cpu_data(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(bottom_data[i], top_data[i]); + } +} + +#endif + + +} // namespace caffe diff --git a/src/caffe/test/test_maxpool_dropout_layers.cpp b/src/caffe/test/test_maxpool_dropout_layers.cpp new file mode 100755 index 0000000..611d979 --- /dev/null +++ b/src/caffe/test/test_maxpool_dropout_layers.cpp @@ -0,0 +1,127 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class MaxPoolingDropoutTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: + MaxPoolingDropoutTest() + : blob_bottom_(new Blob()), + blob_top_(new Blob()) {} + virtual void SetUp() { + Caffe::set_random_seed(1703); + blob_bottom_->Reshape(2, 3, 6, 5); + // fill the values + FillerParameter filler_param; + filler_param.set_value(1.); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~MaxPoolingDropoutTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(MaxPoolingDropoutTest, TestDtypesAndDevices); + +TYPED_TEST(MaxPoolingDropoutTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + PoolingLayer max_layer(layer_param); + max_layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + DropoutLayer dropout_layer(layer_param); + dropout_layer.SetUp(this->blob_top_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 2); +} + + +TYPED_TEST(MaxPoolingDropoutTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* top_data = this->blob_top_->cpu_data(); + Dtype sum = 0.; + for (int i = 0; i < 
this->blob_top_->count(); ++i) { + sum += top_data[i]; + } + EXPECT_EQ(sum, this->blob_top_->count()); + // Dropout in-place + DropoutLayer dropout_layer(layer_param); + dropout_layer.SetUp(this->blob_top_vec_, this->blob_top_vec_); + dropout_layer.Forward(this->blob_top_vec_, this->blob_top_vec_); + sum = 0.; + Dtype scale = 1. / (1. - layer_param.dropout_param().dropout_ratio()); + top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + sum += top_data[i]; + } + EXPECT_GE(sum, 0); + EXPECT_LE(sum, this->blob_top_->count()*scale); +} + +TYPED_TEST(MaxPoolingDropoutTest, TestBackward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.set_phase(TRAIN); + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + vector propagate_down(this->blob_bottom_vec_.size(), true); + layer.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + const Dtype* bottom_diff = this->blob_bottom_->cpu_diff(); + Dtype sum = 0.; + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + sum += bottom_diff[i]; + } + EXPECT_EQ(sum, this->blob_top_->count()); + // Dropout in-place + DropoutLayer dropout_layer(layer_param); + dropout_layer.SetUp(this->blob_top_vec_, this->blob_top_vec_); + dropout_layer.Forward(this->blob_top_vec_, this->blob_top_vec_); + dropout_layer.Backward(this->blob_top_vec_, propagate_down, + this->blob_top_vec_); + layer.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + Dtype sum_with_dropout = 0.; + bottom_diff = this->blob_bottom_->cpu_diff(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + sum_with_dropout += bottom_diff[i]; + } + EXPECT_GE(sum_with_dropout, sum); +} + +} // namespace caffe diff --git a/src/caffe/test/test_memory_data_layer.cpp b/src/caffe/test/test_memory_data_layer.cpp new file mode 100755 index 0000000..a79033f --- /dev/null +++ b/src/caffe/test/test_memory_data_layer.cpp @@ -0,0 +1,296 @@ +#include + +#include +#include + +#include "caffe/data_layers.hpp" +#include "caffe/filler.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class MemoryDataLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MemoryDataLayerTest() + : data_(new Blob()), + labels_(new Blob()), + data_blob_(new Blob()), + label_blob_(new Blob()) {} + virtual void SetUp() { + batch_size_ = 8; + batches_ = 12; + channels_ = 4; + height_ = 7; + width_ = 11; + blob_top_vec_.push_back(data_blob_); + blob_top_vec_.push_back(label_blob_); + // pick random input data + FillerParameter filler_param; + GaussianFiller filler(filler_param); + data_->Reshape(batches_ * batch_size_, channels_, height_, width_); + labels_->Reshape(batches_ * batch_size_, 1, 1, 1); + filler.Fill(this->data_); + filler.Fill(this->labels_); + } + + virtual ~MemoryDataLayerTest() { + delete data_blob_; + delete label_blob_; + delete data_; + delete labels_; + } + int batch_size_; + int batches_; + int channels_; + int height_; + int width_; + // we don't really need blobs for the input data, but it makes it + // easier to call Filler + Blob* const data_; + Blob* const labels_; + // 
blobs for the top of MemoryDataLayer + Blob* const data_blob_; + Blob* const label_blob_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(MemoryDataLayerTest, TestDtypesAndDevices); + +TYPED_TEST(MemoryDataLayerTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; + + LayerParameter layer_param; + MemoryDataParameter* md_param = layer_param.mutable_memory_data_param(); + md_param->set_batch_size(this->batch_size_); + md_param->set_channels(this->channels_); + md_param->set_height(this->height_); + md_param->set_width(this->width_); + shared_ptr > layer( + new MemoryDataLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->data_blob_->num(), this->batch_size_); + EXPECT_EQ(this->data_blob_->channels(), this->channels_); + EXPECT_EQ(this->data_blob_->height(), this->height_); + EXPECT_EQ(this->data_blob_->width(), this->width_); + EXPECT_EQ(this->label_blob_->num(), this->batch_size_); + EXPECT_EQ(this->label_blob_->channels(), 1); + EXPECT_EQ(this->label_blob_->height(), 1); + EXPECT_EQ(this->label_blob_->width(), 1); +} + +// run through a few batches and check that the right data appears +TYPED_TEST(MemoryDataLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + + LayerParameter layer_param; + MemoryDataParameter* md_param = layer_param.mutable_memory_data_param(); + md_param->set_batch_size(this->batch_size_); + md_param->set_channels(this->channels_); + md_param->set_height(this->height_); + md_param->set_width(this->width_); + shared_ptr > layer( + new MemoryDataLayer(layer_param)); + layer->DataLayerSetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Reset(this->data_->mutable_cpu_data(), + this->labels_->mutable_cpu_data(), this->data_->num()); + for (int i = 0; i < this->batches_ * 6; ++i) { + int batch_num = i % this->batches_; + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int j = 0; j < this->data_blob_->count(); ++j) { + EXPECT_EQ(this->data_blob_->cpu_data()[j], + this->data_->cpu_data()[ + this->data_->offset(1) * this->batch_size_ * batch_num + j]); + } + for (int j = 0; j < this->label_blob_->count(); ++j) { + EXPECT_EQ(this->label_blob_->cpu_data()[j], + this->labels_->cpu_data()[this->batch_size_ * batch_num + j]); + } + } +} + +TYPED_TEST(MemoryDataLayerTest, AddDatumVectorDefaultTransform) { + typedef typename TypeParam::Dtype Dtype; + + LayerParameter param; + MemoryDataParameter* memory_data_param = param.mutable_memory_data_param(); + memory_data_param->set_batch_size(this->batch_size_); + memory_data_param->set_channels(this->channels_); + memory_data_param->set_height(this->height_); + memory_data_param->set_width(this->width_); + MemoryDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + // We add batch_size*num_iter items, then for each iteration + // we forward batch_size elements + int num_iter = 5; + vector datum_vector(this->batch_size_ * num_iter); + const size_t count = this->channels_ * this->height_ * this->width_; + size_t pixel_index = 0; + for (int i = 0; i < this->batch_size_ * num_iter; ++i) { + datum_vector[i].set_channels(this->channels_); + datum_vector[i].set_height(this->height_); + datum_vector[i].set_width(this->width_); + datum_vector[i].set_label(i); + vector pixels(count); + for (int j = 0; j < count; ++j) { + pixels[j] = pixel_index++ % 256; + } + datum_vector[i].set_data(&(pixels[0]), count); + } + layer.AddDatumVector(datum_vector); + + int data_index; + // Go through the 
data 5 times + for (int iter = 0; iter < num_iter; ++iter) { + int offset = this->batch_size_ * iter; + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->data_blob_->cpu_data(); + size_t index = 0; + for (int i = 0; i < this->batch_size_; ++i) { + const string& data_string = datum_vector[offset + i].data(); + EXPECT_EQ(offset + i, this->label_blob_->cpu_data()[i]); + for (int c = 0; c < this->channels_; ++c) { + for (int h = 0; h < this->height_; ++h) { + for (int w = 0; w < this->width_; ++w) { + data_index = (c * this->height_ + h) * this->width_ + w; + EXPECT_EQ(static_cast( + static_cast(data_string[data_index])), + data[index++]); + } + } + } + } + } +} + +TYPED_TEST(MemoryDataLayerTest, AddMatVectorDefaultTransform) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter param; + MemoryDataParameter* memory_data_param = param.mutable_memory_data_param(); + memory_data_param->set_batch_size(this->batch_size_); + memory_data_param->set_channels(this->channels_); + memory_data_param->set_height(this->height_); + memory_data_param->set_width(this->width_); + MemoryDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + // We add batch_size*num_iter items, then for each iteration + // we forward batch_size elements + int num_iter = 5; + vector mat_vector(this->batch_size_ * num_iter); + vector label_vector(this->batch_size_ * num_iter); + for (int i = 0; i < this->batch_size_*num_iter; ++i) { + mat_vector[i] = cv::Mat(this->height_, this->width_, CV_8UC4); + label_vector[i] = i; + cv::randu(mat_vector[i], cv::Scalar::all(0), cv::Scalar::all(255)); + } + layer.AddMatVector(mat_vector, label_vector); + + int data_index; + const size_t count = this->channels_ * this->height_ * this->width_; + for (int iter = 0; iter < num_iter; ++iter) { + int offset = this->batch_size_ * iter; + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->data_blob_->cpu_data(); + for (int i = 0; i < this->batch_size_; ++i) { + EXPECT_EQ(offset + i, this->label_blob_->cpu_data()[i]); + for (int h = 0; h < this->height_; ++h) { + const unsigned char* ptr_mat = mat_vector[offset + i].ptr(h); + int index = 0; + for (int w = 0; w < this->width_; ++w) { + for (int c = 0; c < this->channels_; ++c) { + data_index = (i*count) + (c * this->height_ + h) * this->width_ + w; + Dtype pixel = static_cast(ptr_mat[index++]); + EXPECT_EQ(static_cast(pixel), + data[data_index]); + } + } + } + } + } +} + +TYPED_TEST(MemoryDataLayerTest, TestSetBatchSize) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter param; + MemoryDataParameter* memory_data_param = param.mutable_memory_data_param(); + memory_data_param->set_batch_size(this->batch_size_); + memory_data_param->set_channels(this->channels_); + memory_data_param->set_height(this->height_); + memory_data_param->set_width(this->width_); + MemoryDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + // first add data as usual + int num_iter = 5; + vector mat_vector(this->batch_size_ * num_iter); + vector label_vector(this->batch_size_ * num_iter); + for (int i = 0; i < this->batch_size_*num_iter; ++i) { + mat_vector[i] = cv::Mat(this->height_, this->width_, CV_8UC4); + label_vector[i] = i; + cv::randu(mat_vector[i], cv::Scalar::all(0), cv::Scalar::all(255)); + } + layer.AddMatVector(mat_vector, label_vector); + // then consume the data + int data_index; + const size_t count = this->channels_ * this->height_ * this->width_; + for (int 
iter = 0; iter < num_iter; ++iter) { + int offset = this->batch_size_ * iter; + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->data_blob_->cpu_data(); + for (int i = 0; i < this->batch_size_; ++i) { + EXPECT_EQ(offset + i, this->label_blob_->cpu_data()[i]); + for (int h = 0; h < this->height_; ++h) { + const unsigned char* ptr_mat = mat_vector[offset + i].ptr(h); + int index = 0; + for (int w = 0; w < this->width_; ++w) { + for (int c = 0; c < this->channels_; ++c) { + data_index = (i*count) + (c * this->height_ + h) * this->width_ + w; + Dtype pixel = static_cast(ptr_mat[index++]); + EXPECT_EQ(static_cast(pixel), data[data_index]); + } + } + } + } + } + // and then add new data with different batch_size + int new_batch_size = 16; + layer.set_batch_size(new_batch_size); + mat_vector.clear(); + mat_vector.resize(new_batch_size * num_iter); + label_vector.clear(); + label_vector.resize(new_batch_size * num_iter); + for (int i = 0; i < new_batch_size*num_iter; ++i) { + mat_vector[i] = cv::Mat(this->height_, this->width_, CV_8UC4); + label_vector[i] = i; + cv::randu(mat_vector[i], cv::Scalar::all(0), cv::Scalar::all(255)); + } + layer.AddMatVector(mat_vector, label_vector); + + // finally consume new data and check if everything is fine + for (int iter = 0; iter < num_iter; ++iter) { + int offset = new_batch_size * iter; + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(new_batch_size, this->blob_top_vec_[0]->num()); + EXPECT_EQ(new_batch_size, this->blob_top_vec_[1]->num()); + const Dtype* data = this->data_blob_->cpu_data(); + for (int i = 0; i < new_batch_size; ++i) { + EXPECT_EQ(offset + i, this->label_blob_->cpu_data()[i]); + for (int h = 0; h < this->height_; ++h) { + const unsigned char* ptr_mat = mat_vector[offset + i].ptr(h); + int index = 0; + for (int w = 0; w < this->width_; ++w) { + for (int c = 0; c < this->channels_; ++c) { + data_index = (i*count) + (c * this->height_ + h) * this->width_ + w; + Dtype pixel = static_cast(ptr_mat[index++]); + EXPECT_EQ(static_cast(pixel), data[data_index]); + } + } + } + } + } +} + +} // namespace caffe diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp new file mode 100755 index 0000000..b2db984 --- /dev/null +++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp @@ -0,0 +1,61 @@ +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class MultinomialLogisticLossLayerTest : public CPUDeviceTest { + protected: + MultinomialLogisticLossLayerTest() + : blob_bottom_data_(new Blob(10, 5, 1, 1)), + blob_bottom_label_(new Blob(10, 1, 1, 1)), + blob_top_loss_(new Blob()) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + PositiveUnitballFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); + for (int i = 0; i < blob_bottom_label_->count(); ++i) { + blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; + } + blob_bottom_vec_.push_back(blob_bottom_label_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~MultinomialLogisticLossLayerTest() { + delete blob_bottom_data_; + delete blob_bottom_label_; + delete blob_top_loss_; + } + 
Blob* const blob_bottom_data_; + Blob* const blob_bottom_label_; + Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(MultinomialLogisticLossLayerTest, TestDtypes); + + +TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) { + LayerParameter layer_param; + MultinomialLogisticLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 2*1e-2, 1701, 0, 0.05); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); +} + +} // namespace caffe diff --git a/src/caffe/test/test_mvn_layer.cpp b/src/caffe/test/test_mvn_layer.cpp new file mode 100755 index 0000000..933b432 --- /dev/null +++ b/src/caffe/test/test_mvn_layer.cpp @@ -0,0 +1,169 @@ +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/common_layers.hpp" +#include "caffe/filler.hpp" +#include "gtest/gtest.h" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class MVNLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: + MVNLayerTest() + : blob_bottom_(new Blob(2, 3, 4, 5)), + blob_top_(new Blob()) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~MVNLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(MVNLayerTest, TestDtypesAndDevices); + +TYPED_TEST(MVNLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MVNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Test mean + int num = this->blob_bottom_->num(); + int channels = this->blob_bottom_->channels(); + int height = this->blob_bottom_->height(); + int width = this->blob_bottom_->width(); + + for (int i = 0; i < num; ++i) { + for (int j = 0; j < channels; ++j) { + Dtype sum = 0, var = 0; + for (int k = 0; k < height; ++k) { + for (int l = 0; l < width; ++l) { + Dtype data = this->blob_top_->data_at(i, j, k, l); + sum += data; + var += data * data; + } + } + sum /= height * width; + var /= height * width; + + const Dtype kErrorBound = 0.001; + // expect zero mean + EXPECT_NEAR(0, sum, kErrorBound); + // expect unit variance + EXPECT_NEAR(1, var, kErrorBound); + } + } +} + +TYPED_TEST(MVNLayerTest, TestForwardMeanOnly) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.ParseFromString("mvn_param{normalize_variance: false}"); + MVNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Test mean + int num = this->blob_bottom_->num(); + int channels = this->blob_bottom_->channels(); + int height = this->blob_bottom_->height(); + int width = this->blob_bottom_->width(); + + for (int i = 0; i < num; ++i) { + for (int j = 0; j < channels; ++j) { + Dtype sum = 0, var = 0; + for (int k = 0; k < height; ++k) { + for (int l = 0; l < width; ++l) { + Dtype data = this->blob_top_->data_at(i, j, k, l); + sum += data; + var += data * data; + } + } + sum /= height * width; + + const Dtype 
kErrorBound = 0.001; + // expect zero mean + EXPECT_NEAR(0, sum, kErrorBound); + } + } +} + +TYPED_TEST(MVNLayerTest, TestForwardAcrossChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.ParseFromString("mvn_param{across_channels: true}"); + MVNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Test mean + int num = this->blob_bottom_->num(); + int channels = this->blob_bottom_->channels(); + int height = this->blob_bottom_->height(); + int width = this->blob_bottom_->width(); + + for (int i = 0; i < num; ++i) { + Dtype sum = 0, var = 0; + for (int j = 0; j < channels; ++j) { + for (int k = 0; k < height; ++k) { + for (int l = 0; l < width; ++l) { + Dtype data = this->blob_top_->data_at(i, j, k, l); + sum += data; + var += data * data; + } + } + } + sum /= height * width * channels; + var /= height * width * channels; + + const Dtype kErrorBound = 0.001; + // expect zero mean + EXPECT_NEAR(0, sum, kErrorBound); + // expect unit variance + EXPECT_NEAR(1, var, kErrorBound); + } +} + +TYPED_TEST(MVNLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MVNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(MVNLayerTest, TestGradientMeanOnly) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.ParseFromString("mvn_param{normalize_variance: false}"); + MVNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(MVNLayerTest, TestGradientAcrossChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.ParseFromString("mvn_param{across_channels: true}"); + MVNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +} // namespace caffe diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp new file mode 100755 index 0000000..12998d8 --- /dev/null +++ b/src/caffe/test/test_net.cpp @@ -0,0 +1,2368 @@ +#include +#include +#include + +#include "google/protobuf/text_format.h" + +#include "gtest/gtest.h" + +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/net.hpp" +#include "caffe/util/math_functions.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class NetTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + NetTest() : seed_(1701) {} + + virtual void InitNetFromProtoString(const string& proto) { + NetParameter param; + CHECK(google::protobuf::TextFormat::ParseFromString(proto, ¶m)); + net_.reset(new Net(param)); + } + + virtual void CopyNetBlobs(const bool copy_diff, + vector > >* blobs_copy) { + CHECK(net_); + const vector > >& net_blobs = net_->blobs(); + blobs_copy->clear(); + blobs_copy->resize(net_blobs.size()); + const bool kReshape = true; + for (int i = 0; i < net_blobs.size(); ++i) { + (*blobs_copy)[i].reset(new Blob()); + (*blobs_copy)[i]->CopyFrom(*net_blobs[i], copy_diff, kReshape); + } + } + + virtual void CopyNetParams(const bool copy_diff, + vector > >* params_copy) { + CHECK(net_); + const vector > >& net_params = 
net_->params(); + params_copy->clear(); + params_copy->resize(net_params.size()); + const bool kReshape = true; + for (int i = 0; i < net_params.size(); ++i) { + (*params_copy)[i].reset(new Blob()); + (*params_copy)[i]->CopyFrom(*net_params[i], copy_diff, kReshape); + } + } + + virtual void InitTinyNet(const bool force_backward = false, + const bool accuracy_layer = false) { + string proto = + "name: 'TinyTestNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " shape { " + " dim: 5 " + " dim: 2 " + " dim: 3 " + " dim: 4 " + " } " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " shape { " + " dim: 5 " + " } " + " data_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'data' " + " top: 'innerproduct' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerproduct' " + " bottom: 'label' " + " top: 'top_loss' " + "} "; + if (accuracy_layer) { + proto += + "layer { " + " name: 'loss' " + " type: 'Accuracy' " + " bottom: 'innerproduct' " + " bottom: 'label' " + " top: 'accuracy' " + "} "; + } + if (force_backward) { + proto += "force_backward: true "; + } + InitNetFromProtoString(proto); + } + + virtual void InitTinyNetEuclidean(const bool force_backward = false) { + string proto = + "name: 'TinyTestEuclidLossNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " num: 5 " + " channels: 1 " + " height: 1 " + " width: 1 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'data' " + " top: 'innerproduct' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerproduct' " + " bottom: 'label' " + "} "; + if (force_backward) { + proto += "force_backward: true "; + } + InitNetFromProtoString(proto); + } + + virtual void InitTrickyNet(Dtype* loss_weight = NULL) { + ostringstream loss_weight_stream; + if (loss_weight) { + loss_weight_stream << " loss_weight: " << *loss_weight << " "; + } + const string& proto = + "name: 'TrickyTestNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " num: 5 " + " channels: 1 " + " height: 1 " + " width: 1 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " 
value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'data' " + " top: 'transformed_data' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'label' " + " top: 'transformed_label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + + loss_weight_stream.str() + + " bottom: 'transformed_data' " + " bottom: 'transformed_label' " + "} "; + InitNetFromProtoString(proto); + } + + // loss_weight is the loss weight for the 'EuclideanLoss' layer output. + // midnet_loss_weight is the loss weight for the first 'InnerProduct' layer + // output. Should both default to 0.0 if unspecified (i.e., if NULL is + // passed to this function). + virtual void InitUnsharedWeightsNet(const Dtype* loss_weight = NULL, + const Dtype* midnet_loss_weight = NULL, + const bool force_backward = false, const bool bias_term = false, + const Dtype blobs_lr_w1 = 1, const Dtype blobs_lr_b1 = 2, + const Dtype blobs_lr_w2 = 1, const Dtype blobs_lr_b2 = 2) { + string bias_str = bias_term ? "true ":"false "; + ostringstream proto; + proto << "name: 'UnsharedWeightsNetwork' "; + if (force_backward) { + proto << "force_backward: true "; + } + proto << + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " top: 'data' " + "} " + "layer { " + " name: 'innerproduct1' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: " << bias_str << + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { " + " name: 'unsharedweights1' " + " lr_mult: " << blobs_lr_w1 << + " } "; + if (bias_term) { + proto << " param { lr_mult: " << blobs_lr_b1 << " } "; + } + proto << + " bottom: 'data' " + " top: 'innerproduct1' "; + if (midnet_loss_weight) { + proto << " loss_weight: " << *midnet_loss_weight << " "; + } + proto << + "} " + "layer { " + " name: 'innerproduct2' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: " << bias_str << + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { " + " name: 'unsharedweights2' " + " lr_mult: " << blobs_lr_w2 << + " } "; + if (bias_term) { + proto << " param { lr_mult: " << blobs_lr_b2 << " } "; + } + proto << + " bottom: 'data' " + " top: 'innerproduct2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' "; + if (loss_weight) { + proto << " loss_weight: " << *loss_weight << " "; + } + proto << + " bottom: 'innerproduct1' " + " bottom: 'innerproduct2' " + "} "; + InitNetFromProtoString(proto.str()); + } + + virtual void InitSharedWeightsNet() { + const string& proto = + "name: 'SharedWeightsNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " top: 'data' " + "} " + "layer { " + " name: 'innerproduct1' " + " type: 'InnerProduct' " + " inner_product_param { 
" + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { name: 'sharedweights' } " + " bottom: 'data' " + " top: 'innerproduct1' " + "} " + "layer { " + " name: 'innerproduct2' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { name: 'sharedweights' } " + " bottom: 'data' " + " top: 'innerproduct2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerproduct1' " + " bottom: 'innerproduct2' " + "} "; + InitNetFromProtoString(proto); + } + + virtual void InitDiffDataUnsharedWeightsNet() { + const string& proto = + "name: 'DiffDataUnsharedWeightsNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 10 " + " channels: 10 " + " height: 1 " + " width: 1 " + " num: 10 " + " channels: 10 " + " height: 1 " + " width: 1 " + " data_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " top: 'data1' " + " top: 'data2' " + "} " + "layer { " + " name: 'innerproduct1' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'constant' " + " value: 0.5 " + " } " + " } " + " param { name: 'unsharedweights1' } " + " bottom: 'data1' " + " top: 'innerproduct1' " + "} " + "layer { " + " name: 'innerproduct2' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'constant' " + " value: 0.5 " + " } " + " } " + " param { name: 'unsharedweights2' } " + " bottom: 'innerproduct1' " + " top: 'innerproduct2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'data2' " + " bottom: 'innerproduct2' " + "} "; + InitNetFromProtoString(proto); + } + + virtual void InitDiffDataSharedWeightsNet() { + const string& proto = + "name: 'DiffDataSharedWeightsNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 10 " + " channels: 10 " + " height: 1 " + " width: 1 " + " num: 10 " + " channels: 10 " + " height: 1 " + " width: 1 " + " data_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " top: 'data1' " + " top: 'data2' " + "} " + "layer { " + " name: 'innerproduct1' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'constant' " + " value: 0.5 " + " } " + " } " + " param { name: 'sharedweights' } " + " bottom: 'data1' " + " top: 'innerproduct1' " + "} " + "layer { " + " name: 'innerproduct2' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'constant' " + " value: 0.5 " + " } " + " } " + " param { name: 'sharedweights' } " + " bottom: 'innerproduct1' " + " top: 'innerproduct2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'data2' " + " bottom: 'innerproduct2' " + "} "; + InitNetFromProtoString(proto); + } + + virtual void InitReshapableNet() { + const string& proto = + "name: 'ReshapableNetwork' " + "input: 'data' " + "input_dim: 1 " + "input_dim: 3 " + "input_dim: 100 " + "input_dim: 100 " + "layer { " + " name: 'conv1' " + " type: 'Convolution' " + " bottom: 'data' " + " top: 'conv1' " + " convolution_param { " + " num_output: 5 " + " kernel_size: 3 " + " stride: 2 " + " weight_filler { " 
+ " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0.2 " + " } " + " } " + "} " + "layer { " + " name: 'relu1' " + " type: 'ReLU' " + " bottom: 'conv1' " + " top: 'conv1' " + "} " + "layer { " + " name: 'pool1' " + " type: 'Pooling' " + " bottom: 'conv1' " + " top: 'pool1' " + " pooling_param { " + " pool: MAX " + " kernel_size: 2 " + " stride: 2 " + " } " + "} " + "layer { " + " name: 'norm1' " + " type: 'LRN' " + " bottom: 'pool1' " + " top: 'norm1' " + " lrn_param { " + " local_size: 3 " + " } " + "} " + "layer { " + " name: 'softmax' " + " type: 'Softmax' " + " bottom: 'norm1' " + " top: 'softmax' " + "} "; + InitNetFromProtoString(proto); + } + + virtual void InitSkipPropNet(bool test_skip_true) { + string proto = + "name: 'SkipPropTestNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " shape { " + " dim: 5 " + " dim: 2 " + " dim: 3 " + " dim: 4 " + " } " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " shape { " + " dim: 5 " + " } " + " data_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'silence' " + " bottom: 'label' " + " type: 'Silence' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'data' " + " top: 'innerproduct' " + "} " + "layer { " + " name: 'ip_fake_labels' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " bottom: 'data' " + " top: 'fake_labels' " + "} " + "layer { " + " name: 'argmax' " + " bottom: 'fake_labels' " + " top: 'label_argmax' " + " type: 'ArgMax' " + "} " + "layer { " + " name: 'loss' " + " bottom: 'innerproduct' " + " bottom: 'label_argmax' "; + if (test_skip_true) + proto += " propagate_down: true " + " propagate_down: false "; + else + proto += " propagate_down: true " + " propagate_down: true "; + proto += + " top: 'cross_entropy_loss' " + " type: 'SigmoidCrossEntropyLoss' " + " loss_weight: 0.1 " + "} "; + InitNetFromProtoString(proto); + } + + int seed_; + shared_ptr > net_; +}; + +TYPED_TEST_CASE(NetTest, TestDtypesAndDevices); + +TYPED_TEST(NetTest, TestHasBlob) { + this->InitTinyNet(); + EXPECT_TRUE(this->net_->has_blob("data")); + EXPECT_TRUE(this->net_->has_blob("label")); + EXPECT_TRUE(this->net_->has_blob("innerproduct")); + EXPECT_FALSE(this->net_->has_blob("loss")); + EXPECT_TRUE(this->net_->has_blob("top_loss")); +} + +TYPED_TEST(NetTest, TestGetBlob) { + this->InitTinyNet(); + EXPECT_EQ(this->net_->blob_by_name("data"), this->net_->blobs()[0]); + EXPECT_EQ(this->net_->blob_by_name("label"), this->net_->blobs()[1]); + EXPECT_EQ(this->net_->blob_by_name("innerproduct"), this->net_->blobs()[2]); + EXPECT_FALSE(this->net_->blob_by_name("loss")); + EXPECT_EQ(this->net_->blob_by_name("top_loss"), this->net_->blobs()[3]); +} + +TYPED_TEST(NetTest, TestHasLayer) { + this->InitTinyNet(); + EXPECT_TRUE(this->net_->has_layer("data")); + EXPECT_TRUE(this->net_->has_layer("innerproduct")); + EXPECT_TRUE(this->net_->has_layer("loss")); + 
EXPECT_FALSE(this->net_->has_layer("label")); +} + +TYPED_TEST(NetTest, TestGetLayerByName) { + this->InitTinyNet(); + EXPECT_EQ(this->net_->layer_by_name("data"), this->net_->layers()[0]); + EXPECT_EQ(this->net_->layer_by_name("innerproduct"), this->net_->layers()[1]); + EXPECT_EQ(this->net_->layer_by_name("loss"), this->net_->layers()[2]); + EXPECT_FALSE(this->net_->layer_by_name("label")); +} + +TYPED_TEST(NetTest, TestBottomNeedBackward) { + this->InitTinyNet(); + const vector >& bottom_need_backward = + this->net_->bottom_need_backward(); + EXPECT_EQ(3, bottom_need_backward.size()); + EXPECT_EQ(0, bottom_need_backward[0].size()); + EXPECT_EQ(1, bottom_need_backward[1].size()); + EXPECT_EQ(false, bottom_need_backward[1][0]); + EXPECT_EQ(2, bottom_need_backward[2].size()); + EXPECT_EQ(true, bottom_need_backward[2][0]); + EXPECT_EQ(false, bottom_need_backward[2][1]); +} + +TYPED_TEST(NetTest, TestBottomNeedBackwardForce) { + const bool force_backward = true; + this->InitTinyNet(force_backward); + const vector >& bottom_need_backward = + this->net_->bottom_need_backward(); + EXPECT_EQ(3, bottom_need_backward.size()); + EXPECT_EQ(0, bottom_need_backward[0].size()); + EXPECT_EQ(1, bottom_need_backward[1].size()); + EXPECT_EQ(true, bottom_need_backward[1][0]); + EXPECT_EQ(2, bottom_need_backward[2].size()); + EXPECT_EQ(true, bottom_need_backward[2][0]); + EXPECT_EQ(false, bottom_need_backward[2][1]); +} + +TYPED_TEST(NetTest, TestBottomNeedBackwardEuclideanForce) { + const bool force_backward = true; + this->InitTinyNetEuclidean(force_backward); + const vector >& bottom_need_backward = + this->net_->bottom_need_backward(); + EXPECT_EQ(3, bottom_need_backward.size()); + EXPECT_EQ(0, bottom_need_backward[0].size()); + EXPECT_EQ(1, bottom_need_backward[1].size()); + EXPECT_EQ(true, bottom_need_backward[1][0]); + EXPECT_EQ(2, bottom_need_backward[2].size()); + EXPECT_EQ(true, bottom_need_backward[2][0]); + EXPECT_EQ(true, bottom_need_backward[2][1]); +} + +TYPED_TEST(NetTest, TestBottomNeedBackwardTricky) { + this->InitTrickyNet(); + const vector >& bottom_need_backward = + this->net_->bottom_need_backward(); + EXPECT_EQ(4, bottom_need_backward.size()); + EXPECT_EQ(0, bottom_need_backward[0].size()); + EXPECT_EQ(1, bottom_need_backward[1].size()); + EXPECT_EQ(false, bottom_need_backward[1][0]); + EXPECT_EQ(1, bottom_need_backward[2].size()); + EXPECT_EQ(false, bottom_need_backward[2][0]); + EXPECT_EQ(2, bottom_need_backward[3].size()); + EXPECT_EQ(true, bottom_need_backward[3][0]); + // The label input to the SoftmaxLossLayer should say it "needs backward" + // since it has weights under it, even though we expect this to cause a crash + // at training/test time. + EXPECT_EQ(true, bottom_need_backward[3][1]); +} + +TYPED_TEST(NetTest, TestLossWeight) { + typedef typename TypeParam::Dtype Dtype; + // First, compute the loss and gradients with no loss_weight specified. + // In this case, the loss weight for the 'EuclideanLoss' layer should default + // to 1. + vector*> bottom; + Caffe::set_random_seed(this->seed_); + const bool kForceBackward = true; + this->InitUnsharedWeightsNet(NULL, NULL, kForceBackward); + const Dtype loss = this->net_->ForwardBackward(bottom); + const bool kCopyDiff = true; + vector > > blob_grads; + this->CopyNetBlobs(kCopyDiff, &blob_grads); + vector > > param_grads; + this->CopyNetParams(kCopyDiff, ¶m_grads); + // Check that the loss is non-trivial, otherwise the test doesn't prove much. 
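
Note: TestLossWeight relies on the fact that Caffe's overall objective is the sum of each loss layer's output times its loss_weight, so when only the weight changes (same seed, same data), both the returned loss and every cached diff should scale linearly; the kMinLossAbsValue check right below just guards against a trivially small baseline loss. A tiny numeric illustration with assumed values, not taken from the test:

// Hypothetical numbers illustrating the linear-scaling property being checked:
// an unweighted loss L and a per-element diff d should become w * L and w * d
// when the run is repeated with loss_weight w.
const double L = 0.8;                 // assumed unweighted loss
const double d = -0.031;              // assumed diff for one blob element
const double w = -2.5;                // one of the kLossWeights values used below
const double expected_loss = w * L;   // -2.0
const double expected_diff = w * d;   //  0.0775
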
+ const Dtype kMinLossAbsValue = 1e-2; + ASSERT_GE(fabs(loss), kMinLossAbsValue); + const Dtype kErrorMargin = 1e-4; + const int kNumLossWeights = 6; + Dtype kLossWeights[kNumLossWeights] = {2, 0, 1, -1, -2.5, 3.7}; + for (int i = 0; i < kNumLossWeights; ++i) { + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&kLossWeights[i], NULL, kForceBackward); + const Dtype weighted_loss = this->net_->ForwardBackward(bottom); + const Dtype error_margin = kErrorMargin * fabs(kLossWeights[i]); + EXPECT_NEAR(loss * kLossWeights[i], weighted_loss, error_margin) + << "loss weight = " << kLossWeights[i]; + const vector > >& weighted_blobs = + this->net_->blobs(); + ASSERT_EQ(blob_grads.size(), weighted_blobs.size()); + for (int j = 0; j < blob_grads.size(); ++j) { + ASSERT_EQ(blob_grads[j]->count(), weighted_blobs[j]->count()); + for (int k = 0; k < blob_grads[j]->count(); ++k) { + EXPECT_NEAR(blob_grads[j]->cpu_diff()[k] * kLossWeights[i], + weighted_blobs[j]->cpu_diff()[k], error_margin); + } + } + const vector > >& weighted_params = + this->net_->params(); + ASSERT_EQ(param_grads.size(), weighted_params.size()); + for (int j = 0; j < param_grads.size(); ++j) { + ASSERT_EQ(param_grads[j]->count(), weighted_params[j]->count()); + for (int k = 0; k < param_grads[j]->count(); ++k) { + EXPECT_NEAR(param_grads[j]->cpu_diff()[k] * kLossWeights[i], + weighted_params[j]->cpu_diff()[k], error_margin); + } + } + } +} + +TYPED_TEST(NetTest, TestLossWeightMidNet) { + typedef typename TypeParam::Dtype Dtype; + vector*> bottom; + Caffe::set_random_seed(this->seed_); + const bool kForceBackward = true; + Dtype loss_weight = 0; + Dtype midnet_loss_weight = 1; + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss = this->net_->ForwardBackward(bottom); + const bool kCopyDiff = true; + const bool kReshape = true; + Blob data_grad; + data_grad.CopyFrom(*this->net_->blob_by_name("data"), kCopyDiff, kReshape); + // Check that the loss is non-trivial, otherwise the test doesn't prove much. + const Dtype kMinLossAbsValue = 1e-2; + ASSERT_GE(fabs(loss), kMinLossAbsValue); + const Dtype kErrorMargin = 1e-4; + const int kNumLossWeights = 6; + Dtype kLossWeights[kNumLossWeights] = {2, 0, 1, -1, -2.5, 3.7}; + for (int i = 0; i < kNumLossWeights; ++i) { + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &kLossWeights[i], + kForceBackward); + const Dtype weighted_loss = this->net_->ForwardBackward(bottom); + const Dtype error_margin = kErrorMargin * fabs(kLossWeights[i]); + EXPECT_NEAR(loss * kLossWeights[i], weighted_loss, error_margin) + << "loss weight = " << kLossWeights[i]; + const shared_ptr >& weighted_blob = + this->net_->blob_by_name("data"); + ASSERT_EQ(data_grad.count(), weighted_blob->count()); + for (int j = 0; j < data_grad.count(); ++j) { + EXPECT_NEAR(data_grad.cpu_diff()[j] * kLossWeights[i], + weighted_blob->cpu_diff()[j], error_margin); + } + } +} + +TYPED_TEST(NetTest, TestComboLossWeight) { + typedef typename TypeParam::Dtype Dtype; + vector*> bottom; + Dtype loss_weight; + Dtype midnet_loss_weight; + const bool kForceBackward = true; + const Dtype kErrorMargin = 1e-4; + + // Get the loss and gradients with 'EuclideanLoss' weight 1, + // 'InnerProduct' weight 1. 
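
Note: TestComboLossWeight, which starts with the baseline run below, leans on the same linearity. With a fixed seed, each blob diff is an affine function of the single loss weight being varied, so comparing runs at weights 1, 2 and 3 gives a difference that doubles; that is exactly the EXPECT_NEAR(2 * grad_diff_2, grad_diff_3, ...) assertion further down. In sketch form:

// Affine-in-weight reasoning behind the grad_diff checks (sketch, comments only):
//   d(w) = d0 + w * g        // diff at one element as a function of the loss weight
//   d(2) - d(1) = g          // "grad_diff_2"
//   d(3) - d(1) = 2 * g      // "grad_diff_3"
// so grad_diff_3 should land within kErrorMargin of 2 * grad_diff_2.
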
+ loss_weight = 1; + midnet_loss_weight = 1; + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss = this->net_->ForwardBackward(bottom); + const bool kCopyDiff = true; + vector > > blob_grads; + this->CopyNetBlobs(kCopyDiff, &blob_grads); + vector > > param_grads; + this->CopyNetParams(kCopyDiff, ¶m_grads); + + loss_weight = 2; + midnet_loss_weight = 1; + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss_main_2 = this->net_->ForwardBackward(bottom); + vector > > blob_grads_loss_2; + this->CopyNetBlobs(kCopyDiff, &blob_grads_loss_2); + vector > > param_grads_loss_2; + this->CopyNetParams(kCopyDiff, ¶m_grads_loss_2); + + loss_weight = 3; + midnet_loss_weight = 1; + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss_main_3 = this->net_->ForwardBackward(bottom); + const vector > >& blob_grads_loss_3 = + this->net_->blobs(); + ASSERT_EQ(blob_grads.size(), blob_grads_loss_3.size()); + ASSERT_EQ(blob_grads_loss_2.size(), blob_grads_loss_3.size()); + for (int j = 0; j < blob_grads.size(); ++j) { + const string& blob_name = this->net_->blob_names()[j]; + bool grad_should_change = true; + if (blob_name == "innerproduct1_innerproduct1_0_split_0") { + grad_should_change = false; + } + ASSERT_EQ(blob_grads[j]->count(), blob_grads_loss_3[j]->count()); + ASSERT_EQ(blob_grads_loss_2[j]->count(), blob_grads_loss_3[j]->count()); + for (int k = 0; k < blob_grads[j]->count(); ++k) { + const Dtype grad_diff_2 = blob_grads_loss_2[j]->cpu_diff()[k] - + blob_grads[j]->cpu_diff()[k]; + const Dtype grad_diff_3 = blob_grads_loss_3[j]->cpu_diff()[k] - + blob_grads[j]->cpu_diff()[k]; + if (grad_should_change) { + // Test non-triviality. 
+ const Dtype kMinGradDiffAbsValue = 1e-4; + EXPECT_GT(fabs(grad_diff_2), kMinGradDiffAbsValue) << blob_name; + EXPECT_NEAR(2 * grad_diff_2, grad_diff_3, kErrorMargin) << blob_name; + } else { + EXPECT_EQ(0, grad_diff_2) << blob_name; + EXPECT_EQ(0, grad_diff_3) << blob_name; + } + } + } + + loss_weight = 1; + midnet_loss_weight = 2; + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss_midnet_2 = this->net_->ForwardBackward(bottom); + this->CopyNetBlobs(kCopyDiff, &blob_grads_loss_2); + this->CopyNetParams(kCopyDiff, ¶m_grads_loss_2); + + loss_weight = 1; + midnet_loss_weight = 3; + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss_midnet_3 = this->net_->ForwardBackward(bottom); + const vector > >& blob_grads_midnet_loss_3 = + this->net_->blobs(); + ASSERT_EQ(blob_grads.size(), blob_grads_midnet_loss_3.size()); + ASSERT_EQ(blob_grads_loss_2.size(), blob_grads_midnet_loss_3.size()); + const vector& blob_names = this->net_->blob_names(); + for (int j = 0; j < blob_grads.size(); ++j) { + const string& blob_name = blob_names[j]; + bool grad_should_change = false; + if (blob_name == "innerproduct1" || + blob_name == "innerproduct1_innerproduct1_0_split_0" || + blob_name == "data_data_0_split_0" || blob_name == "data") { + grad_should_change = true; + } + ASSERT_EQ(blob_grads[j]->count(), blob_grads_midnet_loss_3[j]->count()); + ASSERT_EQ(blob_grads[j]->count(), blob_grads_loss_2[j]->count()); + for (int k = 0; k < blob_grads[j]->count(); ++k) { + const Dtype grad_diff_2 = blob_grads_loss_2[j]->cpu_diff()[k] - + blob_grads[j]->cpu_diff()[k]; + const Dtype grad_diff_3 = blob_grads_midnet_loss_3[j]->cpu_diff()[k] - + blob_grads[j]->cpu_diff()[k]; + if (grad_should_change) { + // Test non-triviality. + const Dtype kMinGradDiffAbsValue = 1e-4; + EXPECT_GT(fabs(grad_diff_2), kMinGradDiffAbsValue) << blob_name; + EXPECT_NEAR(2 * grad_diff_2, grad_diff_3, kErrorMargin) << blob_name; + } else { + EXPECT_EQ(0, grad_diff_2) << blob_name; + EXPECT_EQ(0, grad_diff_3) << blob_name; + } + } + } + + const Dtype kMinLossDiffAbsValue = 1e-4; + + Dtype loss_diff_2 = loss_main_2 - loss; + // Test non-triviality. + EXPECT_GT(fabs(loss_diff_2), kMinLossDiffAbsValue); + Dtype loss_diff_3 = loss_main_3 - loss; + EXPECT_NEAR(2 * loss_diff_2, loss_diff_3, kErrorMargin); + + loss_diff_2 = loss_midnet_2 - loss; + // Test non-triviality. + EXPECT_GT(fabs(loss_diff_2), kMinLossDiffAbsValue); + loss_diff_3 = loss_midnet_3 - loss; + EXPECT_NEAR(2 * loss_diff_2, loss_diff_3, kErrorMargin); +} + +TYPED_TEST(NetTest, TestBackwardWithAccuracyLayer) { + typedef typename TypeParam::Dtype Dtype; + const bool kForceBackward = false; + const bool kAccuracyLayer = true; + this->InitTinyNet(kForceBackward, kAccuracyLayer); + EXPECT_TRUE(this->net_->has_blob("accuracy")); + vector*> bottom; + // Test that we can do Backward even though we have an 'Accuracy' layer. 
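+ // The Accuracy layer carries no loss weight, so it contributes nothing to
+ // the gradient; the point is only that its presence must not make
+ // Backward() fail.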
+ this->net_->ForwardBackward(bottom); +} + +TYPED_TEST(NetTest, TestUnsharedWeightsDataNet) { + typedef typename TypeParam::Dtype Dtype; + this->InitUnsharedWeightsNet(); + vector*> bottom; + Dtype loss; + this->net_->Forward(bottom, &loss); + EXPECT_GT(loss, 0); +} + +TYPED_TEST(NetTest, TestSharedWeightsDataNet) { + typedef typename TypeParam::Dtype Dtype; + this->InitSharedWeightsNet(); + vector*> bottom; + Dtype loss; + this->net_->Forward(bottom, &loss); + EXPECT_FLOAT_EQ(loss, 0); +} + +TYPED_TEST(NetTest, TestUnsharedWeightsDiffNet) { + typedef typename TypeParam::Dtype Dtype; + this->InitUnsharedWeightsNet(); + vector*> bottom; + Net* net = this->net_.get(); + net->Forward(bottom); + net->Backward(); + Layer* ip1_layer = net->layer_by_name("innerproduct1").get(); + Layer* ip2_layer = net->layer_by_name("innerproduct2").get(); + const int count = ip1_layer->blobs()[0]->count(); + const Dtype* grad1 = ip1_layer->blobs()[0]->cpu_diff(); + const Dtype* grad2 = ip2_layer->blobs()[0]->cpu_diff(); + for (int i = 0; i < count; ++i) { + EXPECT_GT(fabs(grad1[i]), 0); + EXPECT_FLOAT_EQ(-1 * grad1[i], grad2[i]); + } +} + +TYPED_TEST(NetTest, TestSharedWeightsDiffNet) { + typedef typename TypeParam::Dtype Dtype; + this->InitSharedWeightsNet(); + vector*> bottom; + Net* net = this->net_.get(); + Dtype loss; + net->Forward(bottom, &loss); + net->Backward(); + EXPECT_FLOAT_EQ(loss, 0); + Layer* ip1_layer = net->layer_by_name("innerproduct1").get(); + Layer* ip2_layer = net->layer_by_name("innerproduct2").get(); + const int count = ip1_layer->blobs()[0]->count(); + const Dtype* grad1 = ip1_layer->blobs()[0]->cpu_diff(); + const Dtype* grad2 = ip2_layer->blobs()[0]->cpu_diff(); + for (int i = 0; i < count; ++i) { + EXPECT_FLOAT_EQ(0, grad1[i]); + EXPECT_FLOAT_EQ(0, grad2[i]); + } +} + +TYPED_TEST(NetTest, TestSharedWeightsUpdate) { + typedef typename TypeParam::Dtype Dtype; + Caffe::set_random_seed(this->seed_); + this->InitDiffDataSharedWeightsNet(); + vector*> bottom; + EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); + EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); + Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); + Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); + // Check that data and diff blobs of shared weights share the same memory + // locations. + EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + this->net_->Forward(bottom); + this->net_->Backward(); + // Compute the expected update as the data minus the two diffs. + Blob shared_params; + const bool reshape = true; + const bool copy_diff = false; + shared_params.CopyFrom(*ip1_weights, copy_diff, reshape); + shared_params.CopyFrom(*ip1_weights, !copy_diff, reshape); + const int count = ip1_weights->count(); + // Make sure the diffs are non-trivial. + for (int i = 0; i < count; ++i) { + EXPECT_NE(0, ip1_weights->cpu_diff()[i]); + } + caffe_axpy(count, Dtype(-1), shared_params.cpu_diff(), + shared_params.mutable_cpu_data()); + const Dtype* expected_updated_params = shared_params.cpu_data(); + this->net_->Update(); + const Dtype* actual_updated_params = ip1_weights->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_EQ(expected_updated_params[i], actual_updated_params[i]); + } + // Check that data blobs of shared weights STILL point to the same memory + // location (because ... who knows). 
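+ // The diff memory is shared as well, so the single stored diff already
+ // accumulates contributions from both InnerProduct layers, and the update
+ // is effectively applied once to that shared storage; the aliasing should
+ // therefore survive Update().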
+ EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + + Caffe::set_random_seed(this->seed_); + this->InitDiffDataUnsharedWeightsNet(); + EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); + EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); + ip1_weights = this->net_->layers()[1]->blobs()[0].get(); + ip2_weights = this->net_->layers()[2]->blobs()[0].get(); + // Check that data and diff blobs of unshared weights are at different + // locations in memory. + EXPECT_NE(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + this->net_->Forward(bottom); + this->net_->Backward(); + // Compute the expected update. + Blob unshared_params1; + unshared_params1.CopyFrom(*ip1_weights, copy_diff, reshape); + unshared_params1.CopyFrom(*ip1_weights, !copy_diff, reshape); + Blob unshared_params2; + unshared_params2.CopyFrom(*ip2_weights, copy_diff, reshape); + unshared_params2.CopyFrom(*ip2_weights, !copy_diff, reshape); + // Make sure the diffs are non-trivial and sum to the diff in the shared net. + for (int i = 0; i < count; ++i) { + EXPECT_NE(0, ip1_weights->cpu_diff()[i]); + EXPECT_NE(0, ip2_weights->cpu_diff()[i]); + EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); + EXPECT_FLOAT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i], + shared_params.cpu_diff()[i]); + } + caffe_axpy(count, Dtype(-1), ip1_weights->cpu_diff(), + unshared_params1.mutable_cpu_data()); + caffe_axpy(count, Dtype(-1), ip2_weights->cpu_diff(), + unshared_params2.mutable_cpu_data()); + const Dtype* expected_updated_params1 = unshared_params1.cpu_data(); + const Dtype* expected_updated_params2 = unshared_params2.cpu_data(); + this->net_->Update(); + const Dtype* actual_updated_params1 = ip1_weights->cpu_data(); + const Dtype* actual_updated_params2 = ip2_weights->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_EQ(expected_updated_params1[i], actual_updated_params1[i]); + EXPECT_EQ(expected_updated_params2[i], actual_updated_params2[i]); + EXPECT_NE(actual_updated_params1[i], actual_updated_params2[i]); + EXPECT_NE(expected_updated_params, expected_updated_params1); + } +} + +TYPED_TEST(NetTest, TestSharedWeightsResume) { + typedef typename TypeParam::Dtype Dtype; + + // Create a net with weight sharing; Update it once. + Caffe::set_random_seed(this->seed_); + this->InitDiffDataSharedWeightsNet(); + vector*> bottom; + EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); + EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); + Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); + Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); + // Check that data and diff blobs of shared weights share the same memory + // locations. + EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + this->net_->ForwardBackward(bottom); + this->net_->Update(); + Blob shared_params; + const bool kReshape = true; + const bool kCopyDiff = false; + shared_params.CopyFrom(*ip1_weights, kCopyDiff, kReshape); + const int count = ip1_weights->count(); + + // Write the net to a NetParameter, as in Solver::Snapshot. + NetParameter net_param; + this->net_->ToProto(&net_param); + + // Reinitialize the net and copy parameters from net_param, as in + // Solver::Restore. 
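+ // Restoring should both recover the snapshotted values and re-establish
+ // the sharing: the reloaded ip1/ip2 weight blobs are distinct Blob objects
+ // but must again alias one data/diff buffer, as the checks below assert.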
+ Caffe::set_random_seed(this->seed_); + this->InitDiffDataSharedWeightsNet(); + this->net_->CopyTrainedLayersFrom(net_param); + ip1_weights = this->net_->layers()[1]->blobs()[0].get(); + ip2_weights = this->net_->layers()[2]->blobs()[0].get(); + ASSERT_FALSE(NULL == ip1_weights); + ASSERT_FALSE(NULL == ip2_weights); + EXPECT_NE(ip1_weights, ip2_weights); + // Check that data and diff blobs of shared weights share the same memory + // locations. + EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + for (int i = 0; i < count; ++i) { + EXPECT_FLOAT_EQ(shared_params.cpu_data()[i], ip1_weights->cpu_data()[i]); + } +} + +TYPED_TEST(NetTest, TestParamPropagateDown) { + typedef typename TypeParam::Dtype Dtype; + vector*> bottom; + const bool kBiasTerm = true, kForceBackward = false; + const Dtype* kLossWeight1 = NULL; + const Dtype* kLossWeight2 = NULL; + + // Run the net with all params learned; check that gradients are non-zero. + Caffe::set_random_seed(this->seed_); + Dtype blobs_lr_w1 = 1, blobs_lr_w2 = 1, blobs_lr_b1 = 2, blobs_lr_b2 = 2; + this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, + kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); + this->net_->Forward(bottom); + this->net_->Backward(); + const vector > >& params = this->net_->params(); + const int num_params = params.size(); + ASSERT_EQ(4, num_params); + const Dtype kNonZeroTestMin = 1e-3; + vector param_asums(params.size()); + for (int i = 0; i < num_params; ++i) { + const Dtype param_asum = + caffe_cpu_asum(params[i]->count(), params[i]->cpu_diff()); + param_asums[i] = param_asum; + EXPECT_GT(param_asum, kNonZeroTestMin); + } + + // Change the learning rates to different non-zero values; should see same + // gradients. + Caffe::set_random_seed(this->seed_); + blobs_lr_w1 *= 2, blobs_lr_w2 *= 2, blobs_lr_b1 *= 2, blobs_lr_b2 *= 2; + this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, + kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); + this->net_->Forward(bottom); + this->net_->Backward(); + const vector > >& params2 = this->net_->params(); + ASSERT_EQ(num_params, params2.size()); + for (int i = 0; i < num_params; ++i) { + const Dtype param_asum = + caffe_cpu_asum(params2[i]->count(), params2[i]->cpu_diff()); + EXPECT_FLOAT_EQ(param_asum, param_asums[i]); + } + + // Change a subset of the learning rates to zero; check that we see zero + // gradients for those. + Caffe::set_random_seed(this->seed_); + blobs_lr_w1 = 1, blobs_lr_w2 = 0, blobs_lr_b1 = 0, blobs_lr_b2 = 1; + this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, + kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); + this->net_->Forward(bottom); + this->net_->Backward(); + const vector > >& params3 = this->net_->params(); + ASSERT_EQ(num_params, params3.size()); + for (int i = 0; i < num_params; ++i) { + const Dtype param_asum = + caffe_cpu_asum(params3[i]->count(), params3[i]->cpu_diff()); + if (i == 1 || i == 2) { + EXPECT_FLOAT_EQ(0, param_asum); + } else { + EXPECT_FLOAT_EQ(param_asum, param_asums[i]); + } + } + + // Change the opposite subset of the learning rates to zero. 
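+ // Param order is [ip1 weights, ip1 bias, ip2 weights, ip2 bias], so zeroing
+ // blobs_lr_w1 and blobs_lr_b2 should zero the diffs at indices 0 and 3
+ // while leaving indices 1 and 2 at their original magnitudes.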
+ Caffe::set_random_seed(this->seed_); + blobs_lr_w1 = 0, blobs_lr_w2 = 1, blobs_lr_b1 = 1, blobs_lr_b2 = 0; + this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, + kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); + this->net_->Forward(bottom); + this->net_->Backward(); + const vector > >& params4 = this->net_->params(); + ASSERT_EQ(num_params, params4.size()); + for (int i = 0; i < num_params; ++i) { + const Dtype param_asum = + caffe_cpu_asum(params4[i]->count(), params4[i]->cpu_diff()); + if (i == 0 || i == 3) { + EXPECT_FLOAT_EQ(0, param_asum); + } else { + EXPECT_FLOAT_EQ(param_asum, param_asums[i]); + } + } +} + +TYPED_TEST(NetTest, TestFromTo) { + typedef typename TypeParam::Dtype Dtype; + this->InitTinyNet(); + + // Run Forward and Backward, recording the data diff and loss. + Blob data; + data.ReshapeLike(*this->net_->blob_by_name("data")); + this->net_->ForwardPrefilled(); + this->net_->Backward(); + data.CopyFrom(*this->net_->blob_by_name("data"), true, true); + const Dtype *loss_ptr = this->net_->output_blobs()[0]->cpu_data(); + Dtype loss = *loss_ptr; + + // Check that combining partial Forwards gives the same loss. + for (int i = 1; i < this->net_->layers().size(); ++i) { + // Note that we skip layer zero to keep the same data. + this->net_->ForwardFromTo(1, 1); + if (i < this->net_->layers().size() - 1) { + this->net_->ForwardFrom(i + 1); + } + EXPECT_EQ(loss, *loss_ptr); + } + + // Check that combining partial Backwards gives the same data diff. + for (int i = 1; i < this->net_->layers().size(); ++i) { + this->net_->BackwardTo(i); + this->net_->BackwardFrom(i - 1); + for (int j = 0; j < data.count(); ++j) { + EXPECT_EQ(data.cpu_diff()[j], + this->net_->blob_by_name("data")->cpu_diff()[j]); + } + } +} + +class FilterNetTest : public ::testing::Test { + protected: + void RunFilterNetTest( + const string& input_param_string, const string& filtered_param_string) { + NetParameter input_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + input_param_string, &input_param)); + NetParameter expected_filtered_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + filtered_param_string, &expected_filtered_param)); + NetParameter actual_filtered_param; + Net::FilterNet(input_param, &actual_filtered_param); + EXPECT_EQ(expected_filtered_param.DebugString(), + actual_filtered_param.DebugString()); + // Also test idempotence. 
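+ // A second FilterNet pass over already-filtered output should be a no-op:
+ // every surviving layer's include/exclude rules are already satisfied by
+ // the NetState, so nothing further can be removed.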
+ NetParameter double_filtered_param; + Net::FilterNet(actual_filtered_param, &double_filtered_param); + EXPECT_EQ(actual_filtered_param.DebugString(), + double_filtered_param.DebugString()); + } +}; + +TEST_F(FilterNetTest, TestNoFilter) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterLeNetTrainTest) { + const string& input_proto = + "name: 'LeNet' " + "layer { " + " name: 'mnist' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + " data_param { " + " source: 'mnist-train-leveldb' " + " batch_size: 64 " + " } " + " transform_param { " + " scale: 0.00390625 " + " } " + " include: { phase: TRAIN } " + "} " + "layer { " + " name: 'mnist' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + " data_param { " + " source: 'mnist-test-leveldb' " + " batch_size: 100 " + " } " + " transform_param { " + " scale: 0.00390625 " + " } " + " include: { phase: TEST } " + "} " + "layer { " + " name: 'conv1' " + " type: 'Convolution' " + " bottom: 'data' " + " top: 'conv1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " convolution_param { " + " num_output: 20 " + " kernel_size: 5 " + " stride: 1 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 'constant' " + " } " + " } " + "} " + "layer { " + " name: 'ip1' " + " type: 'InnerProduct' " + " bottom: 'conv1' " + " top: 'ip1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " inner_product_param { " + " num_output: 10 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 'constant' " + " } " + " } " + "} " + "layer { " + " name: 'accuracy' " + " type: 'Accuracy' " + " bottom: 'ip1' " + " bottom: 'label' " + " top: 'accuracy' " + " include: { phase: TEST } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'ip2' " + " bottom: 'label' " + " top: 'loss' " + "} "; + const string input_proto_train = "state: { phase: TRAIN } " + input_proto; + const string input_proto_test = "state: { phase: TEST } " + input_proto; + const string output_proto_train = + "name: 'LeNet' " + "layer { " + " name: 'mnist' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + " data_param { " + " source: 'mnist-train-leveldb' " + " batch_size: 64 " + " } " + " transform_param { " + " scale: 0.00390625 " + " } " + " include: { phase: TRAIN } " + "} " + "layer { " + " name: 'conv1' " + " type: 'Convolution' " + " bottom: 'data' " + " top: 'conv1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " convolution_param { " + " num_output: 20 " + " kernel_size: 5 " + " stride: 1 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 'constant' " + " } " + " } " + "} " + "layer { " + " name: 'ip1' " + " type: 'InnerProduct' " + " bottom: 'conv1' " + " top: 'ip1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " inner_product_param { " + " num_output: 10 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 'constant' " + " } " + " } " + "} " + "layer { " + " name: 'loss' " + 
" type: 'SoftmaxWithLoss' " + " bottom: 'ip2' " + " bottom: 'label' " + " top: 'loss' " + "} "; + const string& output_proto_test = + "name: 'LeNet' " + "layer { " + " name: 'mnist' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + " data_param { " + " source: 'mnist-test-leveldb' " + " batch_size: 100 " + " } " + " transform_param { " + " scale: 0.00390625 " + " } " + " include: { phase: TEST } " + "} " + "layer { " + " name: 'conv1' " + " type: 'Convolution' " + " bottom: 'data' " + " top: 'conv1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " convolution_param { " + " num_output: 20 " + " kernel_size: 5 " + " stride: 1 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 'constant' " + " } " + " } " + "} " + "layer { " + " name: 'ip1' " + " type: 'InnerProduct' " + " bottom: 'conv1' " + " top: 'ip1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " inner_product_param { " + " num_output: 10 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 'constant' " + " } " + " } " + "} " + "layer { " + " name: 'accuracy' " + " type: 'Accuracy' " + " bottom: 'ip1' " + " bottom: 'label' " + " top: 'accuracy' " + " include: { phase: TEST } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'ip2' " + " bottom: 'label' " + " top: 'loss' " + "} "; + const string output_proto_train_explicit = + output_proto_train + " state: { phase: TRAIN } "; + const string output_proto_test_explicit = + output_proto_test + " state: { phase: TEST } "; + this->RunFilterNetTest(input_proto_train, output_proto_train_explicit); + this->RunFilterNetTest(input_proto_test, output_proto_test_explicit); +} + +TEST_F(FilterNetTest, TestFilterOutByStage) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + " include: { stage: 'mystage' } " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterOutByStage2) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { stage: 'mystage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterInByStage) { + const string& input_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 
'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { stage: 'mystage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByStage2) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " exclude: { stage: 'mystage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterOutByMultipleStage) { + const string& input_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { stage: 'mystage' stage: 'myotherstage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { stage: 'mystage' } " + "} "; + const string& output_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { stage: 'mystage' } " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMultipleStage) { + const string& input_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { stage: 'myotherstage' } " + " include: { stage: 'mystage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { stage: 'mystage' } " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMultipleStage2) { + const string& input_proto = + "state: { stage: 'mystage' stage: 'myotherstage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { stage: 'mystage' stage: 'myotherstage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { stage: 'mystage' } " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByNotStage) { + const string& input_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { not_stage: 'myotherstage' } " + "} " + "layer { " + 
" name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { not_stage: 'myotherstage' } " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterOutByNotStage) { + const string& input_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { not_stage: 'mystage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { not_stage: 'mystage' } " + "} "; + const string& output_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterOutByMinLevel) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { min_level: 3 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterOutByMaxLevel) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { max_level: -3 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMinLevel) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { min_level: 0 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMinLevel2) { + const string& input_proto = + "state: { level: 7 } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { min_level: 3 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 
'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMaxLevel) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { max_level: 0 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMaxLevel2) { + const string& input_proto = + "state: { level: -7 } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { max_level: -3 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInOutByIncludeMultiRule) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { min_level: 2 phase: TRAIN } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { min_level: 2 phase: TEST } " + "} "; + const string& input_proto_train = + "state: { level: 4 phase: TRAIN } " + input_proto; + const string& input_proto_test = + "state: { level: 4 phase: TEST } " + input_proto; + const string& output_proto_train = + "state: { level: 4 phase: TRAIN } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { min_level: 2 phase: TRAIN } " + "} "; + const string& output_proto_test = + "state: { level: 4 phase: TEST } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { min_level: 2 phase: TEST } " + "} "; + this->RunFilterNetTest(input_proto_train, output_proto_train); + this->RunFilterNetTest(input_proto_test, output_proto_test); +} + +TEST_F(FilterNetTest, TestFilterInByIncludeMultiRule) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { min_level: 2 phase: TRAIN } " + " include: { phase: TEST } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { min_level: 2 phase: TEST } " + " include: { phase: TRAIN } " + "} "; + const string& input_proto_train = + "state: { level: 2 phase: TRAIN } " + input_proto; + const string& input_proto_test = + "state: { level: 2 phase: TEST } " + input_proto; + this->RunFilterNetTest(input_proto_train, input_proto_train); 
+ this->RunFilterNetTest(input_proto_test, input_proto_test); +} + +TEST_F(FilterNetTest, TestFilterInOutByExcludeMultiRule) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " exclude: { min_level: 2 phase: TRAIN } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " exclude: { min_level: 2 phase: TEST } " + "} "; + const string& input_proto_train = + "state: { level: 4 phase: TRAIN } " + input_proto; + const string& input_proto_test = + "state: { level: 4 phase: TEST } " + input_proto; + const string& output_proto_train = + "state: { level: 4 phase: TRAIN } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " exclude: { min_level: 2 phase: TEST } " + "} "; + const string& output_proto_test = + "state: { level: 4 phase: TEST } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " exclude: { min_level: 2 phase: TRAIN } " + "} "; + this->RunFilterNetTest(input_proto_train, output_proto_train); + this->RunFilterNetTest(input_proto_test, output_proto_test); +} + +TYPED_TEST(NetTest, TestReshape) { + typedef typename TypeParam::Dtype Dtype; + // We set up bottom blobs of two different sizes, switch between + // them, and check that forward and backward both run and the results + // are the same. 
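+ // Reshaping the input blob should propagate through the net on the next
+ // ForwardPrefilled(), and switching back to the first shape should then
+ // reproduce the first output exactly (same data, same weights).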
+ Caffe::set_random_seed(this->seed_); + Caffe::set_mode(Caffe::CPU); + FillerParameter filler_param; + filler_param.set_std(1); + GaussianFiller filler(filler_param); + Blob blob1(4, 3, 9, 11); + Blob blob2(2, 3, 12, 10); + filler.Fill(&blob1); + filler.Fill(&blob2); + + this->InitReshapableNet(); + Blob* input_blob = this->net_->input_blobs()[0]; + Blob* output_blob = this->net_->output_blobs()[0]; + input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(), + blob1.width()); + caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data()); + this->net_->ForwardPrefilled(); + // call backward just to make sure it runs + this->net_->Backward(); + Blob output1(output_blob->num(), output_blob->channels(), + output_blob->height(), output_blob->width()); + caffe_copy(output1.count(), output_blob->cpu_data(), + output1.mutable_cpu_data()); + + input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(), + blob2.width()); + caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); + this->net_->ForwardPrefilled(); + this->net_->Backward(); + Blob output2(output_blob->num(), output_blob->channels(), + output_blob->height(), output_blob->width()); + caffe_copy(output2.count(), output_blob->cpu_data(), + output2.mutable_cpu_data()); + + input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(), + blob1.width()); + caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data()); + this->net_->ForwardPrefilled(); + this->net_->Backward(); + for (int i = 0; i < output1.count(); ++i) { + CHECK_EQ(*(output1.cpu_data() + i), *(output_blob->cpu_data() + i)); + } + + input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(), + blob2.width()); + caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); + this->net_->ForwardPrefilled(); + this->net_->Backward(); + for (int i = 0; i < output2.count(); ++i) { + CHECK_EQ(*(output2.cpu_data() + i), *(output_blob->cpu_data() + i)); + } +} + +TYPED_TEST(NetTest, TestSkipPropagateDown) { + // check bottom_need_backward if propagate_down is true + this->InitSkipPropNet(false); + vector vec_layer_need_backward = this->net_->layer_need_backward(); + for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + string layer_name = this->net_->layer_names()[layer_id]; + if (layer_name == "loss") { + // access to bottom_need_backward coresponding to label's blob + bool need_back = this->net_->bottom_need_backward()[layer_id][1]; + // if propagate_down is true, the loss layer will try to + // backpropagate on labels + EXPECT_TRUE(need_back) << "bottom_need_backward should be True"; + } + // layer_need_backward should be True except for data and silence layers + if (layer_name.find("data") != std::string::npos || + layer_name == "silence") { + EXPECT_FALSE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be False"; + } else { + EXPECT_TRUE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be True"; + } + } + // check bottom_need_backward if propagat_down is false + this->InitSkipPropNet(true); + vec_layer_need_backward.clear(); + vec_layer_need_backward = this->net_->layer_need_backward(); + for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + string layer_name = this->net_->layer_names()[layer_id]; + if (layer_name == "loss") { + // access to bottom_need_backward coresponding to label's blob + bool need_back = 
this->net_->bottom_need_backward()[layer_id][1]; + // if propagate_down is false, the loss layer will not try to + // backpropagate on labels + EXPECT_FALSE(need_back) << "bottom_need_backward should be False"; + } + // layer_need_backward should be False except for innerproduct and + // loss layers + if (layer_name == "innerproduct" || layer_name == "loss") { + EXPECT_TRUE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be True"; + } else { + EXPECT_FALSE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be False"; + } + } +} + +} // namespace caffe diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp new file mode 100755 index 0000000..c6e4d27 --- /dev/null +++ b/src/caffe/test/test_neuron_layer.cpp @@ -0,0 +1,842 @@ +#include +#include +#include + +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class NeuronLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + NeuronLayerTest() + : blob_bottom_(new Blob(2, 3, 4, 5)), + blob_top_(new Blob()) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~NeuronLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; + + void TestDropoutForward(const float dropout_ratio) { + LayerParameter layer_param; + // Fill in the given dropout_ratio, unless it's 0.5, in which case we don't + // set it explicitly to test that 0.5 is the default. + if (dropout_ratio != 0.5) { + layer_param.mutable_dropout_param()->set_dropout_ratio(dropout_ratio); + } + DropoutLayer layer(layer_param); + layer_param.set_phase(TRAIN); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const Dtype* bottom_data = this->blob_bottom_->cpu_data(); + const Dtype* top_data = this->blob_top_->cpu_data(); + float scale = 1. / (1. - layer_param.dropout_param().dropout_ratio()); + const int count = this->blob_bottom_->count(); + // Initialize num_kept to count the number of inputs NOT dropped out. + int num_kept = 0; + for (int i = 0; i < count; ++i) { + if (top_data[i] != 0) { + ++num_kept; + EXPECT_EQ(top_data[i], bottom_data[i] * scale); + } + } + const Dtype std_error = sqrt(dropout_ratio * (1 - dropout_ratio) / count); + // Fail if the number dropped was more than 1.96 * std_error away from the + // expected number -- requires 95% confidence that the dropout layer is not + // obeying the given dropout_ratio for test failure. 
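+ // num_kept follows Binomial(count, 1 - dropout_ratio), so the empirical
+ // dropout ratio has standard error sqrt(p * (1 - p) / count).  With the
+ // 2x3x4x5 bottom blob, count = 120; for p = 0.5 the tolerance is roughly
+ // 1.96 * sqrt(0.25 / 120) ~= 0.09, and for p = 0.75 roughly
+ // 1.96 * sqrt(0.1875 / 120) ~= 0.08.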
+ const Dtype empirical_dropout_ratio = 1 - num_kept / Dtype(count); + EXPECT_NEAR(empirical_dropout_ratio, dropout_ratio, 1.96 * std_error); + } + + void TestExpForward(const float base, const float scale, const float shift) { + LayerParameter layer_param; + layer_param.mutable_exp_param()->set_base(base); + layer_param.mutable_exp_param()->set_scale(scale); + layer_param.mutable_exp_param()->set_shift(shift); + ExpLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + layer.Forward(blob_bottom_vec_, blob_top_vec_); + const Dtype kDelta = 2e-4; + const Dtype* bottom_data = blob_bottom_->cpu_data(); + const Dtype* top_data = blob_top_->cpu_data(); + for (int i = 0; i < blob_bottom_->count(); ++i) { + const Dtype bottom_val = bottom_data[i]; + const Dtype top_val = top_data[i]; + if (base == -1) { + EXPECT_NEAR(top_val, exp(shift + scale * bottom_val), kDelta); + } else { + EXPECT_NEAR(top_val, pow(base, shift + scale * bottom_val), kDelta); + } + } + } + + void TestExpGradient(const float base, const float scale, const float shift) { + LayerParameter layer_param; + layer_param.mutable_exp_param()->set_base(base); + layer_param.mutable_exp_param()->set_scale(scale); + layer_param.mutable_exp_param()->set_shift(shift); + ExpLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, blob_bottom_vec_, blob_top_vec_); + } + + void TestPReLU(PReLULayer *layer) { + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const Dtype* bottom_data = this->blob_bottom_->cpu_data(); + const Dtype* top_data = this->blob_top_->cpu_data(); + const Dtype* slope_data = layer->blobs()[0]->cpu_data(); + int hw = this->blob_bottom_->height() * this->blob_bottom_->width(); + int channels = this->blob_bottom_->channels(); + bool channel_shared = layer->layer_param().prelu_param().channel_shared(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + int c = channel_shared ? 
0 : (i / hw) % channels; + EXPECT_EQ(top_data[i], + std::max(bottom_data[i], (Dtype)(0)) + + slope_data[c] * std::min(bottom_data[i], (Dtype)(0))); + } + } + + void LogBottomInit() { + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + Dtype* bottom_data = this->blob_bottom_->mutable_cpu_data(); + caffe_exp(this->blob_bottom_->count(), bottom_data, bottom_data); + } + + void TestLogForward(const float base, const float scale, const float shift) { + LogBottomInit(); + LayerParameter layer_param; + layer_param.mutable_log_param()->set_base(base); + layer_param.mutable_log_param()->set_scale(scale); + layer_param.mutable_log_param()->set_shift(shift); + LogLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + layer.Forward(blob_bottom_vec_, blob_top_vec_); + const Dtype kDelta = 2e-4; + const Dtype* bottom_data = blob_bottom_->cpu_data(); + const Dtype* top_data = blob_top_->cpu_data(); + for (int i = 0; i < blob_bottom_->count(); ++i) { + const Dtype bottom_val = bottom_data[i]; + const Dtype top_val = top_data[i]; + if (base == -1) { + EXPECT_NEAR(top_val, log(shift + scale * bottom_val), kDelta); + } else { + EXPECT_NEAR(top_val, log(shift + scale * bottom_val) / log(base), + kDelta); + } + } + } + + void TestLogGradient(const float base, const float scale, const float shift) { + LogBottomInit(); + LayerParameter layer_param; + layer_param.mutable_log_param()->set_base(base); + layer_param.mutable_log_param()->set_scale(scale); + layer_param.mutable_log_param()->set_shift(shift); + LogLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientEltwise(&layer, blob_bottom_vec_, blob_top_vec_); + } +}; + +TYPED_TEST_CASE(NeuronLayerTest, TestDtypesAndDevices); + +TYPED_TEST(NeuronLayerTest, TestAbsVal) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + AbsValLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* bottom_data = this->blob_bottom_->cpu_data(); + const Dtype* top_data = this->blob_top_->cpu_data(); + const int count = this->blob_bottom_->count(); + for (int i = 0; i < count; ++i) { + EXPECT_EQ(top_data[i], fabs(bottom_data[i])); + } +} + +TYPED_TEST(NeuronLayerTest, TestAbsGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + AbsValLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(NeuronLayerTest, TestReLU) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const Dtype* bottom_data = this->blob_bottom_->cpu_data(); + const Dtype* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_GE(top_data[i], 0.); + EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]); + } +} + +TYPED_TEST(NeuronLayerTest, TestReLUGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ReLULayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(NeuronLayerTest, TestReLUWithNegativeSlope) { 
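+ // Leaky ReLU: f(x) = x for x > 0 and f(x) = negative_slope * x otherwise,
+ // so with negative_slope = 0.01 each output is either the input itself or
+ // the input scaled by 0.01.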
+ typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + "relu_param { negative_slope: 0.01 }", &layer_param)); + ReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const Dtype* bottom_data = this->blob_bottom_->cpu_data(); + const Dtype* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + if (top_data[i] >= 0) { + EXPECT_FLOAT_EQ(top_data[i], bottom_data[i]); + } else { + EXPECT_FLOAT_EQ(top_data[i], bottom_data[i] * 0.01); + } + } +} + +TYPED_TEST(NeuronLayerTest, TestReLUGradientWithNegativeSlope) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + "relu_param { negative_slope: 0.01 }", &layer_param)); + ReLULayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(NeuronLayerTest, TestSigmoid) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + SigmoidLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const Dtype* bottom_data = this->blob_bottom_->cpu_data(); + const Dtype* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_FLOAT_EQ(top_data[i], 1. / (1 + exp(-bottom_data[i]))); + // check that we squashed the value between 0 and 1 + EXPECT_GE(top_data[i], 0.); + EXPECT_LE(top_data[i], 1.); + } +} + +TYPED_TEST(NeuronLayerTest, TestSigmoidGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + SigmoidLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(NeuronLayerTest, TestTanH) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + TanHLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Test exact values + for (int i = 0; i < this->blob_bottom_->num(); ++i) { + for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + for (int k = 0; k < this->blob_bottom_->height(); ++k) { + for (int l = 0; l < this->blob_bottom_->width(); ++l) { + EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); + EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4, + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); + } + } + } + } +} + +TYPED_TEST(NeuronLayerTest, TestTanHGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + TanHLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(NeuronLayerTest, TestExpLayer) { + typedef typename TypeParam::Dtype Dtype; + // Test default base of "-1" -- should actually set base := e. 
+ const Dtype kBase = -1; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestExpForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestExpGradient) { + typedef typename TypeParam::Dtype Dtype; + // Test default base of "-1" -- should actually set base := e. + const Dtype kBase = -1; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestExpGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestExpLayerBase2) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestExpForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestExpGradientBase2) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestExpGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestExpLayerBase2Shift1) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 1; + this->TestExpForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Shift1) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 1; + this->TestExpGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestExpLayerBase2Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 0; + this->TestExpForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 0; + this->TestExpGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestExpLayerBase2Shift1Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 1; + this->TestExpForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Shift1Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 1; + this->TestExpGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayer) { + typedef typename TypeParam::Dtype Dtype; + // Test default base of "-1" -- should actually set base := e. + const Dtype kBase = -1; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradient) { + typedef typename TypeParam::Dtype Dtype; + // Test default base of "-1" -- should actually set base := e. 
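+ // LogLayer computes top = log(shift + scale * bottom) / log(base), with the
+ // natural log as the special case here.  LogBottomInit exponentiates the
+ // Gaussian fill so shift + scale * bottom stays positive, keeping the log
+ // and its gradient well defined for the checker.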
+ const Dtype kBase = -1; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayerBase2) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradientBase2) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 1; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 1; + this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 0; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 0; + this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 1; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 1; + this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestDropoutHalf) { + const float kDropoutRatio = 0.5; + this->TestDropoutForward(kDropoutRatio); +} + +TYPED_TEST(NeuronLayerTest, TestDropoutThreeQuarters) { + const float kDropoutRatio = 0.75; + this->TestDropoutForward(kDropoutRatio); +} + +TYPED_TEST(NeuronLayerTest, TestDropoutTestPhase) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.set_phase(TEST); + DropoutLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const Dtype* bottom_data = this->blob_bottom_->cpu_data(); + const Dtype* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + if (top_data[i] != 0) { + EXPECT_EQ(top_data[i], bottom_data[i]); + } + } +} + +TYPED_TEST(NeuronLayerTest, TestDropoutGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.set_phase(TRAIN); + DropoutLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(NeuronLayerTest, TestDropoutGradientTest) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.set_phase(TEST); + DropoutLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(NeuronLayerTest, TestBNLL) { + typedef typename 
TypeParam::Dtype Dtype; + LayerParameter layer_param; + BNLLLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const Dtype* bottom_data = this->blob_bottom_->cpu_data(); + const Dtype* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_GE(top_data[i], 0.); + EXPECT_GE(top_data[i], bottom_data[i]); + } +} + +TYPED_TEST(NeuronLayerTest, TestBNLLGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + BNLLLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(NeuronLayerTest, TestPReLUParam) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* slopes = layer.blobs()[0]->cpu_data(); + int count = layer.blobs()[0]->count(); + for (int i = 0; i < count; ++i, ++slopes) { + EXPECT_EQ(*slopes, 0.25); + } +} + +TYPED_TEST(NeuronLayerTest, TestPReLUForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(layer.blobs()[0].get()); + this->TestPReLU(&layer); +} + +TYPED_TEST(NeuronLayerTest, TestPReLUForwardChannelShared) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_prelu_param()->set_channel_shared(true); + PReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + this->TestPReLU(&layer); +} + +TYPED_TEST(NeuronLayerTest, TestPReLUGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(layer.blobs()[0].get()); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(NeuronLayerTest, TestPReLUGradientChannelShared) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_prelu_param()->set_channel_shared(true); + PReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(NeuronLayerTest, TestPReLUConsistencyReLU) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter prelu_layer_param; + LayerParameter relu_layer_param; + relu_layer_param.mutable_relu_param()->set_negative_slope(0.25); + PReLULayer prelu(prelu_layer_param); + ReLULayer relu(relu_layer_param); + // Set up blobs + vector*> blob_bottom_vec_2; + vector*> blob_top_vec_2; + shared_ptr > blob_bottom_2(new Blob()); + shared_ptr > blob_top_2(new Blob()); + blob_bottom_vec_2.push_back(blob_bottom_2.get()); + blob_top_vec_2.push_back(blob_top_2.get()); + blob_bottom_2->CopyFrom(*this->blob_bottom_, false, true); + // SetUp layers + prelu.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + relu.SetUp(blob_bottom_vec_2, blob_top_vec_2); + // Check forward + 
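+  // Both layers should compute f(x) = max(0, x) + 0.25 * min(0, x): PReLU is
+  // left at its default slope of 0.25 (see TestPReLUParam above) and ReLU was
+  // given negative_slope 0.25, so forward outputs and backward diffs must agree.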
prelu.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + relu.Forward(this->blob_bottom_vec_, blob_top_vec_2); + for (int s = 0; s < blob_top_2->count(); ++s) { + EXPECT_EQ(this->blob_top_->cpu_data()[s], blob_top_2->cpu_data()[s]); + } + // Check backward + shared_ptr > tmp_blob(new Blob()); + tmp_blob->ReshapeLike(*blob_top_2.get()); + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(tmp_blob.get()); + caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(), + this->blob_top_->mutable_cpu_diff()); + caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(), + blob_top_2->mutable_cpu_diff()); + vector propagate_down; + propagate_down.push_back(true); + prelu.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); + relu.Backward(blob_top_vec_2, propagate_down, blob_bottom_vec_2); + for (int s = 0; s < blob_bottom_2->count(); ++s) { + EXPECT_EQ(this->blob_bottom_->cpu_diff()[s], blob_bottom_2->cpu_diff()[s]); + } +} + +TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { + typedef typename TypeParam::Dtype Dtype; + // Set layer parameters + LayerParameter ip_layer_param; + LayerParameter prelu_layer_param; + InnerProductParameter *ip_param = + ip_layer_param.mutable_inner_product_param(); + ip_param->mutable_weight_filler()->set_type("gaussian"); + ip_param->set_num_output(3); + InnerProductLayer ip(ip_layer_param); + PReLULayer prelu(prelu_layer_param); + InnerProductLayer ip2(ip_layer_param); + PReLULayer prelu2(prelu_layer_param); + // Set up blobs + vector*> blob_bottom_vec_2; + vector*> blob_middle_vec_2; + vector*> blob_top_vec_2; + shared_ptr > blob_bottom_2(new Blob()); + shared_ptr > blob_middle_2(new Blob()); + shared_ptr > blob_top_2(new Blob()); + blob_bottom_vec_2.push_back(blob_bottom_2.get()); + blob_middle_vec_2.push_back(blob_middle_2.get()); + blob_top_vec_2.push_back(blob_top_2.get()); + blob_bottom_2->CopyFrom(*this->blob_bottom_, false, true); + // SetUp layers + ip.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + prelu.SetUp(this->blob_top_vec_, this->blob_top_vec_); + ip2.SetUp(blob_bottom_vec_2, blob_middle_vec_2); + prelu2.SetUp(blob_middle_vec_2, blob_top_vec_2); + caffe_copy(ip2.blobs()[0]->count(), ip.blobs()[0]->cpu_data(), + ip2.blobs()[0]->mutable_cpu_data()); + // Forward in-place + ip.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + prelu.Forward(this->blob_top_vec_, this->blob_top_vec_); + // Forward non-in-place + ip2.Forward(blob_bottom_vec_2, blob_middle_vec_2); + prelu2.Forward(blob_middle_vec_2, blob_top_vec_2); + // Check numbers + for (int s = 0; s < blob_top_2->count(); ++s) { + EXPECT_EQ(this->blob_top_->cpu_data()[s], blob_top_2->cpu_data()[s]); + } + // Fill top diff with random numbers + shared_ptr > tmp_blob(new Blob()); + tmp_blob->ReshapeLike(*blob_top_2.get()); + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(tmp_blob.get()); + caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(), + this->blob_top_->mutable_cpu_diff()); + caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(), + blob_top_2->mutable_cpu_diff()); + // Backward in-place + vector propagate_down; + propagate_down.push_back(true); + prelu.Backward(this->blob_top_vec_, propagate_down, this->blob_top_vec_); + ip.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); + // Backward non-in-place + prelu2.Backward(blob_top_vec_2, propagate_down, blob_middle_vec_2); + ip2.Backward(blob_middle_vec_2, propagate_down, blob_bottom_vec_2); + // Check numbers + for (int s = 0; s < 
blob_bottom_2->count(); ++s) { + EXPECT_EQ(this->blob_bottom_->cpu_diff()[s], blob_bottom_2->cpu_diff()[s]); + } + for (int s = 0; s < ip.blobs()[0]->count(); ++s) { + EXPECT_EQ(ip.blobs()[0]->cpu_diff()[s], ip2.blobs()[0]->cpu_diff()[s]); + } + for (int s = 0; s < ip.blobs()[1]->count(); ++s) { + EXPECT_EQ(ip.blobs()[1]->cpu_diff()[s], ip2.blobs()[1]->cpu_diff()[s]); + } + for (int s = 0; s < prelu.blobs()[0]->count(); ++s) { + EXPECT_EQ(prelu.blobs()[0]->cpu_diff()[s], + prelu2.blobs()[0]->cpu_diff()[s]); + } +} + +#ifdef USE_CUDNN +template +class CuDNNNeuronLayerTest : public GPUDeviceTest { + protected: + CuDNNNeuronLayerTest() + : blob_bottom_(new Blob(2, 3, 4, 5)), + blob_top_(new Blob()) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~CuDNNNeuronLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(CuDNNNeuronLayerTest, TestDtypes); + +TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { + LayerParameter layer_param; + CuDNNReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_GE(top_data[i], 0.); + EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]); + } +} + +TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { + LayerParameter layer_param; + CuDNNReLULayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { + LayerParameter layer_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + "relu_param { negative_slope: 0.01 }", &layer_param)); + CuDNNReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + if (top_data[i] >= 0) { + EXPECT_FLOAT_EQ(top_data[i], bottom_data[i]); + } else { + EXPECT_FLOAT_EQ(top_data[i], bottom_data[i] * 0.01); + } + } +} + +TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { + LayerParameter layer_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + "relu_param { negative_slope: 0.01 }", &layer_param)); + CuDNNReLULayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { + LayerParameter layer_param; + CuDNNSigmoidLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int i 
= 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_FLOAT_EQ(top_data[i], 1. / (1 + exp(-bottom_data[i]))); + // check that we squashed the value between 0 and 1 + EXPECT_GE(top_data[i], 0.); + EXPECT_LE(top_data[i], 1.); + } +} + +TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) { + LayerParameter layer_param; + CuDNNSigmoidLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { + LayerParameter layer_param; + CuDNNTanHLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Test exact values + for (int i = 0; i < this->blob_bottom_->num(); ++i) { + for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + for (int k = 0; k < this->blob_bottom_->height(); ++k) { + for (int l = 0; l < this->blob_bottom_->width(); ++l) { + EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); + EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4, + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); + } + } + } + } +} + +TYPED_TEST(CuDNNNeuronLayerTest, TestTanHGradientCuDNN) { + LayerParameter layer_param; + CuDNNTanHLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} +#endif + +} // namespace caffe diff --git a/src/caffe/test/test_platform.cpp b/src/caffe/test/test_platform.cpp new file mode 100755 index 0000000..f3513e0 --- /dev/null +++ b/src/caffe/test/test_platform.cpp @@ -0,0 +1,57 @@ +#ifndef CPU_ONLY + +#include +#include + +#include "glog/logging.h" +#include "gtest/gtest.h" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +class PlatformTest : public ::testing::Test {}; + +TEST_F(PlatformTest, TestInitialization) { + printf("Major revision number: %d\n", CAFFE_TEST_CUDA_PROP.major); + printf("Minor revision number: %d\n", CAFFE_TEST_CUDA_PROP.minor); + printf("Name: %s\n", CAFFE_TEST_CUDA_PROP.name); + printf("Total global memory: %lu\n", + CAFFE_TEST_CUDA_PROP.totalGlobalMem); + printf("Total shared memory per block: %lu\n", + CAFFE_TEST_CUDA_PROP.sharedMemPerBlock); + printf("Total registers per block: %d\n", + CAFFE_TEST_CUDA_PROP.regsPerBlock); + printf("Warp size: %d\n", + CAFFE_TEST_CUDA_PROP.warpSize); + printf("Maximum memory pitch: %lu\n", + CAFFE_TEST_CUDA_PROP.memPitch); + printf("Maximum threads per block: %d\n", + CAFFE_TEST_CUDA_PROP.maxThreadsPerBlock); + for (int i = 0; i < 3; ++i) + printf("Maximum dimension %d of block: %d\n", i, + CAFFE_TEST_CUDA_PROP.maxThreadsDim[i]); + for (int i = 0; i < 3; ++i) + printf("Maximum dimension %d of grid: %d\n", i, + CAFFE_TEST_CUDA_PROP.maxGridSize[i]); + printf("Clock rate: %d\n", CAFFE_TEST_CUDA_PROP.clockRate); + printf("Total constant memory: %lu\n", + CAFFE_TEST_CUDA_PROP.totalConstMem); + printf("Texture alignment: %lu\n", + CAFFE_TEST_CUDA_PROP.textureAlignment); + printf("Concurrent copy and execution: %s\n", + (CAFFE_TEST_CUDA_PROP.deviceOverlap ? 
"Yes" : "No")); + printf("Number of multiprocessors: %d\n", + CAFFE_TEST_CUDA_PROP.multiProcessorCount); + printf("Kernel execution timeout: %s\n", + (CAFFE_TEST_CUDA_PROP.kernelExecTimeoutEnabled ? "Yes" : "No")); + printf("Unified virtual addressing: %s\n", + (CAFFE_TEST_CUDA_PROP.unifiedAddressing ? "Yes" : "No")); + EXPECT_TRUE(true); +} + +} // namespace caffe + +#endif // CPU_ONLY diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp new file mode 100755 index 0000000..69f2d5c --- /dev/null +++ b/src/caffe/test/test_pooling_layer.cpp @@ -0,0 +1,1182 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class PoolingLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + PoolingLayerTest() + : blob_bottom_(new Blob()), + blob_top_(new Blob()), + blob_top_mask_(new Blob()) {} + virtual void SetUp() { + Caffe::set_random_seed(1701); + blob_bottom_->Reshape(2, 3, 6, 5); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~PoolingLayerTest() { + delete blob_bottom_; + delete blob_top_; + delete blob_top_mask_; + } + Blob* const blob_bottom_; + Blob* const blob_top_; + Blob* const blob_top_mask_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; + // Test for 2x 2 square pooling layer + void TestForwardSquare() { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + const int num = 2; + const int channels = 2; + blob_bottom_->Reshape(num, channels, 3, 5); + // Input: 2x 2 channels of: + // [1 2 5 2 3] + // [9 4 1 4 8] + // [1 2 5 2 3] + for (int i = 0; i < 15 * num * channels; i += 15) { + blob_bottom_->mutable_cpu_data()[i + 0] = 1; + blob_bottom_->mutable_cpu_data()[i + 1] = 2; + blob_bottom_->mutable_cpu_data()[i + 2] = 5; + blob_bottom_->mutable_cpu_data()[i + 3] = 2; + blob_bottom_->mutable_cpu_data()[i + 4] = 3; + blob_bottom_->mutable_cpu_data()[i + 5] = 9; + blob_bottom_->mutable_cpu_data()[i + 6] = 4; + blob_bottom_->mutable_cpu_data()[i + 7] = 1; + blob_bottom_->mutable_cpu_data()[i + 8] = 4; + blob_bottom_->mutable_cpu_data()[i + 9] = 8; + blob_bottom_->mutable_cpu_data()[i + 10] = 1; + blob_bottom_->mutable_cpu_data()[i + 11] = 2; + blob_bottom_->mutable_cpu_data()[i + 12] = 5; + blob_bottom_->mutable_cpu_data()[i + 13] = 2; + blob_bottom_->mutable_cpu_data()[i + 14] = 3; + } + PoolingLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_->num(), num); + EXPECT_EQ(blob_top_->channels(), channels); + EXPECT_EQ(blob_top_->height(), 2); + EXPECT_EQ(blob_top_->width(), 4); + if (blob_top_vec_.size() > 1) { + EXPECT_EQ(blob_top_mask_->num(), num); + EXPECT_EQ(blob_top_mask_->channels(), channels); + EXPECT_EQ(blob_top_mask_->height(), 2); + EXPECT_EQ(blob_top_mask_->width(), 4); + } + layer.Forward(blob_bottom_vec_, blob_top_vec_); + // Expected output: 2x 2 channels of: + // [9 5 5 8] + // [9 5 5 8] + for (int i = 0; i < 8 * num * channels; i += 8) { + EXPECT_EQ(blob_top_->cpu_data()[i + 0], 9); + 
EXPECT_EQ(blob_top_->cpu_data()[i + 1], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 2], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 3], 8); + EXPECT_EQ(blob_top_->cpu_data()[i + 4], 9); + EXPECT_EQ(blob_top_->cpu_data()[i + 5], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 6], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 7], 8); + } + if (blob_top_vec_.size() > 1) { + // Expected mask output: 2x 2 channels of: + // [5 2 2 9] + // [5 12 12 9] + for (int i = 0; i < 8 * num * channels; i += 8) { + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 0], 5); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 1], 2); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 2], 2); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 3], 9); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 4], 5); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 5], 12); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 6], 12); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 7], 9); + } + } + } + // Test for 3x 2 rectangular pooling layer with kernel_h > kernel_w + void TestForwardRectHigh() { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(3); + pooling_param->set_kernel_w(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + const int num = 2; + const int channels = 2; + blob_bottom_->Reshape(num, channels, 6, 6); + // Input: 2x 2 channels of: + // [35 1 6 26 19 24] + // [ 3 32 7 21 23 25] + // [31 9 2 22 27 20] + // [ 8 28 33 17 10 15] + // [30 5 34 12 14 16] + // [ 4 36 29 13 18 11] + // (this is generated by magic(6) in MATLAB) + for (int i = 0; i < 36 * num * channels; i += 36) { + blob_bottom_->mutable_cpu_data()[i + 0] = 35; + blob_bottom_->mutable_cpu_data()[i + 1] = 1; + blob_bottom_->mutable_cpu_data()[i + 2] = 6; + blob_bottom_->mutable_cpu_data()[i + 3] = 26; + blob_bottom_->mutable_cpu_data()[i + 4] = 19; + blob_bottom_->mutable_cpu_data()[i + 5] = 24; + blob_bottom_->mutable_cpu_data()[i + 6] = 3; + blob_bottom_->mutable_cpu_data()[i + 7] = 32; + blob_bottom_->mutable_cpu_data()[i + 8] = 7; + blob_bottom_->mutable_cpu_data()[i + 9] = 21; + blob_bottom_->mutable_cpu_data()[i + 10] = 23; + blob_bottom_->mutable_cpu_data()[i + 11] = 25; + blob_bottom_->mutable_cpu_data()[i + 12] = 31; + blob_bottom_->mutable_cpu_data()[i + 13] = 9; + blob_bottom_->mutable_cpu_data()[i + 14] = 2; + blob_bottom_->mutable_cpu_data()[i + 15] = 22; + blob_bottom_->mutable_cpu_data()[i + 16] = 27; + blob_bottom_->mutable_cpu_data()[i + 17] = 20; + blob_bottom_->mutable_cpu_data()[i + 18] = 8; + blob_bottom_->mutable_cpu_data()[i + 19] = 28; + blob_bottom_->mutable_cpu_data()[i + 20] = 33; + blob_bottom_->mutable_cpu_data()[i + 21] = 17; + blob_bottom_->mutable_cpu_data()[i + 22] = 10; + blob_bottom_->mutable_cpu_data()[i + 23] = 15; + blob_bottom_->mutable_cpu_data()[i + 24] = 30; + blob_bottom_->mutable_cpu_data()[i + 25] = 5; + blob_bottom_->mutable_cpu_data()[i + 26] = 34; + blob_bottom_->mutable_cpu_data()[i + 27] = 12; + blob_bottom_->mutable_cpu_data()[i + 28] = 14; + blob_bottom_->mutable_cpu_data()[i + 29] = 16; + blob_bottom_->mutable_cpu_data()[i + 30] = 4; + blob_bottom_->mutable_cpu_data()[i + 31] = 36; + blob_bottom_->mutable_cpu_data()[i + 32] = 29; + blob_bottom_->mutable_cpu_data()[i + 33] = 13; + blob_bottom_->mutable_cpu_data()[i + 34] = 18; + blob_bottom_->mutable_cpu_data()[i + 35] = 11; + } + PoolingLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_->num(), num); + EXPECT_EQ(blob_top_->channels(), channels); + 
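+    // With the 6x6 input, kernel_h = 3, kernel_w = 2 and the default stride
+    // of 1, the pooled output should be (6 - 3) + 1 = 4 rows by
+    // (6 - 2) + 1 = 5 columns, as checked next.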
EXPECT_EQ(blob_top_->height(), 4); + EXPECT_EQ(blob_top_->width(), 5); + if (blob_top_vec_.size() > 1) { + EXPECT_EQ(blob_top_mask_->num(), num); + EXPECT_EQ(blob_top_mask_->channels(), channels); + EXPECT_EQ(blob_top_mask_->height(), 4); + EXPECT_EQ(blob_top_mask_->width(), 5); + } + layer.Forward(blob_bottom_vec_, blob_top_vec_); + // Expected output: 2x 2 channels of: + // [35 32 26 27 27] + // [32 33 33 27 27] + // [31 34 34 27 27] + // [36 36 34 18 18] + for (int i = 0; i < 20 * num * channels; i += 20) { + EXPECT_EQ(blob_top_->cpu_data()[i + 0], 35); + EXPECT_EQ(blob_top_->cpu_data()[i + 1], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 2], 26); + EXPECT_EQ(blob_top_->cpu_data()[i + 3], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 4], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 5], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 6], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 7], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 8], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 9], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 10], 31); + EXPECT_EQ(blob_top_->cpu_data()[i + 11], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 12], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 13], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 14], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 15], 36); + EXPECT_EQ(blob_top_->cpu_data()[i + 16], 36); + EXPECT_EQ(blob_top_->cpu_data()[i + 17], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 18], 18); + EXPECT_EQ(blob_top_->cpu_data()[i + 19], 18); + } + if (blob_top_vec_.size() > 1) { + // [ 1 8 4 17 17] + // [ 8 21 21 17 17] + // [13 27 27 17 17] + // [32 32 27 35 35] + for (int i = 0; i < 20 * num * channels; i += 20) { + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 0], 0); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 1], 7); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 2], 3); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 3], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 4], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 5], 7); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 6], 20); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 7], 20); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 8], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 9], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 10], 12); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 11], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 12], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 13], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 14], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 15], 31); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 16], 31); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 17], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 18], 34); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 19], 34); + } + } + } + // Test for rectangular pooling layer with kernel_w > kernel_h + void TestForwardRectWide() { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(2); + pooling_param->set_kernel_w(3); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + const int num = 2; + const int channels = 2; + blob_bottom_->Reshape(num, channels, 6, 6); + // Input: 2x 2 channels of: + // [35 1 6 26 19 24] + // [ 3 32 7 21 23 25] + // [31 9 2 22 27 20] + // [ 8 28 33 17 10 15] + // [30 5 34 12 14 16] + // [ 4 36 29 13 18 11] + // (this is generated by magic(6) in MATLAB) + for (int i = 0; i < 36 * num * channels; i += 36) { + blob_bottom_->mutable_cpu_data()[i + 0] = 35; + blob_bottom_->mutable_cpu_data()[i + 1] = 1; + blob_bottom_->mutable_cpu_data()[i + 2] = 6; + 
blob_bottom_->mutable_cpu_data()[i + 3] = 26; + blob_bottom_->mutable_cpu_data()[i + 4] = 19; + blob_bottom_->mutable_cpu_data()[i + 5] = 24; + blob_bottom_->mutable_cpu_data()[i + 6] = 3; + blob_bottom_->mutable_cpu_data()[i + 7] = 32; + blob_bottom_->mutable_cpu_data()[i + 8] = 7; + blob_bottom_->mutable_cpu_data()[i + 9] = 21; + blob_bottom_->mutable_cpu_data()[i + 10] = 23; + blob_bottom_->mutable_cpu_data()[i + 11] = 25; + blob_bottom_->mutable_cpu_data()[i + 12] = 31; + blob_bottom_->mutable_cpu_data()[i + 13] = 9; + blob_bottom_->mutable_cpu_data()[i + 14] = 2; + blob_bottom_->mutable_cpu_data()[i + 15] = 22; + blob_bottom_->mutable_cpu_data()[i + 16] = 27; + blob_bottom_->mutable_cpu_data()[i + 17] = 20; + blob_bottom_->mutable_cpu_data()[i + 18] = 8; + blob_bottom_->mutable_cpu_data()[i + 19] = 28; + blob_bottom_->mutable_cpu_data()[i + 20] = 33; + blob_bottom_->mutable_cpu_data()[i + 21] = 17; + blob_bottom_->mutable_cpu_data()[i + 22] = 10; + blob_bottom_->mutable_cpu_data()[i + 23] = 15; + blob_bottom_->mutable_cpu_data()[i + 24] = 30; + blob_bottom_->mutable_cpu_data()[i + 25] = 5; + blob_bottom_->mutable_cpu_data()[i + 26] = 34; + blob_bottom_->mutable_cpu_data()[i + 27] = 12; + blob_bottom_->mutable_cpu_data()[i + 28] = 14; + blob_bottom_->mutable_cpu_data()[i + 29] = 16; + blob_bottom_->mutable_cpu_data()[i + 30] = 4; + blob_bottom_->mutable_cpu_data()[i + 31] = 36; + blob_bottom_->mutable_cpu_data()[i + 32] = 29; + blob_bottom_->mutable_cpu_data()[i + 33] = 13; + blob_bottom_->mutable_cpu_data()[i + 34] = 18; + blob_bottom_->mutable_cpu_data()[i + 35] = 11; + } + PoolingLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_->num(), num); + EXPECT_EQ(blob_top_->channels(), channels); + EXPECT_EQ(blob_top_->height(), 5); + EXPECT_EQ(blob_top_->width(), 4); + if (blob_top_vec_.size() > 1) { + EXPECT_EQ(blob_top_mask_->num(), num); + EXPECT_EQ(blob_top_mask_->channels(), channels); + EXPECT_EQ(blob_top_mask_->height(), 5); + EXPECT_EQ(blob_top_mask_->width(), 4); + } + layer.Forward(blob_bottom_vec_, blob_top_vec_); + // Expected output: 2x 2 channels of: + // [35 32 26 26] + // [32 32 27 27] + // [33 33 33 27] + // [34 34 34 17] + // [36 36 34 18] + for (int i = 0; i < 20 * num * channels; i += 20) { + EXPECT_EQ(blob_top_->cpu_data()[i + 0], 35); + EXPECT_EQ(blob_top_->cpu_data()[i + 1], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 2], 26); + EXPECT_EQ(blob_top_->cpu_data()[i + 3], 26); + EXPECT_EQ(blob_top_->cpu_data()[i + 4], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 5], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 6], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 7], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 8], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 9], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 10], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 11], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 12], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 13], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 14], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 15], 17); + EXPECT_EQ(blob_top_->cpu_data()[i + 16], 36); + EXPECT_EQ(blob_top_->cpu_data()[i + 17], 36); + EXPECT_EQ(blob_top_->cpu_data()[i + 18], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 19], 18); + } + if (blob_top_vec_.size() > 1) { + // [ 1 8 4 4] + // [ 8 8 17 17] + // [21 21 21 17] + // [27 27 27 22] + // [32 32 27 35] + for (int i = 0; i < 20 * num * channels; i += 20) { + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 0], 0); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 1], 7); + 
EXPECT_EQ(blob_top_mask_->cpu_data()[i + 2], 3); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 3], 3); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 4], 7); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 5], 7); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 6], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 7], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 8], 20); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 9], 20); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 10], 20); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 11], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 12], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 13], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 14], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 15], 21); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 16], 31); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 17], 31); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 18], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 19], 34); + } + } + } +}; + +TYPED_TEST_CASE(PoolingLayerTest, TestDtypesAndDevices); + +TYPED_TEST(PoolingLayerTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 2); +} + +TYPED_TEST(PoolingLayerTest, TestSetupPadded) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(1); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 4); + EXPECT_EQ(this->blob_top_->width(), 3); +} + +TYPED_TEST(PoolingLayerTest, TestSetupGlobalPooling) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_global_pooling(true); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +/* +TYPED_TEST(PoolingLayerTest, PrintBackward) { + LayerParameter layer_param; + layer_param.set_kernelsize(3); + layer_param.set_stride(2); + layer_param.set_pool(LayerParameter_PoolMethod_MAX); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + cout << "bottom data " << i << " " << this->blob_bottom_->cpu_data()[i] << endl; + } + for (int i = 0; i < this->blob_top_->count(); ++i) { + cout << "top data " << i << " " << this->blob_top_->cpu_data()[i] << endl; + } + + for (int i 
= 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = i; + } + layer.Backward(this->blob_top_vec_, true, this->blob_bottom_vec_); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + cout << "bottom diff " << i << " " << this->blob_bottom_->cpu_diff()[i] << endl; + } +} +*/ + +TYPED_TEST(PoolingLayerTest, TestForwardMax) { + this->TestForwardSquare(); + this->TestForwardRectHigh(); + this->TestForwardRectWide(); +} + +TYPED_TEST(PoolingLayerTest, TestForwardMaxTopMask) { + this->blob_top_vec_.push_back(this->blob_top_mask_); + this->TestForwardSquare(); + this->TestForwardRectHigh(); + this->TestForwardRectWide(); +} + +TYPED_TEST(PoolingLayerTest, TestGradientMax) { + typedef typename TypeParam::Dtype Dtype; + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pad(1); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + PoolingLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } + } +} + +TYPED_TEST(PoolingLayerTest, TestForwardMaxPadded) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + this->blob_bottom_->Reshape(1, 1, 3, 3); + // Input: + // [ 1 2 4 ] + // [ 2 3 2 ] + // [ 4 2 1 ] + this->blob_bottom_->mutable_cpu_data()[0] = 1; + this->blob_bottom_->mutable_cpu_data()[1] = 2; + this->blob_bottom_->mutable_cpu_data()[2] = 4; + this->blob_bottom_->mutable_cpu_data()[3] = 2; + this->blob_bottom_->mutable_cpu_data()[4] = 3; + this->blob_bottom_->mutable_cpu_data()[5] = 2; + this->blob_bottom_->mutable_cpu_data()[6] = 4; + this->blob_bottom_->mutable_cpu_data()[7] = 2; + this->blob_bottom_->mutable_cpu_data()[8] = 1; + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Dtype epsilon = 1e-8; + // Output: + // [ 1 4 4 ] + // [ 4 4 4 ] + // [ 4 4 1 ] + EXPECT_NEAR(this->blob_top_->cpu_data()[0], 1, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[2], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[3], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[4], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[5], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[6], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[8], 1, epsilon); +} + +TYPED_TEST(PoolingLayerTest, TestGradientMaxTopMask) { + typedef typename TypeParam::Dtype Dtype; + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + 
pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + this->blob_top_vec_.push_back(this->blob_top_mask_); + PoolingLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + this->blob_top_vec_.pop_back(); + } + } +} + +TYPED_TEST(PoolingLayerTest, TestForwardAve) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(1); + pooling_param->set_pad(1); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + this->blob_bottom_->Reshape(1, 1, 3, 3); + FillerParameter filler_param; + filler_param.set_value(Dtype(2)); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Dtype epsilon = 1e-5; + EXPECT_NEAR(this->blob_top_->cpu_data()[0], 8.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4.0 / 3, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[2], 8.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[3], 4.0 / 3, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[4], 2.0 , epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[5], 4.0 / 3, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[6], 8.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4.0 / 3, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[8], 8.0 / 9, epsilon); +} + +TYPED_TEST(PoolingLayerTest, TestGradientAve) { + typedef typename TypeParam::Dtype Dtype; + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + PoolingLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } + } +} + +TYPED_TEST(PoolingLayerTest, TestGradientAvePadded) { + typedef typename TypeParam::Dtype Dtype; + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pad(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + PoolingLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } + } +} + +#ifdef USE_CUDNN +template +class CuDNNPoolingLayerTest : public GPUDeviceTest { + protected: + CuDNNPoolingLayerTest() + : blob_bottom_(new Blob()), + blob_top_(new Blob()), + blob_top_mask_(new Blob()) {} + virtual void SetUp() { + Caffe::set_random_seed(1701); + blob_bottom_->Reshape(2, 3, 6, 5); + // fill the values + 
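+    // The default bottom is a 2x3x6x5 blob filled with Gaussian noise;
+    // individual forward tests reshape it to the fixed inputs they need.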
FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~CuDNNPoolingLayerTest() { + delete blob_bottom_; + delete blob_top_; + delete blob_top_mask_; + } + Blob* const blob_bottom_; + Blob* const blob_top_; + Blob* const blob_top_mask_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; + // Test for 2x 2 square pooling layer + void TestForwardSquare() { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + const int num = 2; + const int channels = 2; + blob_bottom_->Reshape(num, channels, 3, 5); + // Input: 2x 2 channels of: + // [1 2 5 2 3] + // [9 4 1 4 8] + // [1 2 5 2 3] + for (int i = 0; i < 15 * num * channels; i += 15) { + blob_bottom_->mutable_cpu_data()[i + 0] = 1; + blob_bottom_->mutable_cpu_data()[i + 1] = 2; + blob_bottom_->mutable_cpu_data()[i + 2] = 5; + blob_bottom_->mutable_cpu_data()[i + 3] = 2; + blob_bottom_->mutable_cpu_data()[i + 4] = 3; + blob_bottom_->mutable_cpu_data()[i + 5] = 9; + blob_bottom_->mutable_cpu_data()[i + 6] = 4; + blob_bottom_->mutable_cpu_data()[i + 7] = 1; + blob_bottom_->mutable_cpu_data()[i + 8] = 4; + blob_bottom_->mutable_cpu_data()[i + 9] = 8; + blob_bottom_->mutable_cpu_data()[i + 10] = 1; + blob_bottom_->mutable_cpu_data()[i + 11] = 2; + blob_bottom_->mutable_cpu_data()[i + 12] = 5; + blob_bottom_->mutable_cpu_data()[i + 13] = 2; + blob_bottom_->mutable_cpu_data()[i + 14] = 3; + } + CuDNNPoolingLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_->num(), num); + EXPECT_EQ(blob_top_->channels(), channels); + EXPECT_EQ(blob_top_->height(), 2); + EXPECT_EQ(blob_top_->width(), 4); + if (blob_top_vec_.size() > 1) { + EXPECT_EQ(blob_top_mask_->num(), num); + EXPECT_EQ(blob_top_mask_->channels(), channels); + EXPECT_EQ(blob_top_mask_->height(), 2); + EXPECT_EQ(blob_top_mask_->width(), 4); + } + layer.Forward(blob_bottom_vec_, blob_top_vec_); + // Expected output: 2x 2 channels of: + // [9 5 5 8] + // [9 5 5 8] + for (int i = 0; i < 8 * num * channels; i += 8) { + EXPECT_EQ(blob_top_->cpu_data()[i + 0], 9); + EXPECT_EQ(blob_top_->cpu_data()[i + 1], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 2], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 3], 8); + EXPECT_EQ(blob_top_->cpu_data()[i + 4], 9); + EXPECT_EQ(blob_top_->cpu_data()[i + 5], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 6], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 7], 8); + } + if (blob_top_vec_.size() > 1) { + // Expected mask output: 2x 2 channels of: + // [5 2 2 9] + // [5 12 12 9] + for (int i = 0; i < 8 * num * channels; i += 8) { + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 0], 5); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 1], 2); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 2], 2); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 3], 9); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 4], 5); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 5], 12); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 6], 12); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 7], 9); + } + } + } + // Test for 3x 2 rectangular pooling layer with kernel_h > kernel_w + void TestForwardRectHigh() { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(3); + pooling_param->set_kernel_w(2); + 
pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + const int num = 2; + const int channels = 2; + blob_bottom_->Reshape(num, channels, 6, 6); + // Input: 2x 2 channels of: + // [35 1 6 26 19 24] + // [ 3 32 7 21 23 25] + // [31 9 2 22 27 20] + // [ 8 28 33 17 10 15] + // [30 5 34 12 14 16] + // [ 4 36 29 13 18 11] + // (this is generated by magic(6) in MATLAB) + for (int i = 0; i < 36 * num * channels; i += 36) { + blob_bottom_->mutable_cpu_data()[i + 0] = 35; + blob_bottom_->mutable_cpu_data()[i + 1] = 1; + blob_bottom_->mutable_cpu_data()[i + 2] = 6; + blob_bottom_->mutable_cpu_data()[i + 3] = 26; + blob_bottom_->mutable_cpu_data()[i + 4] = 19; + blob_bottom_->mutable_cpu_data()[i + 5] = 24; + blob_bottom_->mutable_cpu_data()[i + 6] = 3; + blob_bottom_->mutable_cpu_data()[i + 7] = 32; + blob_bottom_->mutable_cpu_data()[i + 8] = 7; + blob_bottom_->mutable_cpu_data()[i + 9] = 21; + blob_bottom_->mutable_cpu_data()[i + 10] = 23; + blob_bottom_->mutable_cpu_data()[i + 11] = 25; + blob_bottom_->mutable_cpu_data()[i + 12] = 31; + blob_bottom_->mutable_cpu_data()[i + 13] = 9; + blob_bottom_->mutable_cpu_data()[i + 14] = 2; + blob_bottom_->mutable_cpu_data()[i + 15] = 22; + blob_bottom_->mutable_cpu_data()[i + 16] = 27; + blob_bottom_->mutable_cpu_data()[i + 17] = 20; + blob_bottom_->mutable_cpu_data()[i + 18] = 8; + blob_bottom_->mutable_cpu_data()[i + 19] = 28; + blob_bottom_->mutable_cpu_data()[i + 20] = 33; + blob_bottom_->mutable_cpu_data()[i + 21] = 17; + blob_bottom_->mutable_cpu_data()[i + 22] = 10; + blob_bottom_->mutable_cpu_data()[i + 23] = 15; + blob_bottom_->mutable_cpu_data()[i + 24] = 30; + blob_bottom_->mutable_cpu_data()[i + 25] = 5; + blob_bottom_->mutable_cpu_data()[i + 26] = 34; + blob_bottom_->mutable_cpu_data()[i + 27] = 12; + blob_bottom_->mutable_cpu_data()[i + 28] = 14; + blob_bottom_->mutable_cpu_data()[i + 29] = 16; + blob_bottom_->mutable_cpu_data()[i + 30] = 4; + blob_bottom_->mutable_cpu_data()[i + 31] = 36; + blob_bottom_->mutable_cpu_data()[i + 32] = 29; + blob_bottom_->mutable_cpu_data()[i + 33] = 13; + blob_bottom_->mutable_cpu_data()[i + 34] = 18; + blob_bottom_->mutable_cpu_data()[i + 35] = 11; + } + CuDNNPoolingLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_->num(), num); + EXPECT_EQ(blob_top_->channels(), channels); + EXPECT_EQ(blob_top_->height(), 4); + EXPECT_EQ(blob_top_->width(), 5); + if (blob_top_vec_.size() > 1) { + EXPECT_EQ(blob_top_mask_->num(), num); + EXPECT_EQ(blob_top_mask_->channels(), channels); + EXPECT_EQ(blob_top_mask_->height(), 4); + EXPECT_EQ(blob_top_mask_->width(), 5); + } + layer.Forward(blob_bottom_vec_, blob_top_vec_); + // Expected output: 2x 2 channels of: + // [35 32 26 27 27] + // [32 33 33 27 27] + // [31 34 34 27 27] + // [36 36 34 18 18] + for (int i = 0; i < 20 * num * channels; i += 20) { + EXPECT_EQ(blob_top_->cpu_data()[i + 0], 35); + EXPECT_EQ(blob_top_->cpu_data()[i + 1], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 2], 26); + EXPECT_EQ(blob_top_->cpu_data()[i + 3], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 4], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 5], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 6], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 7], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 8], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 9], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 10], 31); + EXPECT_EQ(blob_top_->cpu_data()[i + 11], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 12], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 13], 27); + 
EXPECT_EQ(blob_top_->cpu_data()[i + 14], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 15], 36); + EXPECT_EQ(blob_top_->cpu_data()[i + 16], 36); + EXPECT_EQ(blob_top_->cpu_data()[i + 17], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 18], 18); + EXPECT_EQ(blob_top_->cpu_data()[i + 19], 18); + } + if (blob_top_vec_.size() > 1) { + // [ 1 8 4 17 17] + // [ 8 21 21 17 17] + // [13 27 27 17 17] + // [32 32 27 35 35] + for (int i = 0; i < 20 * num * channels; i += 20) { + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 0], 0); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 1], 7); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 2], 3); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 3], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 4], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 5], 7); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 6], 20); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 7], 20); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 8], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 9], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 10], 12); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 11], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 12], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 13], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 14], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 15], 31); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 16], 31); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 17], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 18], 34); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 19], 34); + } + } + } + // Test for rectangular pooling layer with kernel_w > kernel_h + void TestForwardRectWide() { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(2); + pooling_param->set_kernel_w(3); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + const int num = 2; + const int channels = 2; + blob_bottom_->Reshape(num, channels, 6, 6); + // Input: 2x 2 channels of: + // [35 1 6 26 19 24] + // [ 3 32 7 21 23 25] + // [31 9 2 22 27 20] + // [ 8 28 33 17 10 15] + // [30 5 34 12 14 16] + // [ 4 36 29 13 18 11] + // (this is generated by magic(6) in MATLAB) + for (int i = 0; i < 36 * num * channels; i += 36) { + blob_bottom_->mutable_cpu_data()[i + 0] = 35; + blob_bottom_->mutable_cpu_data()[i + 1] = 1; + blob_bottom_->mutable_cpu_data()[i + 2] = 6; + blob_bottom_->mutable_cpu_data()[i + 3] = 26; + blob_bottom_->mutable_cpu_data()[i + 4] = 19; + blob_bottom_->mutable_cpu_data()[i + 5] = 24; + blob_bottom_->mutable_cpu_data()[i + 6] = 3; + blob_bottom_->mutable_cpu_data()[i + 7] = 32; + blob_bottom_->mutable_cpu_data()[i + 8] = 7; + blob_bottom_->mutable_cpu_data()[i + 9] = 21; + blob_bottom_->mutable_cpu_data()[i + 10] = 23; + blob_bottom_->mutable_cpu_data()[i + 11] = 25; + blob_bottom_->mutable_cpu_data()[i + 12] = 31; + blob_bottom_->mutable_cpu_data()[i + 13] = 9; + blob_bottom_->mutable_cpu_data()[i + 14] = 2; + blob_bottom_->mutable_cpu_data()[i + 15] = 22; + blob_bottom_->mutable_cpu_data()[i + 16] = 27; + blob_bottom_->mutable_cpu_data()[i + 17] = 20; + blob_bottom_->mutable_cpu_data()[i + 18] = 8; + blob_bottom_->mutable_cpu_data()[i + 19] = 28; + blob_bottom_->mutable_cpu_data()[i + 20] = 33; + blob_bottom_->mutable_cpu_data()[i + 21] = 17; + blob_bottom_->mutable_cpu_data()[i + 22] = 10; + blob_bottom_->mutable_cpu_data()[i + 23] = 15; + blob_bottom_->mutable_cpu_data()[i + 24] = 30; + blob_bottom_->mutable_cpu_data()[i + 25] = 5; + blob_bottom_->mutable_cpu_data()[i + 26] = 34; + 
blob_bottom_->mutable_cpu_data()[i + 27] = 12; + blob_bottom_->mutable_cpu_data()[i + 28] = 14; + blob_bottom_->mutable_cpu_data()[i + 29] = 16; + blob_bottom_->mutable_cpu_data()[i + 30] = 4; + blob_bottom_->mutable_cpu_data()[i + 31] = 36; + blob_bottom_->mutable_cpu_data()[i + 32] = 29; + blob_bottom_->mutable_cpu_data()[i + 33] = 13; + blob_bottom_->mutable_cpu_data()[i + 34] = 18; + blob_bottom_->mutable_cpu_data()[i + 35] = 11; + } + CuDNNPoolingLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_->num(), num); + EXPECT_EQ(blob_top_->channels(), channels); + EXPECT_EQ(blob_top_->height(), 5); + EXPECT_EQ(blob_top_->width(), 4); + if (blob_top_vec_.size() > 1) { + EXPECT_EQ(blob_top_mask_->num(), num); + EXPECT_EQ(blob_top_mask_->channels(), channels); + EXPECT_EQ(blob_top_mask_->height(), 5); + EXPECT_EQ(blob_top_mask_->width(), 4); + } + layer.Forward(blob_bottom_vec_, blob_top_vec_); + // Expected output: 2x 2 channels of: + // [35 32 26 26] + // [32 32 27 27] + // [33 33 33 27] + // [34 34 34 17] + // [36 36 34 18] + for (int i = 0; i < 20 * num * channels; i += 20) { + EXPECT_EQ(blob_top_->cpu_data()[i + 0], 35); + EXPECT_EQ(blob_top_->cpu_data()[i + 1], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 2], 26); + EXPECT_EQ(blob_top_->cpu_data()[i + 3], 26); + EXPECT_EQ(blob_top_->cpu_data()[i + 4], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 5], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 6], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 7], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 8], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 9], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 10], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 11], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 12], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 13], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 14], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 15], 17); + EXPECT_EQ(blob_top_->cpu_data()[i + 16], 36); + EXPECT_EQ(blob_top_->cpu_data()[i + 17], 36); + EXPECT_EQ(blob_top_->cpu_data()[i + 18], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 19], 18); + } + if (blob_top_vec_.size() > 1) { + // [ 1 8 4 4] + // [ 8 8 17 17] + // [21 21 21 17] + // [27 27 27 22] + // [32 32 27 35] + for (int i = 0; i < 20 * num * channels; i += 20) { + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 0], 0); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 1], 7); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 2], 3); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 3], 3); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 4], 7); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 5], 7); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 6], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 7], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 8], 20); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 9], 20); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 10], 20); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 11], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 12], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 13], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 14], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 15], 21); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 16], 31); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 17], 31); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 18], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 19], 34); + } + } + } +}; + +TYPED_TEST_CASE(CuDNNPoolingLayerTest, TestDtypes); + +TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + 
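+  // With the 6x5 bottom from SetUp, the kernel of 3 and stride of 2 set below
+  // should yield a pooled size of ceil((6 - 3) / 2) + 1 = 3 by
+  // ceil((5 - 3) / 2) + 1 = 2, which the EXPECTs at the end verify.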
pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + CuDNNPoolingLayer<TypeParam> layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 2); +} + +TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(1); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + CuDNNPoolingLayer<TypeParam> layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 4); + EXPECT_EQ(this->blob_top_->width(), 3); +} + +/* +TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { + LayerParameter layer_param; + layer_param.set_kernelsize(3); + layer_param.set_stride(2); + layer_param.set_pool(LayerParameter_PoolMethod_MAX); + CuDNNPoolingLayer<TypeParam> layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + cout << "bottom data " << i << " " << this->blob_bottom_->cpu_data()[i] << endl; + } + for (int i = 0; i < this->blob_top_->count(); ++i) { + cout << "top data " << i << " " << this->blob_top_->cpu_data()[i] << endl; + } + + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = i; + } + layer.Backward(this->blob_top_vec_, true, this->blob_bottom_vec_); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + cout << "bottom diff " << i << " " << this->blob_bottom_->cpu_diff()[i] << endl; + } +} +*/ + +TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxCuDNN) { + this->TestForwardSquare(); + this->TestForwardRectHigh(); + this->TestForwardRectWide(); +} + +// Currently, cuDNN does not support a top mask, so we comment this and + // the corresponding backward test.
+/* +TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { + this->blob_top_vec_.push_back(this->blob_top_mask_); + this->TestForwardSquare(); + this->TestForwardRectHigh(); + this->TestForwardRectWide(); +} +*/ + +TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + // currently, cuDNN pooling does not support padding + pooling_param->set_pad(0); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + CuDNNPoolingLayer<TypeParam> layer(layer_param); + GradientChecker<TypeParam> checker(1e-4, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } + } +} + +TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + this->blob_bottom_->Reshape(1, 1, 3, 3); + // Input: + // [ 1 2 4 ] + // [ 2 3 2 ] + // [ 4 2 1 ] + this->blob_bottom_->mutable_cpu_data()[0] = 1; + this->blob_bottom_->mutable_cpu_data()[1] = 2; + this->blob_bottom_->mutable_cpu_data()[2] = 4; + this->blob_bottom_->mutable_cpu_data()[3] = 2; + this->blob_bottom_->mutable_cpu_data()[4] = 3; + this->blob_bottom_->mutable_cpu_data()[5] = 2; + this->blob_bottom_->mutable_cpu_data()[6] = 4; + this->blob_bottom_->mutable_cpu_data()[7] = 2; + this->blob_bottom_->mutable_cpu_data()[8] = 1; + CuDNNPoolingLayer<TypeParam> layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + TypeParam epsilon = 1e-8; + // Output: + // [ 1 4 4 ] + // [ 4 4 4 ] + // [ 4 4 1 ] + EXPECT_NEAR(this->blob_top_->cpu_data()[0], 1, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[2], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[3], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[4], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[5], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[6], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[8], 1, epsilon); +} + +/* +TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + this->blob_top_vec_.push_back(this->blob_top_mask_); + CuDNNPoolingLayer<TypeParam> layer(layer_param); + GradientChecker<TypeParam> checker(1e-4, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + this->blob_top_vec_.pop_back(); + } + } +} +*/ + +TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { +
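+  // Averaging a constant input of 2 over a single unpadded 3x3 window should
+  // return exactly 2; the padded border fractions checked in the non-cuDNN
+  // TestForwardAve are not exercised here.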
LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(1); + // Currently, cuDNN pooling does not support padding, so we use + // a simplified version of this test. + pooling_param->set_pad(0); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + this->blob_bottom_->Reshape(1, 1, 3, 3); + FillerParameter filler_param; + filler_param.set_value(TypeParam(2)); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + CuDNNPoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + TypeParam epsilon = 1e-5; + EXPECT_NEAR(this->blob_top_->cpu_data()[0], 2.0, epsilon); +} + +TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + CuDNNPoolingLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } + } +} + +TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pad(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + CuDNNPoolingLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } + } +} + +#endif + +} // namespace caffe diff --git a/src/caffe/test/test_power_layer.cpp b/src/caffe/test/test_power_layer.cpp new file mode 100755 index 0000000..76c9e85 --- /dev/null +++ b/src/caffe/test/test_power_layer.cpp @@ -0,0 +1,170 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class PowerLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + PowerLayerTest() + : blob_bottom_(new Blob(2, 3, 4, 5)), + blob_top_(new Blob()) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~PowerLayerTest() { delete blob_bottom_; delete blob_top_; } + + void TestForward(Dtype power, Dtype scale, Dtype shift) { + LayerParameter layer_param; + layer_param.mutable_power_param()->set_power(power); + layer_param.mutable_power_param()->set_scale(scale); + 
layer_param.mutable_power_param()->set_shift(shift); + PowerLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const Dtype* bottom_data = this->blob_bottom_->cpu_data(); + const Dtype* top_data = this->blob_top_->cpu_data(); + const Dtype min_precision = 1e-5; + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + Dtype expected_value = pow(shift + scale * bottom_data[i], power); + if (power == Dtype(0) || power == Dtype(1) || power == Dtype(2)) { + EXPECT_FALSE(isnan(top_data[i])); + } + if (isnan(expected_value)) { + EXPECT_TRUE(isnan(top_data[i])); + } else { + Dtype precision = std::max( + Dtype(std::abs(expected_value * Dtype(1e-4))), min_precision); + EXPECT_NEAR(expected_value, top_data[i], precision); + } + } + } + + void TestBackward(Dtype power, Dtype scale, Dtype shift) { + LayerParameter layer_param; + layer_param.mutable_power_param()->set_power(power); + layer_param.mutable_power_param()->set_scale(scale); + layer_param.mutable_power_param()->set_shift(shift); + PowerLayer layer(layer_param); + if (power != Dtype(0) && power != Dtype(1) && power != Dtype(2)) { + // Avoid NaNs by forcing (shift + scale * x) >= 0 + Dtype* bottom_data = this->blob_bottom_->mutable_cpu_data(); + Dtype min_value = -shift / scale; + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + if (bottom_data[i] < min_value) { + bottom_data[i] = min_value + (min_value - bottom_data[i]); + } + } + } + GradientChecker checker(1e-3, 1e-2, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } + + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(PowerLayerTest, TestDtypesAndDevices); + +TYPED_TEST(PowerLayerTest, TestPower) { + typedef typename TypeParam::Dtype Dtype; + Dtype power = 0.37; + Dtype scale = 0.83; + Dtype shift = -2.4; + this->TestForward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerGradient) { + typedef typename TypeParam::Dtype Dtype; + Dtype power = 0.37; + Dtype scale = 0.83; + Dtype shift = -2.4; + this->TestBackward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerGradientShiftZero) { + typedef typename TypeParam::Dtype Dtype; + Dtype power = 0.37; + Dtype scale = 0.83; + Dtype shift = 0.0; + this->TestBackward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerZero) { + typedef typename TypeParam::Dtype Dtype; + Dtype power = 0.0; + Dtype scale = 0.83; + Dtype shift = -2.4; + this->TestForward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerZeroGradient) { + typedef typename TypeParam::Dtype Dtype; + Dtype power = 0.0; + Dtype scale = 0.83; + Dtype shift = -2.4; + this->TestBackward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerOne) { + typedef typename TypeParam::Dtype Dtype; + Dtype power = 1.0; + Dtype scale = 0.83; + Dtype shift = -2.4; + this->TestForward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerOneGradient) { + typedef typename TypeParam::Dtype Dtype; + Dtype power = 1.0; + Dtype scale = 0.83; + Dtype shift = -2.4; + this->TestBackward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerTwo) { + typedef typename TypeParam::Dtype Dtype; + Dtype power = 2.0; + Dtype scale = 0.34; + Dtype shift = -2.4; + this->TestForward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerTwoGradient) { + typedef 
typename TypeParam::Dtype Dtype; + Dtype power = 2.0; + Dtype scale = 0.83; + Dtype shift = -2.4; + this->TestBackward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerTwoScaleHalfGradient) { + typedef typename TypeParam::Dtype Dtype; + Dtype power = 2.0; + Dtype scale = 0.5; + Dtype shift = -2.4; + this->TestBackward(power, scale, shift); +} + +} // namespace caffe diff --git a/src/caffe/test/test_protobuf.cpp b/src/caffe/test/test_protobuf.cpp new file mode 100755 index 0000000..01de461 --- /dev/null +++ b/src/caffe/test/test_protobuf.cpp @@ -0,0 +1,29 @@ +// This is simply a script that tries serializing protocol buffer in text +// format. Nothing special here and no actual code is being tested. +#include + +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" + +#include "caffe/proto/caffe.pb.h" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +class ProtoTest : public ::testing::Test {}; + +TEST_F(ProtoTest, TestSerialization) { + LayerParameter param; + param.set_name("test"); + param.set_type("Test"); + std::cout << "Printing in binary format." << std::endl; + std::cout << param.SerializeAsString() << std::endl; + std::cout << "Printing in text format." << std::endl; + std::string str; + google::protobuf::TextFormat::PrintToString(param, &str); + std::cout << str << std::endl; + EXPECT_TRUE(true); +} + +} // namespace caffe diff --git a/src/caffe/test/test_random_number_generator.cpp b/src/caffe/test/test_random_number_generator.cpp new file mode 100755 index 0000000..98424c0 --- /dev/null +++ b/src/caffe/test/test_random_number_generator.cpp @@ -0,0 +1,521 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/common.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class RandomNumberGeneratorTest : public ::testing::Test { + protected: + RandomNumberGeneratorTest() + : mean_bound_multiplier_(3.8), // ~99.99% confidence for test failure. 
+ sample_size_(10000), + seed_(1701), + data_(new SyncedMemory(sample_size_ * sizeof(Dtype))), + data_2_(new SyncedMemory(sample_size_ * sizeof(Dtype))), + int_data_(new SyncedMemory(sample_size_ * sizeof(int))), + int_data_2_(new SyncedMemory(sample_size_ * sizeof(int))) {} + + virtual void SetUp() { + Caffe::set_random_seed(this->seed_); + } + + Dtype sample_mean(const Dtype* const seqs, const int sample_size) { + Dtype sum = 0; + for (int i = 0; i < sample_size; ++i) { + sum += seqs[i]; + } + return sum / sample_size; + } + + Dtype sample_mean(const Dtype* const seqs) { + return sample_mean(seqs, sample_size_); + } + + Dtype sample_mean(const int* const seqs, const int sample_size) { + Dtype sum = 0; + for (int i = 0; i < sample_size; ++i) { + sum += Dtype(seqs[i]); + } + return sum / sample_size; + } + + Dtype sample_mean(const int* const seqs) { + return sample_mean(seqs, sample_size_); + } + + Dtype mean_bound(const Dtype std, const int sample_size) { + return mean_bound_multiplier_ * std / sqrt(static_cast(sample_size)); + } + + Dtype mean_bound(const Dtype std) { + return mean_bound(std, sample_size_); + } + + void RngGaussianFill(const Dtype mu, const Dtype sigma, void* cpu_data) { + Dtype* rng_data = static_cast(cpu_data); + caffe_rng_gaussian(sample_size_, mu, sigma, rng_data); + } + + void RngGaussianChecks(const Dtype mu, const Dtype sigma, + const void* cpu_data, const Dtype sparse_p = 0) { + const Dtype* rng_data = static_cast(cpu_data); + const Dtype true_mean = mu; + const Dtype true_std = sigma; + // Check that sample mean roughly matches true mean. + const Dtype bound = this->mean_bound(true_std); + const Dtype sample_mean = this->sample_mean( + static_cast(cpu_data)); + EXPECT_NEAR(sample_mean, true_mean, bound); + // Check that roughly half the samples are above the true mean. + int num_above_mean = 0; + int num_below_mean = 0; + int num_mean = 0; + int num_nan = 0; + for (int i = 0; i < sample_size_; ++i) { + if (rng_data[i] > true_mean) { + ++num_above_mean; + } else if (rng_data[i] < true_mean) { + ++num_below_mean; + } else if (rng_data[i] == true_mean) { + ++num_mean; + } else { + ++num_nan; + } + } + EXPECT_EQ(0, num_nan); + if (sparse_p == Dtype(0)) { + EXPECT_EQ(0, num_mean); + } + const Dtype sample_p_above_mean = + static_cast(num_above_mean) / sample_size_; + const Dtype bernoulli_p = (1 - sparse_p) * 0.5; + const Dtype bernoulli_std = sqrt(bernoulli_p * (1 - bernoulli_p)); + const Dtype bernoulli_bound = this->mean_bound(bernoulli_std); + EXPECT_NEAR(bernoulli_p, sample_p_above_mean, bernoulli_bound); + } + + void RngUniformFill(const Dtype lower, const Dtype upper, void* cpu_data) { + CHECK_GE(upper, lower); + Dtype* rng_data = static_cast(cpu_data); + caffe_rng_uniform(sample_size_, lower, upper, rng_data); + } + + void RngUniformChecks(const Dtype lower, const Dtype upper, + const void* cpu_data, const Dtype sparse_p = 0) { + const Dtype* rng_data = static_cast(cpu_data); + const Dtype true_mean = (lower + upper) / 2; + const Dtype true_std = (upper - lower) / sqrt(12); + // Check that sample mean roughly matches true mean. + const Dtype bound = this->mean_bound(true_std); + const Dtype sample_mean = this->sample_mean(rng_data); + EXPECT_NEAR(sample_mean, true_mean, bound); + // Check that roughly half the samples are above the true mean, and none are + // above upper or below lower. 
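// Added clarification (not in the original patch): the "roughly half" check
// below treats the event {sample > true_mean} as a Bernoulli trial with
// p = (1 - sparse_p) * 0.5. With sample_size_ = 10000 and
// mean_bound_multiplier_ = 3.8, the tolerance is
//   bound = 3.8 * sqrt(p * (1 - p)) / sqrt(10000),
// i.e. about 3.8 * 0.5 / 100 = 0.019 in the non-sparse case, so the observed
// fraction above the mean may drift from 0.5 by roughly two percentage points
// before the expectation fails.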
+ int num_above_mean = 0; + int num_below_mean = 0; + int num_mean = 0; + int num_nan = 0; + int num_above_upper = 0; + int num_below_lower = 0; + for (int i = 0; i < sample_size_; ++i) { + if (rng_data[i] > true_mean) { + ++num_above_mean; + } else if (rng_data[i] < true_mean) { + ++num_below_mean; + } else if (rng_data[i] == true_mean) { + ++num_mean; + } else { + ++num_nan; + } + if (rng_data[i] > upper) { + ++num_above_upper; + } else if (rng_data[i] < lower) { + ++num_below_lower; + } + } + EXPECT_EQ(0, num_nan); + EXPECT_EQ(0, num_above_upper); + EXPECT_EQ(0, num_below_lower); + if (sparse_p == Dtype(0)) { + EXPECT_EQ(0, num_mean); + } + const Dtype sample_p_above_mean = + static_cast(num_above_mean) / sample_size_; + const Dtype bernoulli_p = (1 - sparse_p) * 0.5; + const Dtype bernoulli_std = sqrt(bernoulli_p * (1 - bernoulli_p)); + const Dtype bernoulli_bound = this->mean_bound(bernoulli_std); + EXPECT_NEAR(bernoulli_p, sample_p_above_mean, bernoulli_bound); + } + + void RngBernoulliFill(const Dtype p, void* cpu_data) { + int* rng_data = static_cast(cpu_data); + caffe_rng_bernoulli(sample_size_, p, rng_data); + } + + void RngBernoulliChecks(const Dtype p, const void* cpu_data) { + const int* rng_data = static_cast(cpu_data); + const Dtype true_mean = p; + const Dtype true_std = sqrt(p * (1 - p)); + const Dtype bound = this->mean_bound(true_std); + const Dtype sample_mean = this->sample_mean(rng_data); + EXPECT_NEAR(sample_mean, true_mean, bound); + } + +#ifndef CPU_ONLY + + void RngGaussianFillGPU(const Dtype mu, const Dtype sigma, void* gpu_data) { + Dtype* rng_data = static_cast(gpu_data); + caffe_gpu_rng_gaussian(sample_size_, mu, sigma, rng_data); + } + + void RngUniformFillGPU(const Dtype lower, const Dtype upper, void* gpu_data) { + CHECK_GE(upper, lower); + Dtype* rng_data = static_cast(gpu_data); + caffe_gpu_rng_uniform(sample_size_, lower, upper, rng_data); + } + + // Fills with uniform integers in [0, UINT_MAX] using 2 argument form of + // caffe_gpu_rng_uniform. 
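// Editorial note (not part of the original patch): the two-argument overload
// draws unsigned ints over the full range. TestRngUniformIntGPU below casts
// the results to the floating-point TypeParam and reuses RngUniformChecks
// with lower = 0 and upper = UINT_MAX, so only the mean, range, and
// above/below-mean split are verified, not exact integer uniformity.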
+ void RngUniformIntFillGPU(void* gpu_data) { + unsigned int* rng_data = static_cast(gpu_data); + caffe_gpu_rng_uniform(sample_size_, rng_data); + } + +#endif + + int num_above_mean; + int num_below_mean; + + Dtype mean_bound_multiplier_; + + size_t sample_size_; + uint32_t seed_; + + shared_ptr data_; + shared_ptr data_2_; + shared_ptr int_data_; + shared_ptr int_data_2_; +}; + +TYPED_TEST_CASE(RandomNumberGeneratorTest, TestDtypes); + +TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian) { + const TypeParam mu = 0; + const TypeParam sigma = 1; + void* gaussian_data = this->data_->mutable_cpu_data(); + this->RngGaussianFill(mu, sigma, gaussian_data); + this->RngGaussianChecks(mu, sigma, gaussian_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian2) { + const TypeParam mu = -2; + const TypeParam sigma = 3; + void* gaussian_data = this->data_->mutable_cpu_data(); + this->RngGaussianFill(mu, sigma, gaussian_data); + this->RngGaussianChecks(mu, sigma, gaussian_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngUniform) { + const TypeParam lower = 0; + const TypeParam upper = 1; + void* uniform_data = this->data_->mutable_cpu_data(); + this->RngUniformFill(lower, upper, uniform_data); + this->RngUniformChecks(lower, upper, uniform_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngUniform2) { + const TypeParam lower = -7.3; + const TypeParam upper = -2.3; + void* uniform_data = this->data_->mutable_cpu_data(); + this->RngUniformFill(lower, upper, uniform_data); + this->RngUniformChecks(lower, upper, uniform_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngBernoulli) { + const TypeParam p = 0.3; + void* bernoulli_data = this->int_data_->mutable_cpu_data(); + this->RngBernoulliFill(p, bernoulli_data); + this->RngBernoulliChecks(p, bernoulli_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngBernoulli2) { + const TypeParam p = 0.9; + void* bernoulli_data = this->int_data_->mutable_cpu_data(); + this->RngBernoulliFill(p, bernoulli_data); + this->RngBernoulliChecks(p, bernoulli_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesGaussian) { + const TypeParam mu = 0; + const TypeParam sigma = 1; + + // Sample from 0 mean Gaussian. + TypeParam* gaussian_data_1 = + static_cast(this->data_->mutable_cpu_data()); + this->RngGaussianFill(mu, sigma, gaussian_data_1); + + // Sample from 0 mean Gaussian again. + TypeParam* gaussian_data_2 = + static_cast(this->data_2_->mutable_cpu_data()); + this->RngGaussianFill(mu, sigma, gaussian_data_2); + + // Multiply Gaussians. + for (int i = 0; i < this->sample_size_; ++i) { + gaussian_data_1[i] *= gaussian_data_2[i]; + } + + // Check that result has mean 0. + TypeParam mu_product = pow(mu, 2); + TypeParam sigma_product = sqrt(pow(sigma, 2) / 2); + this->RngGaussianChecks(mu_product, sigma_product, gaussian_data_1); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesUniform) { + // Sample from Uniform on [-2, 2]. + const TypeParam lower_1 = -2; + const TypeParam upper_1 = -lower_1; + TypeParam* uniform_data_1 = + static_cast(this->data_->mutable_cpu_data()); + this->RngUniformFill(lower_1, upper_1, uniform_data_1); + + // Sample from Uniform on [-3, 3]. + const TypeParam lower_2 = -3; + const TypeParam upper_2 = -lower_2; + TypeParam* uniform_data_2 = + static_cast(this->data_2_->mutable_cpu_data()); + this->RngUniformFill(lower_2, upper_2, uniform_data_2); + + // Multiply Uniforms. 
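// Added clarification (not in the original patch): the element-wise product
// of U[-2, 2] and U[-3, 3] draws is not uniform, but it is symmetric about 0
// and bounded by [-6, 6], so it satisfies everything RngUniformChecks
// actually verifies for a nominal U[-6, 6]. In particular, the product's true
// standard deviation is (4 / sqrt(12)) * (6 / sqrt(12)) = 2, well inside the
// mean tolerance computed from the nominal std of sqrt(12), about 3.46.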
+ for (int i = 0; i < this->sample_size_; ++i) { + uniform_data_1[i] *= uniform_data_2[i]; + } + + // Check that result does not violate checked properties of Uniform on [-6, 6] + // (though it is not actually uniformly distributed). + const TypeParam lower_prod = lower_1 * upper_2; + const TypeParam upper_prod = -lower_prod; + this->RngUniformChecks(lower_prod, upper_prod, uniform_data_1); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesBernoulli) { + // Sample from 0 mean Gaussian. + const TypeParam mu = 0; + const TypeParam sigma = 1; + TypeParam* gaussian_data = + static_cast(this->data_->mutable_cpu_data()); + this->RngGaussianFill(mu, sigma, gaussian_data); + + // Sample from Bernoulli with p = 0.3. + const TypeParam bernoulli_p = 0.3; + int* bernoulli_data = + static_cast(this->int_data_->mutable_cpu_data()); + this->RngBernoulliFill(bernoulli_p, bernoulli_data); + + // Multiply Gaussian by Bernoulli. + for (int i = 0; i < this->sample_size_; ++i) { + gaussian_data[i] *= bernoulli_data[i]; + } + + // Check that result does not violate checked properties of sparsified + // Gaussian (though it is not actually a Gaussian). + this->RngGaussianChecks(mu, sigma, gaussian_data, 1 - bernoulli_p); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesBernoulli) { + // Sample from Uniform on [-1, 1]. + const TypeParam lower = -1; + const TypeParam upper = 1; + TypeParam* uniform_data = + static_cast(this->data_->mutable_cpu_data()); + this->RngUniformFill(lower, upper, uniform_data); + + // Sample from Bernoulli with p = 0.3. + const TypeParam bernoulli_p = 0.3; + int* bernoulli_data = + static_cast(this->int_data_->mutable_cpu_data()); + this->RngBernoulliFill(bernoulli_p, bernoulli_data); + + // Multiply Uniform by Bernoulli. + for (int i = 0; i < this->sample_size_; ++i) { + uniform_data[i] *= bernoulli_data[i]; + } + + // Check that result does not violate checked properties of sparsified + // Uniform on [-1, 1] (though it is not actually uniformly distributed). + this->RngUniformChecks(lower, upper, uniform_data, 1 - bernoulli_p); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngBernoulliTimesBernoulli) { + // Sample from Bernoulli with p = 0.5. + const TypeParam p_a = 0.5; + int* bernoulli_data_a = + static_cast(this->int_data_->mutable_cpu_data()); + this->RngBernoulliFill(p_a, bernoulli_data_a); + + // Sample from Bernoulli with p = 0.3. + const TypeParam p_b = 0.3; + int* bernoulli_data_b = + static_cast(this->int_data_2_->mutable_cpu_data()); + this->RngBernoulliFill(p_b, bernoulli_data_b); + + // Multiply Bernoullis. + for (int i = 0; i < this->sample_size_; ++i) { + bernoulli_data_a[i] *= bernoulli_data_b[i]; + } + int num_ones = 0; + for (int i = 0; i < this->sample_size_; ++i) { + if (bernoulli_data_a[i] != TypeParam(0)) { + EXPECT_EQ(TypeParam(1), bernoulli_data_a[i]); + ++num_ones; + } + } + + // Check that resulting product has roughly p_a * p_b ones. 
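// Added arithmetic (not in the original patch): independent Bernoulli(0.5)
// and Bernoulli(0.3) draws multiply to a Bernoulli(0.15) draw, so the
// expected fraction of ones is 0.15 with std sqrt(0.15 * 0.85), about 0.357.
// The tolerance below is therefore about 3.8 * 0.357 / sqrt(10000) = 0.0136.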
+ const TypeParam sample_p = this->sample_mean(bernoulli_data_a); + const TypeParam true_mean = p_a * p_b; + const TypeParam true_std = sqrt(true_mean * (1 - true_mean)); + const TypeParam bound = this->mean_bound(true_std); + EXPECT_NEAR(true_mean, sample_p, bound); +} + +#ifndef CPU_ONLY + +TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianGPU) { + const TypeParam mu = 0; + const TypeParam sigma = 1; + void* gaussian_gpu_data = this->data_->mutable_gpu_data(); + this->RngGaussianFillGPU(mu, sigma, gaussian_gpu_data); + const void* gaussian_data = this->data_->cpu_data(); + this->RngGaussianChecks(mu, sigma, gaussian_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian2GPU) { + const TypeParam mu = -2; + const TypeParam sigma = 3; + void* gaussian_gpu_data = this->data_->mutable_gpu_data(); + this->RngGaussianFillGPU(mu, sigma, gaussian_gpu_data); + const void* gaussian_data = this->data_->cpu_data(); + this->RngGaussianChecks(mu, sigma, gaussian_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformGPU) { + const TypeParam lower = 0; + const TypeParam upper = 1; + void* uniform_gpu_data = this->data_->mutable_gpu_data(); + this->RngUniformFillGPU(lower, upper, uniform_gpu_data); + const void* uniform_data = this->data_->cpu_data(); + this->RngUniformChecks(lower, upper, uniform_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngUniform2GPU) { + const TypeParam lower = -7.3; + const TypeParam upper = -2.3; + void* uniform_gpu_data = this->data_->mutable_gpu_data(); + this->RngUniformFillGPU(lower, upper, uniform_gpu_data); + const void* uniform_data = this->data_->cpu_data(); + this->RngUniformChecks(lower, upper, uniform_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformIntGPU) { + unsigned int* uniform_uint_gpu_data = + static_cast(this->int_data_->mutable_gpu_data()); + this->RngUniformIntFillGPU(uniform_uint_gpu_data); + const unsigned int* uniform_uint_data = + static_cast(this->int_data_->cpu_data()); + TypeParam* uniform_data = + static_cast(this->data_->mutable_cpu_data()); + for (int i = 0; i < this->sample_size_; ++i) { + uniform_data[i] = static_cast(uniform_uint_data[i]); + } + const TypeParam lower = 0; + const TypeParam upper = UINT_MAX; + this->RngUniformChecks(lower, upper, uniform_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesGaussianGPU) { + const TypeParam mu = 0; + const TypeParam sigma = 1; + + // Sample from 0 mean Gaussian. + TypeParam* gaussian_gpu_data_1 = + static_cast(this->data_->mutable_gpu_data()); + this->RngGaussianFillGPU(mu, sigma, gaussian_gpu_data_1); + + // Sample from 0 mean Gaussian again. + TypeParam* gaussian_gpu_data_2 = + static_cast(this->data_2_->mutable_gpu_data()); + this->RngGaussianFillGPU(mu, sigma, gaussian_gpu_data_2); + + // Multiply Gaussians. + TypeParam* gaussian_data_1 = + static_cast(this->data_->mutable_cpu_data()); + const TypeParam* gaussian_data_2 = + static_cast(this->data_2_->cpu_data()); + for (int i = 0; i < this->sample_size_; ++i) { + gaussian_data_1[i] *= gaussian_data_2[i]; + } + + // Check that result does not violate checked properties of Gaussian + // (though it is not actually a Gaussian). + TypeParam mu_product = pow(mu, 2); + TypeParam sigma_product = sqrt(pow(sigma, 2) / 2); + this->RngGaussianChecks(mu_product, sigma_product, gaussian_data_1); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesUniformGPU) { + // Sample from Uniform on [-2, 2]. 
+ const TypeParam lower_1 = -2; + const TypeParam upper_1 = -lower_1; + TypeParam* uniform_gpu_data_1 = + static_cast(this->data_->mutable_gpu_data()); + this->RngUniformFillGPU(lower_1, upper_1, uniform_gpu_data_1); + + // Sample from Uniform on [-3, 3]. + const TypeParam lower_2 = -3; + const TypeParam upper_2 = -lower_2; + TypeParam* uniform_gpu_data_2 = + static_cast(this->data_2_->mutable_gpu_data()); + this->RngUniformFillGPU(lower_2, upper_2, uniform_gpu_data_2); + + // Multiply Uniforms. + TypeParam* uniform_data_1 = + static_cast(this->data_->mutable_cpu_data()); + const TypeParam* uniform_data_2 = + static_cast(this->data_2_->cpu_data()); + for (int i = 0; i < this->sample_size_; ++i) { + uniform_data_1[i] *= uniform_data_2[i]; + } + + // Check that result does not violate properties of Uniform on [-7, -3]. + const TypeParam lower_prod = lower_1 * upper_2; + const TypeParam upper_prod = -lower_prod; + this->RngUniformChecks(lower_prod, upper_prod, uniform_data_1); +} + +#endif + +} // namespace caffe diff --git a/src/caffe/test/test_reduction_layer.cpp b/src/caffe/test/test_reduction_layer.cpp new file mode 100755 index 0000000..f568a18 --- /dev/null +++ b/src/caffe/test/test_reduction_layer.cpp @@ -0,0 +1,297 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class ReductionLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + ReductionLayerTest() + : blob_bottom_(new Blob(2, 3, 4, 5)), + blob_top_(new Blob()) { + // fill the values + Caffe::set_random_seed(1701); + FillerParameter filler_param; + UniformFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~ReductionLayerTest() { + delete blob_bottom_; + delete blob_top_; + } + + void TestForward(ReductionParameter_ReductionOp op, + float coeff = 1, int axis = 0) { + LayerParameter layer_param; + ReductionParameter* reduction_param = layer_param.mutable_reduction_param(); + reduction_param->set_operation(op); + if (coeff != 1.0) { reduction_param->set_coeff(coeff); } + if (axis != 0) { reduction_param->set_axis(axis); } + shared_ptr > layer( + new ReductionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* in_data = this->blob_bottom_->cpu_data(); + const int num = this->blob_bottom_->count(0, axis); + const int dim = this->blob_bottom_->count(axis); + for (int n = 0; n < num; ++n) { + Dtype expected_result = 0; + for (int d = 0; d < dim; ++d) { + switch (op) { + case ReductionParameter_ReductionOp_SUM: + expected_result += *in_data; + break; + case ReductionParameter_ReductionOp_MEAN: + expected_result += *in_data / dim; + break; + case ReductionParameter_ReductionOp_ASUM: + expected_result += fabs(*in_data); + break; + case ReductionParameter_ReductionOp_SUMSQ: + expected_result += (*in_data) * (*in_data); + break; + default: + LOG(FATAL) << "Unknown reduction op: " + << ReductionParameter_ReductionOp_Name(op); + } + ++in_data; + } + expected_result *= coeff; + const Dtype computed_result = this->blob_top_->cpu_data()[n]; + EXPECT_FLOAT_EQ(expected_result, computed_result) + << "Incorrect result computed with op " + << 
ReductionParameter_ReductionOp_Name(op) << ", coeff " << coeff; + } + } + + void TestGradient(ReductionParameter_ReductionOp op, + float coeff = 1, int axis = 0) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ReductionParameter* reduction_param = layer_param.mutable_reduction_param(); + reduction_param->set_operation(op); + reduction_param->set_coeff(coeff); + reduction_param->set_axis(axis); + ReductionLayer layer(layer_param); + GradientChecker checker(1e-2, 2e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } + + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(ReductionLayerTest, TestDtypesAndDevices); + +TYPED_TEST(ReductionLayerTest, TestSetUp) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + shared_ptr > layer( + new ReductionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(this->blob_top_->num_axes(), 0); +} + +TYPED_TEST(ReductionLayerTest, TestSetUpWithAxis1) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_reduction_param()->set_axis(1); + shared_ptr > layer( + new ReductionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(this->blob_top_->num_axes(), 1); + EXPECT_EQ(this->blob_top_->shape(0), 2); +} + +TYPED_TEST(ReductionLayerTest, TestSetUpWithAxis2) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_reduction_param()->set_axis(2); + shared_ptr > layer( + new ReductionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(this->blob_top_->num_axes(), 2); + EXPECT_EQ(this->blob_top_->shape(0), 2); + EXPECT_EQ(this->blob_top_->shape(1), 3); +} + +TYPED_TEST(ReductionLayerTest, TestSum) { + const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_SUM; + this->TestForward(kOp); +} + +TYPED_TEST(ReductionLayerTest, TestSumCoeff) { + const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_SUM; + const float kCoeff = 2.3; + this->TestForward(kOp, kCoeff); +} + +TYPED_TEST(ReductionLayerTest, TestSumCoeffAxis1) { + const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_SUM; + const float kCoeff = 2.3; + const int kAxis = 1; + this->TestForward(kOp, kCoeff, kAxis); +} + +TYPED_TEST(ReductionLayerTest, TestSumGradient) { + const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_SUM; + this->TestGradient(kOp); +} + +TYPED_TEST(ReductionLayerTest, TestSumCoeffGradient) { + const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_SUM; + const float kCoeff = 2.3; + this->TestGradient(kOp, kCoeff); +} + +TYPED_TEST(ReductionLayerTest, TestSumCoeffAxis1Gradient) { + const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_SUM; + const float kCoeff = 2.3; + const int kAxis = 1; + this->TestGradient(kOp, kCoeff, kAxis); +} + +TYPED_TEST(ReductionLayerTest, TestMean) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_MEAN; + this->TestForward(kOp); +} + +TYPED_TEST(ReductionLayerTest, TestMeanCoeff) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_MEAN; + const float kCoeff = 2.3; + this->TestForward(kOp, kCoeff); +} + +TYPED_TEST(ReductionLayerTest, TestMeanCoeffAxis1) { + const ReductionParameter_ReductionOp kOp = + 
ReductionParameter_ReductionOp_MEAN; + const float kCoeff = 2.3; + const int kAxis = 1; + this->TestForward(kOp, kCoeff, kAxis); +} + +TYPED_TEST(ReductionLayerTest, TestMeanGradient) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_MEAN; + this->TestGradient(kOp); +} + +TYPED_TEST(ReductionLayerTest, TestMeanCoeffGradient) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_MEAN; + const float kCoeff = 2.3; + this->TestGradient(kOp, kCoeff); +} + +TYPED_TEST(ReductionLayerTest, TestMeanCoeffGradientAxis1) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_MEAN; + const float kCoeff = 2.3; + const int kAxis = 1; + this->TestGradient(kOp, kCoeff, kAxis); +} + +TYPED_TEST(ReductionLayerTest, TestAbsSum) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_ASUM; + this->TestForward(kOp); +} + +TYPED_TEST(ReductionLayerTest, TestAbsSumCoeff) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_ASUM; + const float kCoeff = 2.3; + this->TestForward(kOp, kCoeff); +} + +TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffAxis1) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_ASUM; + const float kCoeff = 2.3; + const int kAxis = 1; + this->TestForward(kOp, kCoeff, kAxis); +} + +TYPED_TEST(ReductionLayerTest, TestAbsSumGradient) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_ASUM; + this->TestGradient(kOp); +} + +TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffGradient) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_ASUM; + const float kCoeff = 2.3; + this->TestGradient(kOp, kCoeff); +} + +TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffAxis1Gradient) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_ASUM; + const float kCoeff = 2.3; + const int kAxis = 1; + this->TestGradient(kOp, kCoeff, kAxis); +} + +TYPED_TEST(ReductionLayerTest, TestSumOfSquares) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_SUMSQ; + this->TestForward(kOp); +} + +TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeff) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_SUMSQ; + const float kCoeff = 2.3; + this->TestForward(kOp, kCoeff); +} + +TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffAxis1) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_SUMSQ; + const float kCoeff = 2.3; + const int kAxis = 1; + this->TestForward(kOp, kCoeff, kAxis); +} + +TYPED_TEST(ReductionLayerTest, TestSumOfSquaresGradient) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_SUMSQ; + this->TestGradient(kOp); +} + +TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffGradient) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_SUMSQ; + const float kCoeff = 2.3; + this->TestGradient(kOp, kCoeff); +} + +TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffAxis1Gradient) { + const ReductionParameter_ReductionOp kOp = + ReductionParameter_ReductionOp_SUMSQ; + const float kCoeff = 2.3; + const int kAxis = 1; + this->TestGradient(kOp, kCoeff, kAxis); +} + +} // namespace caffe diff --git a/src/caffe/test/test_reshape_layer.cpp b/src/caffe/test/test_reshape_layer.cpp new file mode 100755 index 0000000..9d08ec6 --- /dev/null +++ b/src/caffe/test/test_reshape_layer.cpp @@ -0,0 +1,280 @@ +#include +#include + +#include "gtest/gtest.h" + +#include 
"caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/common_layers.hpp" +#include "caffe/filler.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class ReshapeLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: + ReshapeLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 5)), + blob_top_(new Blob()) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~ReshapeLayerTest() { delete blob_bottom_; delete blob_top_; } + + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(ReshapeLayerTest, TestDtypesAndDevices); + +TYPED_TEST(ReshapeLayerTest, TestFlattenOutputSizes) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + BlobShape* blob_shape = layer_param.mutable_reshape_param()->mutable_shape(); + blob_shape->add_dim(0); + blob_shape->add_dim(-1); + blob_shape->add_dim(1); + blob_shape->add_dim(1); + + ReshapeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3 * 6 * 5); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +TYPED_TEST(ReshapeLayerTest, TestFlattenValues) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + BlobShape* blob_shape = layer_param.mutable_reshape_param()->mutable_shape(); + blob_shape->add_dim(0); + blob_shape->add_dim(-1); + blob_shape->add_dim(1); + blob_shape->add_dim(1); + ReshapeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int c = 0; c < 3 * 6 * 5; ++c) { + EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0), + this->blob_bottom_->data_at(0, c / (6 * 5), (c / 5) % 6, c % 5)); + EXPECT_EQ(this->blob_top_->data_at(1, c, 0, 0), + this->blob_bottom_->data_at(1, c / (6 * 5), (c / 5) % 6, c % 5)); + } +} + +// Test whether setting output dimensions to 0 either explicitly or implicitly +// copies the respective dimension of the input layer. +TYPED_TEST(ReshapeLayerTest, TestCopyDimensions) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + BlobShape* blob_shape = layer_param.mutable_reshape_param()->mutable_shape(); + blob_shape->add_dim(0); + blob_shape->add_dim(0); + blob_shape->add_dim(0); + blob_shape->add_dim(0); + ReshapeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 6); + EXPECT_EQ(this->blob_top_->width(), 5); +} + +// When a dimension is set to -1, we should infer its value from the other +// dimensions (including those that get copied from below). +TYPED_TEST(ReshapeLayerTest, TestInferenceOfUnspecified) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + BlobShape* blob_shape = layer_param.mutable_reshape_param()->mutable_shape(); + blob_shape->add_dim(0); + blob_shape->add_dim(3); + blob_shape->add_dim(10); + blob_shape->add_dim(-1); + + // Count is 180, thus height should be 180 / (2*3*10) = 3. 
+ + ReshapeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 10); + EXPECT_EQ(this->blob_top_->width(), 3); +} + +TYPED_TEST(ReshapeLayerTest, TestInferenceOfUnspecifiedWithStartAxis) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_reshape_param()->set_axis(1); + BlobShape* blob_shape = layer_param.mutable_reshape_param()->mutable_shape(); + blob_shape->add_dim(3); + blob_shape->add_dim(10); + blob_shape->add_dim(-1); + + ReshapeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + ASSERT_EQ(this->blob_top_->num_axes(), 4); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 10); + EXPECT_EQ(this->blob_top_->width(), 3); +} + +TYPED_TEST(ReshapeLayerTest, TestInsertSingletonAxesStart) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_reshape_param()->set_axis(0); + layer_param.mutable_reshape_param()->set_num_axes(0); + BlobShape* blob_shape = layer_param.mutable_reshape_param()->mutable_shape(); + blob_shape->add_dim(1); + blob_shape->add_dim(1); + blob_shape->add_dim(1); + + ReshapeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + ASSERT_EQ(this->blob_top_->num_axes(), 7); + EXPECT_EQ(this->blob_top_->shape(0), 1); + EXPECT_EQ(this->blob_top_->shape(1), 1); + EXPECT_EQ(this->blob_top_->shape(2), 1); + EXPECT_EQ(this->blob_top_->shape(3), 2); + EXPECT_EQ(this->blob_top_->shape(4), 3); + EXPECT_EQ(this->blob_top_->shape(5), 6); + EXPECT_EQ(this->blob_top_->shape(6), 5); +} + +TYPED_TEST(ReshapeLayerTest, TestInsertSingletonAxesMiddle) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_reshape_param()->set_axis(2); + layer_param.mutable_reshape_param()->set_num_axes(0); + BlobShape* blob_shape = layer_param.mutable_reshape_param()->mutable_shape(); + blob_shape->add_dim(1); + blob_shape->add_dim(1); + blob_shape->add_dim(1); + + ReshapeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + ASSERT_EQ(this->blob_top_->num_axes(), 7); + EXPECT_EQ(this->blob_top_->shape(0), 2); + EXPECT_EQ(this->blob_top_->shape(1), 3); + EXPECT_EQ(this->blob_top_->shape(2), 1); + EXPECT_EQ(this->blob_top_->shape(3), 1); + EXPECT_EQ(this->blob_top_->shape(4), 1); + EXPECT_EQ(this->blob_top_->shape(5), 6); + EXPECT_EQ(this->blob_top_->shape(6), 5); +} + +TYPED_TEST(ReshapeLayerTest, TestInsertSingletonAxesEnd) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_reshape_param()->set_axis(-1); + layer_param.mutable_reshape_param()->set_num_axes(0); + BlobShape* blob_shape = layer_param.mutable_reshape_param()->mutable_shape(); + blob_shape->add_dim(1); + blob_shape->add_dim(1); + blob_shape->add_dim(1); + + ReshapeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + ASSERT_EQ(this->blob_top_->num_axes(), 7); + EXPECT_EQ(this->blob_top_->shape(0), 2); + EXPECT_EQ(this->blob_top_->shape(1), 3); + EXPECT_EQ(this->blob_top_->shape(2), 6); + EXPECT_EQ(this->blob_top_->shape(3), 5); + EXPECT_EQ(this->blob_top_->shape(4), 1); + EXPECT_EQ(this->blob_top_->shape(5), 1); + EXPECT_EQ(this->blob_top_->shape(6), 1); +} + +TYPED_TEST(ReshapeLayerTest, TestFlattenMiddle) 
{ + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_reshape_param()->set_axis(1); + layer_param.mutable_reshape_param()->set_num_axes(2); + BlobShape* blob_shape = layer_param.mutable_reshape_param()->mutable_shape(); + blob_shape->add_dim(-1); + + ReshapeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + ASSERT_EQ(this->blob_top_->num_axes(), 3); + EXPECT_EQ(this->blob_top_->shape(0), 2); + EXPECT_EQ(this->blob_top_->shape(1), 3 * 6); + EXPECT_EQ(this->blob_top_->shape(2), 5); +} + +TYPED_TEST(ReshapeLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + BlobShape* shape = layer_param.mutable_reshape_param()->mutable_shape(); + shape->add_dim(6); + shape->add_dim(2); + shape->add_dim(3); + shape->add_dim(5); + ReshapeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_EQ(this->blob_top_->cpu_data()[i], + this->blob_bottom_->cpu_data()[i]); + } +} + +TYPED_TEST(ReshapeLayerTest, TestForwardAfterReshape) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + BlobShape* shape = layer_param.mutable_reshape_param()->mutable_shape(); + shape->add_dim(6); + shape->add_dim(2); + shape->add_dim(3); + shape->add_dim(5); + ReshapeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // We know the above produced the correct result from TestForward. + // Reshape the bottom and call layer.Reshape, then try again. + vector new_bottom_shape(1, 2 * 3 * 6 * 5); + this->blob_bottom_->Reshape(new_bottom_shape); + layer.Reshape(this->blob_bottom_vec_, this->blob_top_vec_); + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_EQ(this->blob_top_->cpu_data()[i], + this->blob_bottom_->cpu_data()[i]); + } +} + +TYPED_TEST(ReshapeLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + BlobShape* shape = layer_param.mutable_reshape_param()->mutable_shape(); + shape->add_dim(6); + shape->add_dim(2); + shape->add_dim(3); + shape->add_dim(5); + ReshapeLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +} // namespace caffe diff --git a/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp new file mode 100755 index 0000000..e5737e4 --- /dev/null +++ b/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp @@ -0,0 +1,122 @@ +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class SigmoidCrossEntropyLossLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + SigmoidCrossEntropyLossLayerTest() + : blob_bottom_data_(new Blob(10, 5, 1, 1)), + blob_bottom_targets_(new Blob(10, 5, 1, 1)), + blob_top_loss_(new Blob()) { + // 
Fill the data vector + FillerParameter data_filler_param; + data_filler_param.set_std(1); + GaussianFiller data_filler(data_filler_param); + data_filler.Fill(blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); + // Fill the targets vector + FillerParameter targets_filler_param; + targets_filler_param.set_min(0); + targets_filler_param.set_max(1); + UniformFiller targets_filler(targets_filler_param); + targets_filler.Fill(blob_bottom_targets_); + blob_bottom_vec_.push_back(blob_bottom_targets_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~SigmoidCrossEntropyLossLayerTest() { + delete blob_bottom_data_; + delete blob_bottom_targets_; + delete blob_top_loss_; + } + + Dtype SigmoidCrossEntropyLossReference(const int count, const int num, + const Dtype* input, + const Dtype* target) { + Dtype loss = 0; + for (int i = 0; i < count; ++i) { + const Dtype prediction = 1 / (1 + exp(-input[i])); + EXPECT_LE(prediction, 1); + EXPECT_GE(prediction, 0); + EXPECT_LE(target[i], 1); + EXPECT_GE(target[i], 0); + loss -= target[i] * log(prediction + (target[i] == Dtype(0))); + loss -= (1 - target[i]) * log(1 - prediction + (target[i] == Dtype(1))); + } + return loss / num; + } + + void TestForward() { + LayerParameter layer_param; + const Dtype kLossWeight = 3.7; + layer_param.add_loss_weight(kLossWeight); + FillerParameter data_filler_param; + data_filler_param.set_std(1); + GaussianFiller data_filler(data_filler_param); + FillerParameter targets_filler_param; + targets_filler_param.set_min(0.0); + targets_filler_param.set_max(1.0); + UniformFiller targets_filler(targets_filler_param); + Dtype eps = 2e-2; + for (int i = 0; i < 100; ++i) { + // Fill the data vector + data_filler.Fill(this->blob_bottom_data_); + // Fill the targets vector + targets_filler.Fill(this->blob_bottom_targets_); + SigmoidCrossEntropyLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + Dtype layer_loss = + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const int count = this->blob_bottom_data_->count(); + const int num = this->blob_bottom_data_->num(); + const Dtype* blob_bottom_data = this->blob_bottom_data_->cpu_data(); + const Dtype* blob_bottom_targets = + this->blob_bottom_targets_->cpu_data(); + Dtype reference_loss = kLossWeight * SigmoidCrossEntropyLossReference( + count, num, blob_bottom_data, blob_bottom_targets); + EXPECT_NEAR(reference_loss, layer_loss, eps) << "debug: trial #" << i; + } + } + + Blob* const blob_bottom_data_; + Blob* const blob_bottom_targets_; + Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(SigmoidCrossEntropyLossLayerTest, TestDtypesAndDevices); + +TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestSigmoidCrossEntropyLoss) { + this->TestForward(); +} + +TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + const Dtype kLossWeight = 3.7; + layer_param.add_loss_weight(kLossWeight); + SigmoidCrossEntropyLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); +} + + +} // namespace caffe diff --git a/src/caffe/test/test_slice_layer.cpp b/src/caffe/test/test_slice_layer.cpp new file mode 100755 index 0000000..ccd0364 --- /dev/null +++ b/src/caffe/test/test_slice_layer.cpp @@ -0,0 +1,189 @@ +#include +#include + 
+#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class SliceLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + SliceLayerTest() + : blob_bottom_(new Blob(6, 12, 2, 3)), + blob_top_0_(new Blob()), + blob_top_1_(new Blob()), + blob_top_2_(new Blob()) {} + virtual void SetUp() { + // fill the values + Caffe::set_random_seed(1701); + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_top_vec_0_.push_back(blob_top_0_); + blob_top_vec_0_.push_back(blob_top_1_); + blob_top_vec_1_.push_back(blob_top_0_); + blob_top_vec_1_.push_back(blob_top_1_); + blob_top_vec_1_.push_back(blob_top_2_); + blob_bottom_vec_.push_back(blob_bottom_); + } + + virtual void ReduceBottomBlobSize() { + blob_bottom_->Reshape(4, 5, 2, 2); + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + } + + virtual ~SliceLayerTest() { + delete blob_top_0_; delete blob_top_1_; + delete blob_top_2_; delete blob_bottom_; + } + + Blob* const blob_bottom_; + Blob* const blob_top_0_; + Blob* const blob_top_1_; + Blob* const blob_top_2_; + vector*> blob_top_vec_0_, blob_top_vec_1_; + vector*> blob_bottom_vec_; +}; + +TYPED_TEST_CASE(SliceLayerTest, TestDtypesAndDevices); + +TYPED_TEST(SliceLayerTest, TestSetupNum) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_slice_param()->set_axis(0); + SliceLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_1_); + EXPECT_EQ(this->blob_bottom_->num(), 3 * this->blob_top_0_->num()); + EXPECT_EQ(this->blob_top_0_->num(), this->blob_top_1_->num()); + EXPECT_EQ(this->blob_top_0_->num(), this->blob_top_2_->num()); + EXPECT_EQ(this->blob_bottom_->channels(), this->blob_top_0_->channels()); + EXPECT_EQ(this->blob_bottom_->height(), this->blob_top_0_->height()); + EXPECT_EQ(this->blob_bottom_->width(), this->blob_top_0_->width()); +} + +TYPED_TEST(SliceLayerTest, TestSetupChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_slice_param()->add_slice_point(3); + SliceLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_0_); + EXPECT_EQ(this->blob_top_0_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_0_->channels(), 3); + EXPECT_EQ(this->blob_top_1_->channels(), 9); + EXPECT_EQ(this->blob_bottom_->channels(), + this->blob_top_0_->channels() + this->blob_top_1_->channels()); + EXPECT_EQ(this->blob_bottom_->height(), this->blob_top_0_->height()); + EXPECT_EQ(this->blob_bottom_->width(), this->blob_top_0_->width()); +} + +TYPED_TEST(SliceLayerTest, TestSliceAcrossNum) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_slice_param()->set_axis(0); + SliceLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_0_); + const int top_num = this->blob_bottom_->num() / 2; + ASSERT_EQ(top_num, this->blob_top_0_->num()); + ASSERT_EQ(top_num, this->blob_top_1_->num()); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_0_); + for (int n = 0; n < top_num; ++n) { + for (int c = 0; c < this->blob_top_0_->channels(); ++c) { + for (int h = 0; h < this->blob_bottom_->height(); ++h) { + for (int 
w = 0; w < this->blob_bottom_->width(); ++w) { + EXPECT_EQ(this->blob_bottom_->data_at(n, c, h, w), + this->blob_top_0_->data_at(n, c, h, w)); + } + } + } + for (int c = 0; c < this->blob_top_1_->channels(); ++c) { + for (int h = 0; h < this->blob_bottom_->height(); ++h) { + for (int w = 0; w < this->blob_bottom_->width(); ++w) { + EXPECT_EQ(this->blob_bottom_->data_at(n + 3, c, h, w), + this->blob_top_1_->data_at(n, c, h, w)); + } + } + } + } +} + +TYPED_TEST(SliceLayerTest, TestSliceAcrossChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + // Slice at 2, 8: should produce output blobs with #channels 2, 6, 4. + const int kSlicePoint0 = 2; + const int kSlicePoint1 = 8; + layer_param.mutable_slice_param()->add_slice_point(kSlicePoint0); + layer_param.mutable_slice_param()->add_slice_point(kSlicePoint1); + SliceLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_1_); + ASSERT_EQ(kSlicePoint0, this->blob_top_0_->channels()); + ASSERT_EQ(kSlicePoint1 - kSlicePoint0, this->blob_top_1_->channels()); + ASSERT_EQ(this->blob_bottom_->channels() - kSlicePoint1, + this->blob_top_2_->channels()); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_1_); + for (int n = 0; n < this->blob_bottom_->num(); ++n) { + for (int c = 0; c < this->blob_top_0_->channels(); ++c) { + for (int h = 0; h < this->blob_bottom_->height(); ++h) { + for (int w = 0; w < this->blob_bottom_->width(); ++w) { + EXPECT_EQ(this->blob_bottom_->data_at(n, c, h, w), + this->blob_top_0_->data_at(n, c, h, w)); + } + } + } + for (int c = 0; c < this->blob_top_1_->channels(); ++c) { + for (int h = 0; h < this->blob_bottom_->height(); ++h) { + for (int w = 0; w < this->blob_bottom_->width(); ++w) { + EXPECT_EQ(this->blob_bottom_->data_at(n, c + kSlicePoint0, h, w), + this->blob_top_1_->data_at(n, c, h, w)); + } + } + } + for (int c = 0; c < this->blob_top_2_->channels(); ++c) { + for (int h = 0; h < this->blob_bottom_->height(); ++h) { + for (int w = 0; w < this->blob_bottom_->width(); ++w) { + EXPECT_EQ(this->blob_bottom_->data_at(n, c + kSlicePoint1, h, w), + this->blob_top_2_->data_at(n, c, h, w)); + } + } + } + } +} + +TYPED_TEST(SliceLayerTest, TestGradientAcrossNum) { + typedef typename TypeParam::Dtype Dtype; + // Gradient checks are slow; reduce blob size. + this->ReduceBottomBlobSize(); + LayerParameter layer_param; + layer_param.mutable_slice_param()->set_axis(0); + SliceLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_0_); +} + +TYPED_TEST(SliceLayerTest, TestGradientAcrossChannels) { + typedef typename TypeParam::Dtype Dtype; + // Gradient checks are slow; reduce blob size. 
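// Added context (not in the original patch): CheckGradientExhaustive forms a
// finite-difference estimate for every (bottom element, top element) pair,
// roughly two extra forward passes per perturbed bottom element per checked
// top element, so the work grows with bottom count times top count.
// ReduceBottomBlobSize() shrinks the bottom from 6 x 12 x 2 x 3 = 432
// elements to 4 x 5 x 2 x 2 = 80, keeping this exhaustive check cheap.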
+ this->ReduceBottomBlobSize(); + LayerParameter layer_param; + const int kSlicePoint = 4; + layer_param.mutable_slice_param()->add_slice_point(kSlicePoint); + SliceLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_0_); +} + +} // namespace caffe diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp new file mode 100755 index 0000000..996da4b --- /dev/null +++ b/src/caffe/test/test_softmax_layer.cpp @@ -0,0 +1,149 @@ +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class SoftmaxLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: + SoftmaxLayerTest() + : blob_bottom_(new Blob(2, 10, 2, 3)), + blob_top_(new Blob()) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~SoftmaxLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(SoftmaxLayerTest, TestDtypesAndDevices); + +TYPED_TEST(SoftmaxLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + SoftmaxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Test sum + for (int i = 0; i < this->blob_bottom_->num(); ++i) { + for (int k = 0; k < this->blob_bottom_->height(); ++k) { + for (int l = 0; l < this->blob_bottom_->width(); ++l) { + Dtype sum = 0; + for (int j = 0; j < this->blob_top_->channels(); ++j) { + sum += this->blob_top_->data_at(i, j, k, l); + } + EXPECT_GE(sum, 0.999); + EXPECT_LE(sum, 1.001); + // Test exact values + Dtype scale = 0; + for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + scale += exp(this->blob_bottom_->data_at(i, j, k, l)); + } + for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, + exp(this->blob_bottom_->data_at(i, j, k, l)) / scale) + << "debug: " << i << " " << j; + EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4, + exp(this->blob_bottom_->data_at(i, j, k, l)) / scale) + << "debug: " << i << " " << j; + } + } + } + } +} + +TYPED_TEST(SoftmaxLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + SoftmaxLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +#ifdef USE_CUDNN +template +class CuDNNSoftmaxLayerTest : public GPUDeviceTest { + protected: + CuDNNSoftmaxLayerTest() + : blob_bottom_(new Blob(2, 10, 2, 3)), + blob_top_(new Blob()) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~CuDNNSoftmaxLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + 
vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(CuDNNSoftmaxLayerTest, TestDtypes); + +TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { + LayerParameter layer_param; + CuDNNSoftmaxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Test sum + for (int i = 0; i < this->blob_bottom_->num(); ++i) { + for (int k = 0; k < this->blob_bottom_->height(); ++k) { + for (int l = 0; l < this->blob_bottom_->width(); ++l) { + TypeParam sum = 0; + for (int j = 0; j < this->blob_top_->channels(); ++j) { + sum += this->blob_top_->data_at(i, j, k, l); + } + EXPECT_GE(sum, 0.999); + EXPECT_LE(sum, 1.001); + // Test exact values + TypeParam scale = 0; + for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + scale += exp(this->blob_bottom_->data_at(i, j, k, l)); + } + for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, + exp(this->blob_bottom_->data_at(i, j, k, l)) / scale) + << "debug: " << i << " " << j; + EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4, + exp(this->blob_bottom_->data_at(i, j, k, l)) / scale) + << "debug: " << i << " " << j; + } + } + } + } +} + +TYPED_TEST(CuDNNSoftmaxLayerTest, TestGradientCuDNN) { + LayerParameter layer_param; + CuDNNSoftmaxLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +#endif + +} // namespace caffe diff --git a/src/caffe/test/test_softmax_with_loss_layer.cpp b/src/caffe/test/test_softmax_with_loss_layer.cpp new file mode 100755 index 0000000..1498d5c --- /dev/null +++ b/src/caffe/test/test_softmax_with_loss_layer.cpp @@ -0,0 +1,110 @@ +#include +#include +#include +#include + +#include "boost/scoped_ptr.hpp" +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +using boost::scoped_ptr; + +namespace caffe { + +template +class SoftmaxWithLossLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + SoftmaxWithLossLayerTest() + : blob_bottom_data_(new Blob(10, 5, 2, 3)), + blob_bottom_label_(new Blob(10, 1, 2, 3)), + blob_top_loss_(new Blob()) { + // fill the values + FillerParameter filler_param; + filler_param.set_std(10); + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); + for (int i = 0; i < blob_bottom_label_->count(); ++i) { + blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; + } + blob_bottom_vec_.push_back(blob_bottom_label_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~SoftmaxWithLossLayerTest() { + delete blob_bottom_data_; + delete blob_bottom_label_; + delete blob_top_loss_; + } + Blob* const blob_bottom_data_; + Blob* const blob_bottom_label_; + Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(SoftmaxWithLossLayerTest, TestDtypesAndDevices); + +TYPED_TEST(SoftmaxWithLossLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.add_loss_weight(3); + SoftmaxWithLossLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2, 1701); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); 
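The forward checks in the softmax tests above amount to verifying that each channel vector is a normalized exponential: the entries sum to 1 within about 1e-3, and each entry equals exp(x_j) / sum_k exp(x_k) within 1e-4. Below is a small reference computation of that quantity; the max-subtraction is an extra numerical-stability step assumed here, not something the tests themselves perform.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Reference softmax over one channel vector, as used in the checks above.
std::vector<double> SoftmaxRef(const std::vector<double>& x) {
  double m = *std::max_element(x.begin(), x.end());  // stabilize exp()
  double scale = 0;
  std::vector<double> y(x.size());
  for (size_t j = 0; j < x.size(); ++j) scale += std::exp(x[j] - m);
  for (size_t j = 0; j < x.size(); ++j) y[j] = std::exp(x[j] - m) / scale;
  return y;
}

int main() {
  std::vector<double> y = SoftmaxRef({1.0, 2.0, 3.0});
  double sum = 0;
  for (double v : y) sum += v;
  std::printf("sum = %f (expected within [0.999, 1.001])\n", sum);
  return 0;
}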
+} + +TYPED_TEST(SoftmaxWithLossLayerTest, TestForwardIgnoreLabel) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_loss_param()->set_normalize(false); + // First, compute the loss with all labels + scoped_ptr > layer( + new SoftmaxWithLossLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Dtype full_loss = this->blob_top_loss_->cpu_data()[0]; + // Now, accumulate the loss, ignoring each label in {0, ..., 4} in turn. + Dtype accum_loss = 0; + for (int label = 0; label < 5; ++label) { + layer_param.mutable_loss_param()->set_ignore_label(label); + layer.reset(new SoftmaxWithLossLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + accum_loss += this->blob_top_loss_->cpu_data()[0]; + } + // Check that each label was included all but once. + EXPECT_NEAR(4 * full_loss, accum_loss, 1e-4); +} + +TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientIgnoreLabel) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + // labels are in {0, ..., 4}, so we'll ignore about a fifth of them + layer_param.mutable_loss_param()->set_ignore_label(0); + SoftmaxWithLossLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2, 1701); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); +} + +TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientUnnormalized) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_loss_param()->set_normalize(false); + SoftmaxWithLossLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2, 1701); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); +} + +} // namespace caffe diff --git a/src/caffe/test/test_solver.cpp b/src/caffe/test/test_solver.cpp new file mode 100755 index 0000000..ceabc9c --- /dev/null +++ b/src/caffe/test/test_solver.cpp @@ -0,0 +1,108 @@ +#include +#include +#include + +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" + +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/solver.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +using std::ostringstream; + +namespace caffe { + +template +class SolverTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolverFromProtoString(const string& proto) { + SolverParameter param; + CHECK(google::protobuf::TextFormat::ParseFromString(proto, ¶m)); + // Set the solver_mode according to current Caffe::mode. 
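Stepping back to the ignore-label check in the SoftmaxWithLoss tests above: with normalization disabled, ignoring label l removes exactly the samples carrying that label, so each sample's loss appears in four of the five per-label runs and the accumulated loss is four times the full loss. A tiny sketch of that identity with made-up per-label losses (illustrative numbers only):

#include <cassert>
#include <cmath>

int main() {
  // Hypothetical unnormalized loss contributed by samples of each label.
  const double per_label[5] = {1.5, 0.7, 2.0, 0.3, 1.1};
  double full = 0;
  for (int l = 0; l < 5; ++l) full += per_label[l];
  // Ignoring label l removes only that label's contribution.
  double accum = 0;
  for (int l = 0; l < 5; ++l) accum += full - per_label[l];
  // Same identity as EXPECT_NEAR(4 * full_loss, accum_loss, 1e-4) above.
  assert(std::fabs(accum - 4 * full) < 1e-9);
  return 0;
}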
+ switch (Caffe::mode()) { + case Caffe::CPU: + param.set_solver_mode(SolverParameter_SolverMode_CPU); + break; + case Caffe::GPU: + param.set_solver_mode(SolverParameter_SolverMode_GPU); + break; + default: + LOG(FATAL) << "Unknown Caffe mode: " << Caffe::mode(); + } + solver_.reset(new SGDSolver(param)); + } + + shared_ptr > solver_; +}; + +TYPED_TEST_CASE(SolverTest, TestDtypesAndDevices); + +TYPED_TEST(SolverTest, TestInitTrainTestNets) { + const string& proto = + "test_interval: 10 " + "test_iter: 10 " + "test_state: { stage: 'with-softmax' }" + "test_iter: 10 " + "test_state: {}" + "net_param { " + " name: 'TestNetwork' " + " layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " shape { " + " dim: 5 " + " dim: 2 " + " dim: 3 " + " dim: 4 " + " } " + " shape { " + " dim: 5 " + " } " + " } " + " top: 'data' " + " top: 'label' " + " } " + " layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " } " + " bottom: 'data' " + " top: 'innerprod' " + " } " + " layer { " + " name: 'accuracy' " + " type: 'Accuracy' " + " bottom: 'innerprod' " + " bottom: 'label' " + " top: 'accuracy' " + " exclude: { phase: TRAIN } " + " } " + " layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { phase: TRAIN } " + " include: { phase: TEST stage: 'with-softmax' } " + " } " + "} "; + this->InitSolverFromProtoString(proto); + ASSERT_TRUE(this->solver_->net() != NULL); + EXPECT_TRUE(this->solver_->net()->has_layer("loss")); + EXPECT_FALSE(this->solver_->net()->has_layer("accuracy")); + ASSERT_EQ(2, this->solver_->test_nets().size()); + EXPECT_TRUE(this->solver_->test_nets()[0]->has_layer("loss")); + EXPECT_TRUE(this->solver_->test_nets()[0]->has_layer("accuracy")); + EXPECT_FALSE(this->solver_->test_nets()[1]->has_layer("loss")); + EXPECT_TRUE(this->solver_->test_nets()[1]->has_layer("accuracy")); +} + +} // namespace caffe diff --git a/src/caffe/test/test_split_layer.cpp b/src/caffe/test/test_split_layer.cpp new file mode 100755 index 0000000..be5204b --- /dev/null +++ b/src/caffe/test/test_split_layer.cpp @@ -0,0 +1,1045 @@ +#include +#include +#include + +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/insert_splits.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class SplitLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + SplitLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 5)), + blob_top_a_(new Blob()), + blob_top_b_(new Blob()) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_a_); + blob_top_vec_.push_back(blob_top_b_); + } + virtual ~SplitLayerTest() { + delete blob_bottom_; + delete blob_top_a_; + delete blob_top_b_; + } + Blob* const blob_bottom_; + Blob* const blob_top_a_; + Blob* const blob_top_b_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(SplitLayerTest, TestDtypesAndDevices); + +TYPED_TEST(SplitLayerTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + SplitLayer layer(layer_param); + 
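For readers unfamiliar with phase/stage filtering, the has_layer assertions above hold because a layer is kept in a net when it has no include rules or at least one include rule matches the net's phase and stages, and no exclude rule matches; hence 'loss' lands in the train net and the 'with-softmax' test net only, while 'accuracy' lands in both test nets. A simplified sketch of that rule evaluation (the names Rule, State, and Keep are illustrative, not Caffe's API, and min/max level and not_stage are omitted):

#include <cassert>
#include <string>
#include <vector>

enum Phase { TRAIN, TEST };
struct Rule  { bool has_phase; Phase phase; std::vector<std::string> stages; };
struct State { Phase phase; std::vector<std::string> stages; };

// A rule matches when its phase (if set) and all of its stages are present.
bool Matches(const Rule& r, const State& s) {
  if (r.has_phase && r.phase != s.phase) return false;
  for (const std::string& st : r.stages) {
    bool found = false;
    for (const std::string& have : s.stages) found = found || (have == st);
    if (!found) return false;
  }
  return true;
}

// A layer is kept when it has no include rules or some include rule matches,
// and no exclude rule matches.
bool Keep(const std::vector<Rule>& inc, const std::vector<Rule>& exc,
          const State& s) {
  bool ok = inc.empty();
  for (const Rule& r : inc) ok = ok || Matches(r, s);
  for (const Rule& r : exc) ok = ok && !Matches(r, s);
  return ok;
}

int main() {
  State train{TRAIN, {}}, test_softmax{TEST, {"with-softmax"}}, test_plain{TEST, {}};
  Rule  is_train{true, TRAIN, {}}, is_test_softmax{true, TEST, {"with-softmax"}};
  std::vector<Rule> loss_inc = {is_train, is_test_softmax};  // 'loss' include rules
  std::vector<Rule> acc_exc = {is_train};                    // 'accuracy' exclude rule
  std::vector<Rule> none;
  assert(Keep(loss_inc, none, train));
  assert(Keep(loss_inc, none, test_softmax));
  assert(!Keep(loss_inc, none, test_plain));
  assert(!Keep(none, acc_exc, train));
  assert(Keep(none, acc_exc, test_softmax));
  assert(Keep(none, acc_exc, test_plain));
  return 0;
}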
layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_a_->num(), 2); + EXPECT_EQ(this->blob_top_a_->channels(), 3); + EXPECT_EQ(this->blob_top_a_->height(), 6); + EXPECT_EQ(this->blob_top_a_->width(), 5); + EXPECT_EQ(this->blob_top_b_->num(), 2); + EXPECT_EQ(this->blob_top_b_->channels(), 3); + EXPECT_EQ(this->blob_top_b_->height(), 6); + EXPECT_EQ(this->blob_top_b_->width(), 5); +} + +TYPED_TEST(SplitLayerTest, Test) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + SplitLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + Dtype bottom_value = this->blob_bottom_->cpu_data()[i]; + EXPECT_EQ(bottom_value, this->blob_top_a_->cpu_data()[i]); + EXPECT_EQ(bottom_value, this->blob_top_b_->cpu_data()[i]); + } +} + +TYPED_TEST(SplitLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + SplitLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + + +class SplitLayerInsertionTest : public ::testing::Test { + protected: + void RunInsertionTest( + const string& input_param_string, const string& output_param_string) { + // Test that InsertSplits called on the proto specified by + // input_param_string results in the proto specified by + // output_param_string. + NetParameter input_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + input_param_string, &input_param)); + NetParameter expected_output_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + output_param_string, &expected_output_param)); + NetParameter actual_output_param; + InsertSplits(input_param, &actual_output_param); + EXPECT_EQ(expected_output_param.DebugString(), + actual_output_param.DebugString()); + // Also test idempotence. 
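The idempotence assertion above is the generic property f(f(x)) == f(x): running InsertSplits on an already-split network must be a no-op, because every blob then has exactly one consumer. The same pattern works for any graph-rewriting pass; a schematic sketch using a hypothetical Rewrite function (not part of Caffe):

#include <cassert>
#include <string>

// Hypothetical graph-rewriting pass, used only to illustrate the test pattern.
std::string Rewrite(const std::string& net) {
  return net.find("split") == std::string::npos ? net + " split" : net;
}

int main() {
  std::string once = Rewrite("net");
  std::string twice = Rewrite(once);
  assert(once == twice);  // mirrors the double_split_insert_param check above
  return 0;
}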
+ NetParameter double_split_insert_param; + InsertSplits(actual_output_param, &double_split_insert_param); + EXPECT_EQ(actual_output_param.DebugString(), + double_split_insert_param.DebugString()); + } +}; + +TEST_F(SplitLayerInsertionTest, TestNoInsertion1) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunInsertionTest(input_proto, input_proto); +} + +TEST_F(SplitLayerInsertionTest, TestNoInsertion2) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'data_split' " + " type: 'Split' " + " bottom: 'data' " + " top: 'data_split_0' " + " top: 'data_split_1' " + "} " + "layer { " + " name: 'innerprod1' " + " type: 'InnerProduct' " + " bottom: 'data_split_0' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " bottom: 'data_split_1' " + " top: 'innerprod2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod1' " + " bottom: 'innerprod2' " + "} "; + this->RunInsertionTest(input_proto, input_proto); +} + +TEST_F(SplitLayerInsertionTest, TestNoInsertionImageNet) { + const string& input_proto = + "name: 'CaffeNet' " + "layer { " + " name: 'data' " + " type: 'Data' " + " data_param { " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " batch_size: 256 " + " } " + " transform_param { " + " crop_size: 227 " + " mirror: true " + " mean_file: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'conv1' " + " type: 'Convolution' " + " convolution_param { " + " num_output: 96 " + " kernel_size: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layer { " + " name: 'relu1' " + " type: 'ReLU' " + " bottom: 'conv1' " + " top: 'conv1' " + "} " + "layer { " + " name: 'pool1' " + " type: 'Pooling' " + " pooling_param { " + " pool: MAX " + " kernel_size: 3 " + " stride: 2 " + " } " + " bottom: 'conv1' " + " top: 'pool1' " + "} " + "layer { " + " name: 'norm1' " + " type: 'LRN' " + " lrn_param { " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool1' " + " top: 'norm1' " + "} " + "layer { " + " name: 'conv2' " + " type: 'Convolution' " + " convolution_param { " + " num_output: 256 " + " group: 2 " + " kernel_size: 5 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. 
" + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'norm1' " + " top: 'conv2' " + "} " + "layer { " + " name: 'relu2' " + " type: 'ReLU' " + " bottom: 'conv2' " + " top: 'conv2' " + "} " + "layer { " + " name: 'pool2' " + " type: 'Pooling' " + " pooling_param { " + " pool: MAX " + " kernel_size: 3 " + " stride: 2 " + " } " + " bottom: 'conv2' " + " top: 'pool2' " + "} " + "layer { " + " name: 'norm2' " + " type: 'LRN' " + " lrn_param { " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool2' " + " top: 'norm2' " + "} " + "layer { " + " name: 'conv3' " + " type: 'Convolution' " + " convolution_param { " + " num_output: 384 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'norm2' " + " top: 'conv3' " + "} " + "layer { " + " name: 'relu3' " + " type: 'ReLU' " + " bottom: 'conv3' " + " top: 'conv3' " + "} " + "layer { " + " name: 'conv4' " + " type: 'Convolution' " + " convolution_param { " + " num_output: 384 " + " group: 2 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'conv3' " + " top: 'conv4' " + "} " + "layer { " + " name: 'relu4' " + " type: 'ReLU' " + " bottom: 'conv4' " + " top: 'conv4' " + "} " + "layer { " + " name: 'conv5' " + " type: 'Convolution' " + " convolution_param { " + " num_output: 256 " + " group: 2 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'conv4' " + " top: 'conv5' " + "} " + "layer { " + " name: 'relu5' " + " type: 'ReLU' " + " bottom: 'conv5' " + " top: 'conv5' " + "} " + "layer { " + " name: 'pool5' " + " type: 'Pooling' " + " pooling_param { " + " kernel_size: 3 " + " pool: MAX " + " stride: 2 " + " } " + " bottom: 'conv5' " + " top: 'pool5' " + "} " + "layer { " + " name: 'fc6' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'pool5' " + " top: 'fc6' " + "} " + "layer { " + " name: 'relu6' " + " type: 'ReLU' " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layer { " + " name: 'drop6' " + " type: 'Dropout' " + " dropout_param { " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layer { " + " name: 'fc7' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. 
" + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'fc6' " + " top: 'fc7' " + "} " + "layer { " + " name: 'relu7' " + " type: 'ReLU' " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layer { " + " name: 'drop7' " + " type: 'Dropout' " + " dropout_param { " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layer { " + " name: 'fc8' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'fc7' " + " top: 'fc8' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + this->RunInsertionTest(input_proto, input_proto); +} + +TEST_F(SplitLayerInsertionTest, TestNoInsertionWithInPlace) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + "} " + "layer { " + " name: 'relu' " + " type: 'ReLU' " + " bottom: 'innerprod' " + " top: 'innerprod' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunInsertionTest(input_proto, input_proto); +} + +TEST_F(SplitLayerInsertionTest, TestLossInsertion) { + const string& input_proto = + "name: 'UnsharedWeightsNetwork' " + "force_backward: true " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " top: 'data' " + "} " + "layer { " + " name: 'innerproduct1' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { name: 'unsharedweights1' } " + " bottom: 'data' " + " top: 'innerproduct1' " + " loss_weight: 2.5 " + "} " + "layer { " + " name: 'innerproduct2' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { name: 'unsharedweights2' } " + " bottom: 'data' " + " top: 'innerproduct2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerproduct1' " + " bottom: 'innerproduct2' " + "} "; + const string& expected_output_proto = + "name: 'UnsharedWeightsNetwork' " + "force_backward: true " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " top: 'data' " + "} " + "layer { " + " name: 'data_data_0_split' " + " type: 'Split' " + " bottom: 'data' " + " top: 'data_data_0_split_0' " + " top: 'data_data_0_split_1' " + "} " + "layer { " + " name: 'innerproduct1' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { name: 'unsharedweights1' } " + " 
bottom: 'data_data_0_split_0' " + " top: 'innerproduct1' " + "} " + "layer { " + " name: 'innerproduct1_innerproduct1_0_split' " + " type: 'Split' " + " bottom: 'innerproduct1' " + " top: 'innerproduct1_innerproduct1_0_split_0' " + " top: 'innerproduct1_innerproduct1_0_split_1' " + " loss_weight: 2.5 " + " loss_weight: 0 " + "} " + "layer { " + " name: 'innerproduct2' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { name: 'unsharedweights2' } " + " bottom: 'data_data_0_split_1' " + " top: 'innerproduct2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerproduct1_innerproduct1_0_split_1' " + " bottom: 'innerproduct2' " + "} "; + this->RunInsertionTest(input_proto, expected_output_proto); +} + +TEST_F(SplitLayerInsertionTest, TestInsertion) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod1' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod2' " + "} " + "layer { " + " name: 'innerprod3' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod3' " + "} " + "layer { " + " name: 'loss1' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod1' " + " bottom: 'innerprod2' " + "} " + "layer { " + " name: 'loss2' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod2' " + " bottom: 'innerprod3' " + "} "; + const string& expected_output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'data_data_0_split' " + " type: 'Split' " + " bottom: 'data' " + " top: 'data_data_0_split_0' " + " top: 'data_data_0_split_1' " + " top: 'data_data_0_split_2' " + "} " + "layer { " + " name: 'innerprod1' " + " type: 'InnerProduct' " + " bottom: 'data_data_0_split_0' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " bottom: 'data_data_0_split_1' " + " top: 'innerprod2' " + "} " + "layer { " + " name: 'innerprod2_innerprod2_0_split' " + " type: 'Split' " + " bottom: 'innerprod2' " + " top: 'innerprod2_innerprod2_0_split_0' " + " top: 'innerprod2_innerprod2_0_split_1' " + "} " + "layer { " + " name: 'innerprod3' " + " type: 'InnerProduct' " + " bottom: 'data_data_0_split_2' " + " top: 'innerprod3' " + "} " + "layer { " + " name: 'loss1' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod1' " + " bottom: 'innerprod2_innerprod2_0_split_0' " + "} " + "layer { " + " name: 'loss2' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod2_innerprod2_0_split_1' " + " bottom: 'innerprod3' " + "} "; + this->RunInsertionTest(input_proto, expected_output_proto); +} + +TEST_F(SplitLayerInsertionTest, TestInsertionTwoTop) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod1' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " bottom: 'label' " + " top: 'innerprod2' " + "} " + "layer { " + " name: 'innerprod3' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod3' " + "} " + "layer { " + " 
name: 'innerprod4' " + " type: 'InnerProduct' " + " bottom: 'label' " + " top: 'innerprod4' " + "} " + "layer { " + " name: 'loss1' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod1' " + " bottom: 'innerprod3' " + "} " + "layer { " + " name: 'loss2' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod2' " + " bottom: 'innerprod4' " + "} "; + const string& expected_output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'data_data_0_split' " + " type: 'Split' " + " bottom: 'data' " + " top: 'data_data_0_split_0' " + " top: 'data_data_0_split_1' " + "} " + "layer { " + " name: 'label_data_1_split' " + " type: 'Split' " + " bottom: 'label' " + " top: 'label_data_1_split_0' " + " top: 'label_data_1_split_1' " + "} " + "layer { " + " name: 'innerprod1' " + " type: 'InnerProduct' " + " bottom: 'data_data_0_split_0' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " bottom: 'label_data_1_split_0' " + " top: 'innerprod2' " + "} " + "layer { " + " name: 'innerprod3' " + " type: 'InnerProduct' " + " bottom: 'data_data_0_split_1' " + " top: 'innerprod3' " + "} " + "layer { " + " name: 'innerprod4' " + " type: 'InnerProduct' " + " bottom: 'label_data_1_split_1' " + " top: 'innerprod4' " + "} " + "layer { " + " name: 'loss1' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod1' " + " bottom: 'innerprod3' " + "} " + "layer { " + " name: 'loss2' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod2' " + " bottom: 'innerprod4' " + "} "; + this->RunInsertionTest(input_proto, expected_output_proto); +} + +TEST_F(SplitLayerInsertionTest, TestInputInsertion) { + const string& input_proto = + "name: 'TestNetwork' " + "input: 'data' " + "input_dim: 10 " + "input_dim: 3 " + "input_dim: 227 " + "input_dim: 227 " + "layer { " + " name: 'innerprod1' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod1' " + " bottom: 'innerprod2' " + "} "; + const string& expected_output_proto = + "name: 'TestNetwork' " + "input: 'data' " + "input_dim: 10 " + "input_dim: 3 " + "input_dim: 227 " + "input_dim: 227 " + "layer { " + " name: 'data_input_0_split' " + " type: 'Split' " + " bottom: 'data' " + " top: 'data_input_0_split_0' " + " top: 'data_input_0_split_1' " + "} " + "layer { " + " name: 'innerprod1' " + " type: 'InnerProduct' " + " bottom: 'data_input_0_split_0' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " bottom: 'data_input_0_split_1' " + " top: 'innerprod2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod1' " + " bottom: 'innerprod2' " + "} "; + this->RunInsertionTest(input_proto, expected_output_proto); +} + +TEST_F(SplitLayerInsertionTest, TestWithInPlace) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod1' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'relu1' " + " type: 'ReLU' " + " bottom: 'innerprod1' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " bottom: 'innerprod1' " + " top: 'innerprod2' " 
+ "} " + "layer { " + " name: 'loss1' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod1' " + " bottom: 'label' " + "} " + "layer { " + " name: 'loss2' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod2' " + " bottom: 'data' " + "} "; + const string& expected_output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'data_data_0_split' " + " type: 'Split' " + " bottom: 'data' " + " top: 'data_data_0_split_0' " + " top: 'data_data_0_split_1' " + "} " + "layer { " + " name: 'innerprod1' " + " type: 'InnerProduct' " + " bottom: 'data_data_0_split_0' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'relu1' " + " type: 'ReLU' " + " bottom: 'innerprod1' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'innerprod1_relu1_0_split' " + " type: 'Split' " + " bottom: 'innerprod1' " + " top: 'innerprod1_relu1_0_split_0' " + " top: 'innerprod1_relu1_0_split_1' " + "} " + "layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " bottom: 'innerprod1_relu1_0_split_0' " + " top: 'innerprod2' " + "} " + "layer { " + " name: 'loss1' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod1_relu1_0_split_1' " + " bottom: 'label' " + "} " + "layer { " + " name: 'loss2' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod2' " + " bottom: 'data_data_0_split_1' " + "} "; + this->RunInsertionTest(input_proto, expected_output_proto); +} + +} // namespace caffe diff --git a/src/caffe/test/test_spp_layer.cpp b/src/caffe/test/test_spp_layer.cpp new file mode 100755 index 0000000..b2585f1 --- /dev/null +++ b/src/caffe/test/test_spp_layer.cpp @@ -0,0 +1,131 @@ +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class SPPLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + SPPLayerTest() + : blob_bottom_(new Blob()), + blob_bottom_2_(new Blob()), + blob_bottom_3_(new Blob()), + blob_top_(new Blob()) {} + virtual void SetUp() { + Caffe::set_random_seed(1701); + blob_bottom_->Reshape(2, 3, 9, 8); + blob_bottom_2_->Reshape(4, 3, 1024, 765); + blob_bottom_3_->Reshape(10, 3, 7, 7); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_bottom_vec_2_.push_back(blob_bottom_2_); + blob_bottom_vec_3_.push_back(blob_bottom_3_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~SPPLayerTest() { delete blob_bottom_; delete blob_top_; } + + Blob* const blob_bottom_; + Blob* const blob_bottom_2_; + Blob* const blob_bottom_3_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_bottom_vec_2_; + vector*> blob_bottom_vec_3_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(SPPLayerTest, TestDtypesAndDevices); + +TYPED_TEST(SPPLayerTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_spp_param()->set_pyramid_height(3); + SPPLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + // expected number of pool results is geometric sum + // (1 - r ** n)/(1 - r) where r = 4 and n = pyramid_height + // (1 - 4 ** 3)/(1 - 4) = 21 + // multiply bottom num_channels * expected_pool_results + // to 
get expected num_channels (3 * 21 = 63) + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 63); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +TYPED_TEST(SPPLayerTest, TestEqualOutputDims) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_spp_param()->set_pyramid_height(5); + SPPLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_2_, this->blob_top_vec_); + // expected number of pool results is geometric sum + // (1 - r ** n)/(1 - r) where r = 4 and n = pyramid_height + // (1 - 4 ** 5)/(1 - 4) = 341 + // multiply bottom num_channels * expected_pool_results + // to get expected num_channels (3 * 341 = 1023) + EXPECT_EQ(this->blob_top_->num(), 4); + EXPECT_EQ(this->blob_top_->channels(), 1023); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +TYPED_TEST(SPPLayerTest, TestEqualOutputDims2) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_spp_param()->set_pyramid_height(3); + SPPLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_3_, this->blob_top_vec_); + // expected number of pool results is geometric sum + // (1 - r ** n)/(1 - r) where r = 4 and n = pyramid_height + // (1 - 4 ** 3)/(1 - 4) = 21 + // multiply bottom num_channels * expected_pool_results + // to get expected num_channels (3 * 21 = 63) + EXPECT_EQ(this->blob_top_->num(), 10); + EXPECT_EQ(this->blob_top_->channels(), 63); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +TYPED_TEST(SPPLayerTest, TestForwardBackward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_spp_param()->set_pyramid_height(3); + SPPLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + vector propagate_down(this->blob_bottom_vec_.size(), true); + layer.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); +} + +TYPED_TEST(SPPLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + SPPParameter* spp_param = layer_param.mutable_spp_param(); + spp_param->set_pyramid_height(3); + SPPLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-2); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + + +} // namespace caffe diff --git a/src/caffe/test/test_stochastic_pooling.cpp b/src/caffe/test/test_stochastic_pooling.cpp new file mode 100755 index 0000000..f84464c --- /dev/null +++ b/src/caffe/test/test_stochastic_pooling.cpp @@ -0,0 +1,176 @@ +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +using std::min; + +namespace caffe { + +template +class StochasticPoolingLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + StochasticPoolingLayerTest() + : blob_bottom_(new Blob()), + blob_top_(new Blob()) {} + virtual void SetUp() { + Caffe::set_random_seed(1701); + blob_bottom_->Reshape(2, 3, 6, 5); + // fill the values + FillerParameter filler_param; + filler_param.set_min(0.1); + filler_param.set_max(1.); + UniformFiller 
filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~StochasticPoolingLayerTest() { + delete blob_bottom_; delete blob_top_; + } + + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +template +class CPUStochasticPoolingLayerTest + : public StochasticPoolingLayerTest > { +}; + +TYPED_TEST_CASE(CPUStochasticPoolingLayerTest, TestDtypes); + +TYPED_TEST(CPUStochasticPoolingLayerTest, TestSetup) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 2); +} + +#ifndef CPU_ONLY + +template +class GPUStochasticPoolingLayerTest + : public StochasticPoolingLayerTest > { +}; + +TYPED_TEST_CASE(GPUStochasticPoolingLayerTest, TestDtypes); + +TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochastic) { + LayerParameter layer_param; + layer_param.set_phase(TRAIN); + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + // Check if the output is correct - it should do random sampling + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + TypeParam total = 0; + for (int n = 0; n < this->blob_top_->num(); ++n) { + for (int c = 0; c < this->blob_top_->channels(); ++c) { + for (int ph = 0; ph < this->blob_top_->height(); ++ph) { + for (int pw = 0; pw < this->blob_top_->width(); ++pw) { + TypeParam pooled = top_data[this->blob_top_->offset(n, c, ph, pw)]; + total += pooled; + int hstart = ph * 2; + int hend = min(hstart + 3, this->blob_bottom_->height()); + int wstart = pw * 2; + int wend = min(wstart + 3, this->blob_bottom_->width()); + bool has_equal = false; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + has_equal |= (pooled == bottom_data[this->blob_bottom_-> + offset(n, c, h, w)]); + } + } + EXPECT_TRUE(has_equal); + } + } + } + } + // When we are doing stochastic pooling, the average we get should be higher + // than the simple data average since we are weighting more on higher-valued + // ones. 
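The bound checked just below follows from how stochastic pooling samples: within each window, location i is picked with probability a_i / sum_j a_j, so the expected pooled value is sum_i a_i^2 / sum_i a_i, which is never below the plain window mean (and is strictly above it unless all entries are equal). With inputs drawn uniformly from [0.1, 1] the plain mean is already about 0.55. A quick numeric illustration of the inequality:

#include <cassert>
#include <vector>

int main() {
  // One 3x3 pooling window of activations.
  std::vector<double> a = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9};
  double sum = 0, sum_sq = 0;
  for (double v : a) { sum += v; sum_sq += v * v; }
  double mean = sum / a.size();
  double expected_pooled = sum_sq / sum;  // E[pooled] under stochastic pooling
  assert(expected_pooled >= mean);        // sampling favors larger activations
  return 0;
}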
+ EXPECT_GE(total / this->blob_top_->count(), 0.55); +} + +TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochasticTestPhase) { + LayerParameter layer_param; + layer_param.set_phase(TEST); + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + // Check if the output is correct - it should do random sampling + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int n = 0; n < this->blob_top_->num(); ++n) { + for (int c = 0; c < this->blob_top_->channels(); ++c) { + for (int ph = 0; ph < this->blob_top_->height(); ++ph) { + for (int pw = 0; pw < this->blob_top_->width(); ++pw) { + TypeParam pooled = top_data[this->blob_top_->offset(n, c, ph, pw)]; + int hstart = ph * 2; + int hend = min(hstart + 3, this->blob_bottom_->height()); + int wstart = pw * 2; + int wend = min(wstart + 3, this->blob_bottom_->width()); + bool smaller_than_max = false; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + smaller_than_max |= (pooled <= bottom_data[this->blob_bottom_-> + offset(n, c, h, w)]); + } + } + EXPECT_TRUE(smaller_than_max); + } + } + } + } +} + +TYPED_TEST(GPUStochasticPoolingLayerTest, TestGradient) { + LayerParameter layer_param; + layer_param.set_phase(TRAIN); + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC); + PoolingLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-2); + // it is too expensive to call curand multiple times, so we don't do an + // exhaustive gradient check. 
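For context on the non-exhaustive check invoked just below: the gradient checker compares the layer's analytic gradient against a centered finite-difference estimate, (f(x + h) - f(x - h)) / (2h), using the step size and tolerance passed to its constructor (1e-4 and 1e-2 here). The exhaustive variant repeats this procedure for every top element, which is what makes it too expensive when each forward pass consumes fresh random numbers. A minimal sketch of the core comparison for a scalar function (the idea only, not Caffe's GradientChecker API):

#include <cassert>
#include <cmath>

// Centered finite-difference estimate of df/dx.
double NumericGradient(double (*f)(double), double x, double h) {
  return (f(x + h) - f(x - h)) / (2 * h);
}

double Square(double x) { return x * x; }

int main() {
  const double x = 3.0, h = 1e-4, threshold = 1e-2;
  double analytic = 2 * x;                        // d(x^2)/dx
  double numeric = NumericGradient(Square, x, h);
  assert(std::fabs(analytic - numeric) < threshold);
  return 0;
}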
+ checker.CheckGradient(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +#endif + +} // namespace caffe diff --git a/src/caffe/test/test_syncedmem.cpp b/src/caffe/test/test_syncedmem.cpp new file mode 100755 index 0000000..b946233 --- /dev/null +++ b/src/caffe/test/test_syncedmem.cpp @@ -0,0 +1,126 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/common.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/device_alternate.hpp" +#include "caffe/util/math_functions.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +class SyncedMemoryTest : public ::testing::Test {}; + +TEST_F(SyncedMemoryTest, TestInitialization) { + SyncedMemory mem(10); + EXPECT_EQ(mem.head(), SyncedMemory::UNINITIALIZED); + EXPECT_EQ(mem.size(), 10); + SyncedMemory* p_mem = new SyncedMemory(10 * sizeof(float)); + EXPECT_EQ(p_mem->size(), 10 * sizeof(float)); + delete p_mem; +} + +#ifndef CPU_ONLY // GPU test + +TEST_F(SyncedMemoryTest, TestAllocationCPUGPU) { + SyncedMemory mem(10); + EXPECT_TRUE(mem.cpu_data()); + EXPECT_TRUE(mem.gpu_data()); + EXPECT_TRUE(mem.mutable_cpu_data()); + EXPECT_TRUE(mem.mutable_gpu_data()); +} + +#endif + +TEST_F(SyncedMemoryTest, TestAllocationCPU) { + SyncedMemory mem(10); + EXPECT_TRUE(mem.cpu_data()); + EXPECT_TRUE(mem.mutable_cpu_data()); +} + +#ifndef CPU_ONLY // GPU test + +TEST_F(SyncedMemoryTest, TestAllocationGPU) { + SyncedMemory mem(10); + EXPECT_TRUE(mem.gpu_data()); + EXPECT_TRUE(mem.mutable_gpu_data()); +} + +#endif + +TEST_F(SyncedMemoryTest, TestCPUWrite) { + SyncedMemory mem(10); + void* cpu_data = mem.mutable_cpu_data(); + EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU); + caffe_memset(mem.size(), 1, cpu_data); + for (int i = 0; i < mem.size(); ++i) { + EXPECT_EQ((static_cast(cpu_data))[i], 1); + } + // do another round + cpu_data = mem.mutable_cpu_data(); + EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU); + caffe_memset(mem.size(), 2, cpu_data); + for (int i = 0; i < mem.size(); ++i) { + EXPECT_EQ((static_cast(cpu_data))[i], 2); + } +} + +#ifndef CPU_ONLY // GPU test + +TEST_F(SyncedMemoryTest, TestGPURead) { + SyncedMemory mem(10); + void* cpu_data = mem.mutable_cpu_data(); + EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU); + caffe_memset(mem.size(), 1, cpu_data); + const void* gpu_data = mem.gpu_data(); + EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); + // check if values are the same + char* recovered_value = new char[10]; + caffe_gpu_memcpy(10, gpu_data, recovered_value); + for (int i = 0; i < mem.size(); ++i) { + EXPECT_EQ((static_cast(recovered_value))[i], 1); + } + // do another round + cpu_data = mem.mutable_cpu_data(); + EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU); + caffe_memset(mem.size(), 2, cpu_data); + for (int i = 0; i < mem.size(); ++i) { + EXPECT_EQ((static_cast(cpu_data))[i], 2); + } + gpu_data = mem.gpu_data(); + EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); + // check if values are the same + caffe_gpu_memcpy(10, gpu_data, recovered_value); + for (int i = 0; i < mem.size(); ++i) { + EXPECT_EQ((static_cast(recovered_value))[i], 2); + } + delete[] recovered_value; +} + +TEST_F(SyncedMemoryTest, TestGPUWrite) { + SyncedMemory mem(10); + void* gpu_data = mem.mutable_gpu_data(); + EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_GPU); + caffe_gpu_memset(mem.size(), 1, gpu_data); + const void* cpu_data = mem.cpu_data(); + for (int i = 0; i < mem.size(); ++i) { + EXPECT_EQ((static_cast(cpu_data))[i], 1); + } + EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); + + gpu_data = mem.mutable_gpu_data(); + 
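The head() assertions in these SyncedMemory tests trace a small state machine: memory starts UNINITIALIZED, a mutable_cpu_data()/mutable_gpu_data() call moves the head to the side being written, and a read from the other side triggers a copy and moves the head to SYNCED. A rough sketch of that bookkeeping (a toy model, not the real SyncedMemory implementation):

#include <cassert>

enum Head { UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED };

// Toy model of the head-state transitions exercised by the tests above.
struct ToySyncedMem {
  Head head = UNINITIALIZED;
  void MutableCpu() { head = HEAD_AT_CPU; }                    // CPU write
  void MutableGpu() { head = HEAD_AT_GPU; }                    // GPU write
  void ReadGpu() { if (head == HEAD_AT_CPU) head = SYNCED; }   // copy to GPU
  void ReadCpu() { if (head == HEAD_AT_GPU) head = SYNCED; }   // copy to CPU
};

int main() {
  ToySyncedMem mem;
  mem.MutableCpu();  assert(mem.head == HEAD_AT_CPU);
  mem.ReadGpu();     assert(mem.head == SYNCED);   // mirrors TestGPURead
  mem.MutableGpu();  assert(mem.head == HEAD_AT_GPU);
  mem.ReadCpu();     assert(mem.head == SYNCED);   // mirrors TestGPUWrite
  return 0;
}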
EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_GPU); + caffe_gpu_memset(mem.size(), 2, gpu_data); + cpu_data = mem.cpu_data(); + for (int i = 0; i < mem.size(); ++i) { + EXPECT_EQ((static_cast(cpu_data))[i], 2); + } + EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); +} + +#endif + +} // namespace caffe diff --git a/src/caffe/test/test_tanh_layer.cpp b/src/caffe/test/test_tanh_layer.cpp new file mode 100755 index 0000000..5dc9283 --- /dev/null +++ b/src/caffe/test/test_tanh_layer.cpp @@ -0,0 +1,101 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/common_layers.hpp" +#include "caffe/filler.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +double tanh_naive(double x) { + if (x < -40) { + // avoid negative overflow + return -1; + } else if (x > 40) { + // avoid positive overflow + return 1; + } else { + // exact expression for tanh, which is unstable for large x + double exp2x = exp(2 * x); + return (exp2x - 1.0) / (exp2x + 1.0); + } +} + +template +class TanHLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + TanHLayerTest() + : blob_bottom_(new Blob(2, 3, 4, 5)), + blob_top_(new Blob()) { + Caffe::set_random_seed(1701); + FillerParameter filler_param; + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~TanHLayerTest() { delete blob_bottom_; delete blob_top_; } + + void TestForward(Dtype filler_std) { + FillerParameter filler_param; + filler_param.set_std(filler_std); + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + + LayerParameter layer_param; + TanHLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const Dtype* bottom_data = this->blob_bottom_->cpu_data(); + const Dtype* top_data = this->blob_top_->cpu_data(); + const Dtype min_precision = 1e-5; + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + Dtype expected_value = tanh_naive(bottom_data[i]); + Dtype precision = std::max( + Dtype(std::abs(expected_value * Dtype(1e-4))), min_precision); + EXPECT_NEAR(expected_value, top_data[i], precision); + } + } + + void TestBackward(Dtype filler_std) { + FillerParameter filler_param; + filler_param.set_std(filler_std); + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + + LayerParameter layer_param; + TanHLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2, 1701); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } + + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(TanHLayerTest, TestDtypesAndDevices); + +TYPED_TEST(TanHLayerTest, TestTanH) { + this->TestForward(1.0); +} + +TYPED_TEST(TanHLayerTest, TestTanHOverflow) { + // this will fail if tanh overflow is not properly handled + this->TestForward(10000.0); +} + +TYPED_TEST(TanHLayerTest, TestTanHGradient) { + this->TestBackward(1.0); +} + +} // namespace caffe diff --git a/src/caffe/test/test_threshold_layer.cpp b/src/caffe/test/test_threshold_layer.cpp new file mode 100755 index 0000000..05ce821 --- /dev/null +++ b/src/caffe/test/test_threshold_layer.cpp @@ -0,0 +1,98 @@ +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include 
"caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class ThresholdLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: + ThresholdLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 5)), + blob_top_(new Blob()) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~ThresholdLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(ThresholdLayerTest, TestDtypesAndDevices); + + +TYPED_TEST(ThresholdLayerTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ThresholdLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_->height()); + EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_->width()); +} + +TYPED_TEST(ThresholdLayerTest, Test) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ThresholdLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const Dtype* bottom_data = this->blob_bottom_->cpu_data(); + const Dtype* top_data = this->blob_top_->cpu_data(); + const Dtype threshold_ = layer_param.threshold_param().threshold(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_GE(top_data[i], 0.); + EXPECT_LE(top_data[i], 1.); + if (top_data[i] == 0) { + EXPECT_LE(bottom_data[i], threshold_); + } + if (top_data[i] == 1) { + EXPECT_GT(bottom_data[i], threshold_); + } + } +} + +TYPED_TEST(ThresholdLayerTest, Test2) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ThresholdParameter* threshold_param = + layer_param.mutable_threshold_param(); + threshold_param->set_threshold(0.5); + ThresholdLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const Dtype* bottom_data = this->blob_bottom_->cpu_data(); + const Dtype* top_data = this->blob_top_->cpu_data(); + const Dtype threshold_ = layer_param.threshold_param().threshold(); + EXPECT_FLOAT_EQ(threshold_, 0.5); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_GE(top_data[i], 0.); + EXPECT_LE(top_data[i], 1.); + if (top_data[i] == 0) { + EXPECT_LE(bottom_data[i], threshold_); + } + if (top_data[i] == 1) { + EXPECT_GT(bottom_data[i], threshold_); + } + } +} + +} // namespace caffe diff --git a/src/caffe/test/test_upgrade_proto.cpp b/src/caffe/test/test_upgrade_proto.cpp new file mode 100755 index 0000000..0067202 --- /dev/null +++ b/src/caffe/test/test_upgrade_proto.cpp @@ -0,0 +1,2921 @@ +#include +#include +#include + +#include "boost/scoped_ptr.hpp" +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/db.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/upgrade_proto.hpp" + +#include "caffe/test/test_caffe_main.hpp" + 
+namespace caffe { + +class PaddingLayerUpgradeTest : public ::testing::Test { + protected: + void RunPaddingUpgradeTest( + const string& input_param_string, const string& output_param_string) { + // Test that UpgradeV0PaddingLayers called on the proto specified by + // input_param_string results in the proto specified by + // output_param_string. + NetParameter input_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + input_param_string, &input_param)); + NetParameter expected_output_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + output_param_string, &expected_output_param)); + NetParameter actual_output_param; + UpgradeV0PaddingLayers(input_param, &actual_output_param); + EXPECT_EQ(expected_output_param.DebugString(), + actual_output_param.DebugString()); + // Also test idempotence. + NetParameter double_pad_upgrade_param; + UpgradeV0PaddingLayers(actual_output_param, &double_pad_upgrade_param); + EXPECT_EQ(actual_output_param.DebugString(), + double_pad_upgrade_param.DebugString()); + } +}; + +TEST_F(PaddingLayerUpgradeTest, TestSimple) { + const string& input_proto = + "name: 'CaffeNet' " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'pad1' " + " type: 'padding' " + " pad: 2 " + " } " + " bottom: 'data' " + " top: 'pad1' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad1' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'fc8' " + " type: 'innerproduct' " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'conv1' " + " top: 'fc8' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'softmax_loss' " + " } " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + const string& expected_output_proto = + "name: 'CaffeNet' " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'fc8' " + " type: 'innerproduct' " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. 
" + " weight_decay: 0. " + " } " + " bottom: 'conv1' " + " top: 'fc8' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'softmax_loss' " + " } " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + this->RunPaddingUpgradeTest(input_proto, expected_output_proto); +} + +TEST_F(PaddingLayerUpgradeTest, TestTwoTops) { + const string& input_proto = + "name: 'CaffeNet' " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'pad1' " + " type: 'padding' " + " pad: 2 " + " } " + " bottom: 'data' " + " top: 'pad1' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad1' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'fc8' " + " type: 'innerproduct' " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'conv1' " + " top: 'fc8' " + "} " + "layers { " + " layer { " + " name: 'conv2' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad1' " + " top: 'conv2' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'softmax_loss' " + " } " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + const string& expected_output_proto = + "name: 'CaffeNet' " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'fc8' " + " type: 'innerproduct' " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'conv1' " + " top: 'fc8' " + "} " + "layers { " + " layer { " + " name: 'conv2' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. 
" + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'data' " + " top: 'conv2' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'softmax_loss' " + " } " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + this->RunPaddingUpgradeTest(input_proto, expected_output_proto); +} + +TEST_F(PaddingLayerUpgradeTest, TestImageNet) { + const string& input_proto = + "name: 'CaffeNet' " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'relu1' " + " type: 'relu' " + " } " + " bottom: 'conv1' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'pool1' " + " type: 'pool' " + " pool: MAX " + " kernelsize: 3 " + " stride: 2 " + " } " + " bottom: 'conv1' " + " top: 'pool1' " + "} " + "layers { " + " layer { " + " name: 'norm1' " + " type: 'lrn' " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool1' " + " top: 'norm1' " + "} " + "layers { " + " layer { " + " name: 'pad2' " + " type: 'padding' " + " pad: 2 " + " } " + " bottom: 'norm1' " + " top: 'pad2' " + "} " + "layers { " + " layer { " + " name: 'conv2' " + " type: 'conv' " + " num_output: 256 " + " group: 2 " + " kernelsize: 5 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad2' " + " top: 'conv2' " + "} " + "layers { " + " layer { " + " name: 'relu2' " + " type: 'relu' " + " } " + " bottom: 'conv2' " + " top: 'conv2' " + "} " + "layers { " + " layer { " + " name: 'pool2' " + " type: 'pool' " + " pool: MAX " + " kernelsize: 3 " + " stride: 2 " + " } " + " bottom: 'conv2' " + " top: 'pool2' " + "} " + "layers { " + " layer { " + " name: 'norm2' " + " type: 'lrn' " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool2' " + " top: 'norm2' " + "} " + "layers { " + " layer { " + " name: 'pad3' " + " type: 'padding' " + " pad: 1 " + " } " + " bottom: 'norm2' " + " top: 'pad3' " + "} " + "layers { " + " layer { " + " name: 'conv3' " + " type: 'conv' " + " num_output: 384 " + " kernelsize: 3 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " } " + " bottom: 'pad3' " + " top: 'conv3' " + "} " + "layers { " + " layer { " + " name: 'relu3' " + " type: 'relu' " + " } " + " bottom: 'conv3' " + " top: 'conv3' " + "} " + "layers { " + " layer { " + " name: 'pad4' " + " type: 'padding' " + " pad: 1 " + " } " + " bottom: 'conv3' " + " top: 'pad4' " + "} " + "layers { " + " layer { " + " name: 'conv4' " + " type: 'conv' " + " num_output: 384 " + " group: 2 " + " kernelsize: 3 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad4' " + " top: 'conv4' " + "} " + "layers { " + " layer { " + " name: 'relu4' " + " type: 'relu' " + " } " + " bottom: 'conv4' " + " top: 'conv4' " + "} " + "layers { " + " layer { " + " name: 'pad5' " + " type: 'padding' " + " pad: 1 " + " } " + " bottom: 'conv4' " + " top: 'pad5' " + "} " + "layers { " + " layer { " + " name: 'conv5' " + " type: 'conv' " + " num_output: 256 " + " group: 2 " + " kernelsize: 3 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad5' " + " top: 'conv5' " + "} " + "layers { " + " layer { " + " name: 'relu5' " + " type: 'relu' " + " } " + " bottom: 'conv5' " + " top: 'conv5' " + "} " + "layers { " + " layer { " + " name: 'pool5' " + " type: 'pool' " + " kernelsize: 3 " + " pool: MAX " + " stride: 2 " + " } " + " bottom: 'conv5' " + " top: 'pool5' " + "} " + "layers { " + " layer { " + " name: 'fc6' " + " type: 'innerproduct' " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pool5' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'relu6' " + " type: 'relu' " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'drop6' " + " type: 'dropout' " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'fc7' " + " type: 'innerproduct' " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'fc6' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'relu7' " + " type: 'relu' " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'drop7' " + " type: 'dropout' " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'fc8' " + " type: 'innerproduct' " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " } " + " bottom: 'fc7' " + " top: 'fc8' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'softmax_loss' " + " } " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + const string& expected_output_proto = + "name: 'CaffeNet' " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'relu1' " + " type: 'relu' " + " } " + " bottom: 'conv1' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'pool1' " + " type: 'pool' " + " pool: MAX " + " kernelsize: 3 " + " stride: 2 " + " } " + " bottom: 'conv1' " + " top: 'pool1' " + "} " + "layers { " + " layer { " + " name: 'norm1' " + " type: 'lrn' " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool1' " + " top: 'norm1' " + "} " + "layers { " + " layer { " + " name: 'conv2' " + " type: 'conv' " + " num_output: 256 " + " group: 2 " + " kernelsize: 5 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'norm1' " + " top: 'conv2' " + "} " + "layers { " + " layer { " + " name: 'relu2' " + " type: 'relu' " + " } " + " bottom: 'conv2' " + " top: 'conv2' " + "} " + "layers { " + " layer { " + " name: 'pool2' " + " type: 'pool' " + " pool: MAX " + " kernelsize: 3 " + " stride: 2 " + " } " + " bottom: 'conv2' " + " top: 'pool2' " + "} " + "layers { " + " layer { " + " name: 'norm2' " + " type: 'lrn' " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool2' " + " top: 'norm2' " + "} " + "layers { " + " layer { " + " name: 'conv3' " + " type: 'conv' " + " num_output: 384 " + " kernelsize: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'norm2' " + " top: 'conv3' " + "} " + "layers { " + " layer { " + " name: 'relu3' " + " type: 'relu' " + " } " + " bottom: 'conv3' " + " top: 'conv3' " + "} " + "layers { " + " layer { " + " name: 'conv4' " + " type: 'conv' " + " num_output: 384 " + " group: 2 " + " kernelsize: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " } " + " bottom: 'conv3' " + " top: 'conv4' " + "} " + "layers { " + " layer { " + " name: 'relu4' " + " type: 'relu' " + " } " + " bottom: 'conv4' " + " top: 'conv4' " + "} " + "layers { " + " layer { " + " name: 'conv5' " + " type: 'conv' " + " num_output: 256 " + " group: 2 " + " kernelsize: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'conv4' " + " top: 'conv5' " + "} " + "layers { " + " layer { " + " name: 'relu5' " + " type: 'relu' " + " } " + " bottom: 'conv5' " + " top: 'conv5' " + "} " + "layers { " + " layer { " + " name: 'pool5' " + " type: 'pool' " + " kernelsize: 3 " + " pool: MAX " + " stride: 2 " + " } " + " bottom: 'conv5' " + " top: 'pool5' " + "} " + "layers { " + " layer { " + " name: 'fc6' " + " type: 'innerproduct' " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pool5' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'relu6' " + " type: 'relu' " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'drop6' " + " type: 'dropout' " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'fc7' " + " type: 'innerproduct' " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'fc6' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'relu7' " + " type: 'relu' " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'drop7' " + " type: 'dropout' " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'fc8' " + " type: 'innerproduct' " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'fc7' " + " top: 'fc8' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'softmax_loss' " + " } " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + this->RunPaddingUpgradeTest(input_proto, expected_output_proto); +} + +class NetUpgradeTest : public ::testing::Test { + protected: + void RunV0UpgradeTest( + const string& input_param_string, const string& output_param_string) { + // Test that UpgradeV0Net called on the NetParameter proto specified by + // input_param_string results in the NetParameter proto specified by + // output_param_string. 
+ NetParameter input_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + input_param_string, &input_param)); + NetParameter expected_output_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + output_param_string, &expected_output_param)); + NetParameter actual_output_param; + UpgradeV0Net(input_param, &actual_output_param); + EXPECT_EQ(expected_output_param.DebugString(), + actual_output_param.DebugString()); + } + + void RunV1UpgradeTest( + const string& input_param_string, const string& output_param_string) { + // Test that UpgradeV0Net called on the NetParameter proto specified by + // input_param_string results in the NetParameter proto specified by + // output_param_string. + NetParameter input_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + input_param_string, &input_param)); + NetParameter expected_output_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + output_param_string, &expected_output_param)); + NetParameter actual_output_param; + UpgradeV1Net(input_param, &actual_output_param); + EXPECT_EQ(expected_output_param.DebugString(), + actual_output_param.DebugString()); + } +}; + +TEST_F(NetUpgradeTest, TestSimple) { + const string& v0_proto = + "name: 'CaffeNet' " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'pad1' " + " type: 'padding' " + " pad: 2 " + " } " + " bottom: 'data' " + " top: 'pad1' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad1' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'fc8' " + " type: 'innerproduct' " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'conv1' " + " top: 'fc8' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'softmax_loss' " + " } " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + const string& expected_v1_proto = + "name: 'CaffeNet' " + "layers { " + " name: 'data' " + " type: DATA " + " data_param { " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " batch_size: 256 " + " } " + " transform_param { " + " crop_size: 227 " + " mirror: true " + " mean_file: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " name: 'conv1' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 96 " + " kernel_size: 11 " + " stride: 4 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " name: 'fc8' " + " type: INNER_PRODUCT " + " inner_product_param { " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'conv1' " + " top: 'fc8' " + "} " + "layers { " + " name: 'loss' " + " type: SOFTMAX_LOSS " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + this->RunV0UpgradeTest(v0_proto, expected_v1_proto); + + const string& expected_v2_proto = + "name: 'CaffeNet' " + "layer { " + " name: 'data' " + " type: 'Data' " + " data_param { " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " batch_size: 256 " + " } " + " transform_param { " + " crop_size: 227 " + " mirror: true " + " mean_file: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'conv1' " + " type: 'Convolution' " + " convolution_param { " + " num_output: 96 " + " kernel_size: 11 " + " stride: 4 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layer { " + " name: 'fc8' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'conv1' " + " top: 'fc8' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + this->RunV1UpgradeTest(expected_v1_proto, expected_v2_proto); +} + +// Test any layer or parameter upgrades not covered by other tests. 
+TEST_F(NetUpgradeTest, TestAllParams) { + const string& input_proto = + "name: 'CaffeNet' " + "input: 'input_data' " + "input_dim: 64 " + "input_dim: 3 " + "input_dim: 32 " + "input_dim: 32 " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " scale: 0.25 " + " rand_skip: 73 " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'images' " + " type: 'images' " + " source: '/home/jiayq/Data/ILSVRC12/train-images' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " scale: 0.25 " + " rand_skip: 73 " + " shuffle_images: true " + " new_height: 40 " + " new_width: 30 " + " } " + " top: 'images_data' " + " top: 'images_label' " + "} " + "layers { " + " layer { " + " name: 'window_data' " + " type: 'window_data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " det_fg_threshold: 0.25 " + " det_bg_threshold: 0.75 " + " det_fg_fraction: 0.5 " + " det_context_pad: 16 " + " det_crop_mode: 'square' " + " } " + " top: 'window_data' " + " top: 'window_label' " + "} " + "layers { " + " layer { " + " name: 'hdf5data' " + " type: 'hdf5_data' " + " source: '/my/hdf5/data' " + " batchsize: 256 " + " } " + " top: 'hdf5data' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " biasterm: false " + " pad: 4 " + " kernelsize: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 3. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'pool1ave' " + " type: 'pool' " + " pool: AVE " + " kernelsize: 3 " + " stride: 2 " + " } " + " bottom: 'conv1' " + " top: 'pool1ave' " + "} " + "layers { " + " layer { " + " name: 'pool1stoch' " + " type: 'pool' " + " pool: STOCHASTIC " + " kernelsize: 4 " + " stride: 5 " + " } " + " bottom: 'conv1' " + " top: 'pool1stoch' " + "} " + "layers { " + " layer { " + " name: 'concat' " + " type: 'concat' " + " concat_dim: 2 " + " } " + " bottom: 'pool1ave' " + " bottom: 'pool1stoch' " + " top: 'pool1concat' " + "} " + "layers { " + " layer { " + " name: 'norm1' " + " type: 'lrn' " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool1concat' " + " top: 'norm1' " + "} " + "layers { " + " layer { " + " name: 'fc6' " + " type: 'innerproduct' " + " num_output: 4096 " + " biasterm: false " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " } " + " bottom: 'norm1' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'relu6' " + " type: 'relu' " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'drop6' " + " type: 'dropout' " + " dropout_ratio: 0.2 " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'infogain_loss' " + " source: '/my/infogain/matrix' " + " } " + " bottom: 'fc6' " + " bottom: 'label' " + "} " + "layers { " + " layer { " + " name: 'accuracy' " + " type: 'accuracy' " + " } " + "} " + "layers { " + " layer { " + " name: 'bnll' " + " type: 'bnll' " + " } " + "} " + "layers { " + " layer { " + " name: 'euclidean_loss' " + " type: 'euclidean_loss' " + " } " + "} " + "layers { " + " layer { " + " name: 'flatten' " + " type: 'flatten' " + " } " + "} " + "layers { " + " layer { " + " name: 'hdf5_output' " + " type: 'hdf5_output' " + " hdf5_output_param { " + " file_name: '/my/hdf5/output/file' " + " } " + " } " + "} " + "layers { " + " layer { " + " name: 'im2col' " + " type: 'im2col' " + " } " + "} " + "layers { " + " layer { " + " name: 'images' " + " type: 'images' " + " } " + "} " + "layers { " + " layer { " + " name: 'multinomial_logistic_loss' " + " type: 'multinomial_logistic_loss' " + " } " + "} " + "layers { " + " layer { " + " name: 'sigmoid' " + " type: 'sigmoid' " + " } " + "} " + "layers { " + " layer { " + " name: 'softmax' " + " type: 'softmax' " + " } " + "} " + "layers { " + " layer { " + " name: 'split' " + " type: 'split' " + " } " + "} " + "layers { " + " layer { " + " name: 'tanh' " + " type: 'tanh' " + " } " + "} "; + const string& expected_output_proto = + "name: 'CaffeNet' " + "input: 'input_data' " + "input_dim: 64 " + "input_dim: 3 " + "input_dim: 32 " + "input_dim: 32 " + "layers { " + " name: 'data' " + " type: DATA " + " data_param { " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " batch_size: 256 " + " rand_skip: 73 " + " } " + " transform_param { " + " crop_size: 227 " + " mirror: true " + " scale: 0.25 " + " mean_file: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " name: 'images' " + " type: IMAGE_DATA " + " image_data_param { " + " source: '/home/jiayq/Data/ILSVRC12/train-images' " + " batch_size: 256 " + " rand_skip: 73 " + " shuffle: true " + " new_height: 40 " + " new_width: 30 " + " } " + " transform_param {" + " mean_file: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " crop_size: 227 " + " mirror: true " + " scale: 0.25 " + " } " + " top: 'images_data' " + " top: 'images_label' " + "} " + "layers { " + " name: 'window_data' " + " type: WINDOW_DATA " + " window_data_param { " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " batch_size: 256 " + " fg_threshold: 0.25 " + " bg_threshold: 0.75 " + " fg_fraction: 0.5 " + " context_pad: 16 " + " crop_mode: 'square' " + " } " + " transform_param { " + " mirror: true " + " crop_size: 227 " + " mean_file: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " }" + " top: 'window_data' " + " top: 'window_label' " + "} " + "layers { " + " name: 'hdf5data' " + " type: HDF5_DATA " + " hdf5_data_param { " + " source: '/my/hdf5/data' " + " batch_size: 256 " + " } " + " top: 'hdf5data' " + "} " + "layers { " + " name: 'conv1' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 96 " + " bias_term: false " + " pad: 4 " + " kernel_size: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 
0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 3. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " name: 'pool1ave' " + " type: POOLING " + " pooling_param { " + " pool: AVE " + " kernel_size: 3 " + " stride: 2 " + " } " + " bottom: 'conv1' " + " top: 'pool1ave' " + "} " + "layers { " + " name: 'pool1stoch' " + " type: POOLING " + " pooling_param { " + " pool: STOCHASTIC " + " kernel_size: 4 " + " stride: 5 " + " } " + " bottom: 'conv1' " + " top: 'pool1stoch' " + "} " + "layers { " + " name: 'concat' " + " type: CONCAT " + " concat_param { " + " concat_dim: 2 " + " } " + " bottom: 'pool1ave' " + " bottom: 'pool1stoch' " + " top: 'pool1concat' " + "} " + "layers { " + " name: 'norm1' " + " type: LRN " + " lrn_param { " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool1concat' " + " top: 'norm1' " + "} " + "layers { " + " name: 'fc6' " + " type: INNER_PRODUCT " + " inner_product_param { " + " num_output: 4096 " + " bias_term: false " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'norm1' " + " top: 'fc6' " + "} " + "layers { " + " name: 'relu6' " + " type: RELU " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " name: 'drop6' " + " type: DROPOUT " + " dropout_param { " + " dropout_ratio: 0.2 " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " name: 'loss' " + " type: INFOGAIN_LOSS " + " infogain_loss_param { " + " source: '/my/infogain/matrix' " + " } " + " bottom: 'fc6' " + " bottom: 'label' " + "} " + "layers { " + " name: 'accuracy' " + " type: ACCURACY " + "} " + "layers { " + " name: 'bnll' " + " type: BNLL " + "} " + "layers { " + " name: 'euclidean_loss' " + " type: EUCLIDEAN_LOSS " + "} " + "layers { " + " name: 'flatten' " + " type: FLATTEN " + "} " + "layers { " + " name: 'hdf5_output' " + " type: HDF5_OUTPUT " + " hdf5_output_param { " + " file_name: '/my/hdf5/output/file' " + " } " + "} " + "layers { " + " name: 'im2col' " + " type: IM2COL " + "} " + "layers { " + " name: 'images' " + " type: IMAGE_DATA " + "} " + "layers { " + " name: 'multinomial_logistic_loss' " + " type: MULTINOMIAL_LOGISTIC_LOSS " + "} " + "layers { " + " name: 'sigmoid' " + " type: SIGMOID " + "} " + "layers { " + " name: 'softmax' " + " type: SOFTMAX " + "} " + "layers { " + " name: 'split' " + " type: SPLIT " + "} " + "layers { " + " name: 'tanh' " + " type: TANH " + "} "; + this->RunV0UpgradeTest(input_proto, expected_output_proto); +} + +TEST_F(NetUpgradeTest, TestImageNet) { + const string& v0_proto = + "name: 'CaffeNet' " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " } " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'relu1' " + " type: 'relu' " + " } " + " bottom: 'conv1' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'pool1' " + " type: 'pool' " + " pool: MAX " + " kernelsize: 3 " + " stride: 2 " + " } " + " bottom: 'conv1' " + " top: 'pool1' " + "} " + "layers { " + " layer { " + " name: 'norm1' " + " type: 'lrn' " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool1' " + " top: 'norm1' " + "} " + "layers { " + " layer { " + " name: 'pad2' " + " type: 'padding' " + " pad: 2 " + " } " + " bottom: 'norm1' " + " top: 'pad2' " + "} " + "layers { " + " layer { " + " name: 'conv2' " + " type: 'conv' " + " num_output: 256 " + " group: 2 " + " kernelsize: 5 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad2' " + " top: 'conv2' " + "} " + "layers { " + " layer { " + " name: 'relu2' " + " type: 'relu' " + " } " + " bottom: 'conv2' " + " top: 'conv2' " + "} " + "layers { " + " layer { " + " name: 'pool2' " + " type: 'pool' " + " pool: MAX " + " kernelsize: 3 " + " stride: 2 " + " } " + " bottom: 'conv2' " + " top: 'pool2' " + "} " + "layers { " + " layer { " + " name: 'norm2' " + " type: 'lrn' " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool2' " + " top: 'norm2' " + "} " + "layers { " + " layer { " + " name: 'pad3' " + " type: 'padding' " + " pad: 1 " + " } " + " bottom: 'norm2' " + " top: 'pad3' " + "} " + "layers { " + " layer { " + " name: 'conv3' " + " type: 'conv' " + " num_output: 384 " + " kernelsize: 3 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad3' " + " top: 'conv3' " + "} " + "layers { " + " layer { " + " name: 'relu3' " + " type: 'relu' " + " } " + " bottom: 'conv3' " + " top: 'conv3' " + "} " + "layers { " + " layer { " + " name: 'pad4' " + " type: 'padding' " + " pad: 1 " + " } " + " bottom: 'conv3' " + " top: 'pad4' " + "} " + "layers { " + " layer { " + " name: 'conv4' " + " type: 'conv' " + " num_output: 384 " + " group: 2 " + " kernelsize: 3 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad4' " + " top: 'conv4' " + "} " + "layers { " + " layer { " + " name: 'relu4' " + " type: 'relu' " + " } " + " bottom: 'conv4' " + " top: 'conv4' " + "} " + "layers { " + " layer { " + " name: 'pad5' " + " type: 'padding' " + " pad: 1 " + " } " + " bottom: 'conv4' " + " top: 'pad5' " + "} " + "layers { " + " layer { " + " name: 'conv5' " + " type: 'conv' " + " num_output: 256 " + " group: 2 " + " kernelsize: 3 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " } " + " bottom: 'pad5' " + " top: 'conv5' " + "} " + "layers { " + " layer { " + " name: 'relu5' " + " type: 'relu' " + " } " + " bottom: 'conv5' " + " top: 'conv5' " + "} " + "layers { " + " layer { " + " name: 'pool5' " + " type: 'pool' " + " kernelsize: 3 " + " pool: MAX " + " stride: 2 " + " } " + " bottom: 'conv5' " + " top: 'pool5' " + "} " + "layers { " + " layer { " + " name: 'fc6' " + " type: 'innerproduct' " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pool5' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'relu6' " + " type: 'relu' " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'drop6' " + " type: 'dropout' " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'fc7' " + " type: 'innerproduct' " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'fc6' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'relu7' " + " type: 'relu' " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'drop7' " + " type: 'dropout' " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'fc8' " + " type: 'innerproduct' " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'fc7' " + " top: 'fc8' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'softmax_loss' " + " } " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + const string& expected_v1_proto = + "name: 'CaffeNet' " + "layers { " + " name: 'data' " + " type: DATA " + " data_param { " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " batch_size: 256 " + " } " + " transform_param { " + " crop_size: 227 " + " mirror: true " + " mean_file: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " name: 'conv1' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 96 " + " kernel_size: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " name: 'relu1' " + " type: RELU " + " bottom: 'conv1' " + " top: 'conv1' " + "} " + "layers { " + " name: 'pool1' " + " type: POOLING " + " pooling_param { " + " pool: MAX " + " kernel_size: 3 " + " stride: 2 " + " } " + " bottom: 'conv1' " + " top: 'pool1' " + "} " + "layers { " + " name: 'norm1' " + " type: LRN " + " lrn_param { " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool1' " + " top: 'norm1' " + "} " + "layers { " + " name: 'conv2' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 256 " + " group: 2 " + " kernel_size: 5 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'norm1' " + " top: 'conv2' " + "} " + "layers { " + " name: 'relu2' " + " type: RELU " + " bottom: 'conv2' " + " top: 'conv2' " + "} " + "layers { " + " name: 'pool2' " + " type: POOLING " + " pooling_param { " + " pool: MAX " + " kernel_size: 3 " + " stride: 2 " + " } " + " bottom: 'conv2' " + " top: 'pool2' " + "} " + "layers { " + " name: 'norm2' " + " type: LRN " + " lrn_param { " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool2' " + " top: 'norm2' " + "} " + "layers { " + " name: 'conv3' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 384 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'norm2' " + " top: 'conv3' " + "} " + "layers { " + " name: 'relu3' " + " type: RELU " + " bottom: 'conv3' " + " top: 'conv3' " + "} " + "layers { " + " name: 'conv4' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 384 " + " group: 2 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'conv3' " + " top: 'conv4' " + "} " + "layers { " + " name: 'relu4' " + " type: RELU " + " bottom: 'conv4' " + " top: 'conv4' " + "} " + "layers { " + " name: 'conv5' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 256 " + " group: 2 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'conv4' " + " top: 'conv5' " + "} " + "layers { " + " name: 'relu5' " + " type: RELU " + " bottom: 'conv5' " + " top: 'conv5' " + "} " + "layers { " + " name: 'pool5' " + " type: POOLING " + " pooling_param { " + " kernel_size: 3 " + " pool: MAX " + " stride: 2 " + " } " + " bottom: 'conv5' " + " top: 'pool5' " + "} " + "layers { " + " name: 'fc6' " + " type: INNER_PRODUCT " + " inner_product_param { " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " bottom: 'pool5' " + " top: 'fc6' " + "} " + "layers { " + " name: 'relu6' " + " type: RELU " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " name: 'drop6' " + " type: DROPOUT " + " dropout_param { " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " name: 'fc7' " + " type: INNER_PRODUCT " + " inner_product_param { " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'fc6' " + " top: 'fc7' " + "} " + "layers { " + " name: 'relu7' " + " type: RELU " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " name: 'drop7' " + " type: DROPOUT " + " dropout_param { " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " name: 'fc8' " + " type: INNER_PRODUCT " + " inner_product_param { " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'fc7' " + " top: 'fc8' " + "} " + "layers { " + " name: 'loss' " + " type: SOFTMAX_LOSS " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + this->RunV0UpgradeTest(v0_proto, expected_v1_proto); + + const string& expected_v2_proto = + "name: 'CaffeNet' " + "layer { " + " name: 'data' " + " type: 'Data' " + " data_param { " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " batch_size: 256 " + " } " + " transform_param { " + " crop_size: 227 " + " mirror: true " + " mean_file: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'conv1' " + " type: 'Convolution' " + " convolution_param { " + " num_output: 96 " + " kernel_size: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layer { " + " name: 'relu1' " + " type: 'ReLU' " + " bottom: 'conv1' " + " top: 'conv1' " + "} " + "layer { " + " name: 'pool1' " + " type: 'Pooling' " + " pooling_param { " + " pool: MAX " + " kernel_size: 3 " + " stride: 2 " + " } " + " bottom: 'conv1' " + " top: 'pool1' " + "} " + "layer { " + " name: 'norm1' " + " type: 'LRN' " + " lrn_param { " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool1' " + " top: 'norm1' " + "} " + "layer { " + " name: 'conv2' " + " type: 'Convolution' " + " convolution_param { " + " num_output: 256 " + " group: 2 " + " kernel_size: 5 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. 
" + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'norm1' " + " top: 'conv2' " + "} " + "layer { " + " name: 'relu2' " + " type: 'ReLU' " + " bottom: 'conv2' " + " top: 'conv2' " + "} " + "layer { " + " name: 'pool2' " + " type: 'Pooling' " + " pooling_param { " + " pool: MAX " + " kernel_size: 3 " + " stride: 2 " + " } " + " bottom: 'conv2' " + " top: 'pool2' " + "} " + "layer { " + " name: 'norm2' " + " type: 'LRN' " + " lrn_param { " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool2' " + " top: 'norm2' " + "} " + "layer { " + " name: 'conv3' " + " type: 'Convolution' " + " convolution_param { " + " num_output: 384 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'norm2' " + " top: 'conv3' " + "} " + "layer { " + " name: 'relu3' " + " type: 'ReLU' " + " bottom: 'conv3' " + " top: 'conv3' " + "} " + "layer { " + " name: 'conv4' " + " type: 'Convolution' " + " convolution_param { " + " num_output: 384 " + " group: 2 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'conv3' " + " top: 'conv4' " + "} " + "layer { " + " name: 'relu4' " + " type: 'ReLU' " + " bottom: 'conv4' " + " top: 'conv4' " + "} " + "layer { " + " name: 'conv5' " + " type: 'Convolution' " + " convolution_param { " + " num_output: 256 " + " group: 2 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'conv4' " + " top: 'conv5' " + "} " + "layer { " + " name: 'relu5' " + " type: 'ReLU' " + " bottom: 'conv5' " + " top: 'conv5' " + "} " + "layer { " + " name: 'pool5' " + " type: 'Pooling' " + " pooling_param { " + " kernel_size: 3 " + " pool: MAX " + " stride: 2 " + " } " + " bottom: 'conv5' " + " top: 'pool5' " + "} " + "layer { " + " name: 'fc6' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'pool5' " + " top: 'fc6' " + "} " + "layer { " + " name: 'relu6' " + " type: 'ReLU' " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layer { " + " name: 'drop6' " + " type: 'Dropout' " + " dropout_param { " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layer { " + " name: 'fc7' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. 
" + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'fc6' " + " top: 'fc7' " + "} " + "layer { " + " name: 'relu7' " + " type: 'ReLU' " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layer { " + " name: 'drop7' " + " type: 'Dropout' " + " dropout_param { " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layer { " + " name: 'fc8' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'fc7' " + " top: 'fc8' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + this->RunV1UpgradeTest(expected_v1_proto, expected_v2_proto); +} // NOLINT(readability/fn_size) + +TEST_F(NetUpgradeTest, TestUpgradeV1LayerType) { + LayerParameter layer_param; + shared_ptr > layer; + for (int i = 0; i < V1LayerParameter_LayerType_LayerType_ARRAYSIZE; ++i) { + ASSERT_TRUE(V1LayerParameter_LayerType_IsValid(i)); + V1LayerParameter_LayerType v1_type = V1LayerParameter_LayerType(i); + string v2_layer_type(UpgradeV1LayerType(v1_type)); + if (v2_layer_type == "") { + EXPECT_EQ(V1LayerParameter_LayerType_NONE, v1_type); + continue; // Empty string isn't actually a valid layer type. + } + layer_param.set_type(v2_layer_type); + // Data layers expect a DB + if (v2_layer_type == "Data") { + string tmp; + MakeTempDir(&tmp); + boost::scoped_ptr db(db::GetDB(DataParameter_DB_LEVELDB)); + db->Open(tmp, db::NEW); + db->Close(); + layer_param.mutable_data_param()->set_source(tmp); + } + layer = LayerRegistry::CreateLayer(layer_param); + EXPECT_EQ(v2_layer_type, layer->type()); + } +} + +} // NOLINT(readability/fn_size) // namespace caffe diff --git a/src/caffe/test/test_util_blas.cpp b/src/caffe/test/test_util_blas.cpp new file mode 100755 index 0000000..8770f30 --- /dev/null +++ b/src/caffe/test/test_util_blas.cpp @@ -0,0 +1,134 @@ +#ifndef CPU_ONLY // CPU-GPU test + +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/util/device_alternate.hpp" +#include "caffe/util/math_functions.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class GemmTest : public ::testing::Test {}; + +TYPED_TEST_CASE(GemmTest, TestDtypes); + +TYPED_TEST(GemmTest, TestGemmCPUGPU) { + Blob A(1, 1, 2, 3); + Blob B(1, 1, 3, 4); + Blob C(1, 1, 2, 4); + TypeParam data[12] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; + TypeParam A_reshape_data[6] = {1, 4, 2, 5, 3, 6}; + TypeParam B_reshape_data[12] = {1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12}; + TypeParam result[8] = {38, 44, 50, 56, 83, 98, 113, 128}; + caffe_copy(6, data, A.mutable_cpu_data()); + caffe_copy(12, data, B.mutable_cpu_data()); + + if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) { + // [1, 2, 3; 4 5 6] * [1, 2, 3, 4; 5, 6, 7, 8; 9, 10, 11, 12]; + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., + A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., + A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); + for (int i = 0; i < 8; ++i) { + 
EXPECT_EQ(C.cpu_data()[i], result[i]); + } + + // Test when we have a transposed A + A.Reshape(1, 1, 3, 2); + caffe_copy(6, A_reshape_data, A.mutable_cpu_data()); + caffe_cpu_gemm(CblasTrans, CblasNoTrans, 2, 4, 3, 1., + A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + caffe_gpu_gemm(CblasTrans, CblasNoTrans, 2, 4, 3, 1., + A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + + // Test when we have a transposed A and a transposed B too + B.Reshape(1, 1, 4, 3); + caffe_copy(12, B_reshape_data, B.mutable_cpu_data()); + caffe_cpu_gemm(CblasTrans, CblasTrans, 2, 4, 3, 1., + A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + caffe_gpu_gemm(CblasTrans, CblasTrans, 2, 4, 3, 1., + A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + + // Test when we have a transposed B + A.Reshape(1, 1, 2, 3); + caffe_copy(6, data, A.mutable_cpu_data()); + caffe_cpu_gemm(CblasNoTrans, CblasTrans, 2, 4, 3, 1., + A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + caffe_gpu_gemm(CblasNoTrans, CblasTrans, 2, 4, 3, 1., + A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + } else { + LOG(ERROR) << "Skipping test due to old architecture."; + } +} + + +TYPED_TEST(GemmTest, TestGemvCPUGPU) { + Blob A(1, 1, 2, 3); + Blob x(1, 1, 1, 3); + Blob y(1, 1, 1, 2); + TypeParam data[6] = {1, 2, 3, 4, 5, 6}; + TypeParam result_2[2] = {14, 32}; + TypeParam result_3[3] = {9, 12, 15}; + caffe_copy(6, data, A.mutable_cpu_data()); + caffe_copy(3, data, x.mutable_cpu_data()); + + if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) { + caffe_cpu_gemv(CblasNoTrans, 2, 3, 1., A.cpu_data(), + x.cpu_data(), 0., y.mutable_cpu_data()); + for (int i = 0; i < 2; ++i) { + EXPECT_EQ(y.cpu_data()[i], result_2[i]); + } + caffe_gpu_gemv(CblasNoTrans, 2, 3, 1., A.gpu_data(), + x.gpu_data(), 0., y.mutable_gpu_data()); + for (int i = 0; i < 2; ++i) { + EXPECT_EQ(y.cpu_data()[i], result_2[i]); + } + + // Test transpose case + caffe_copy(2, data, y.mutable_cpu_data()); + caffe_cpu_gemv(CblasTrans, 2, 3, 1., A.cpu_data(), + y.cpu_data(), 0., x.mutable_cpu_data()); + for (int i = 0; i < 3; ++i) { + EXPECT_EQ(x.cpu_data()[i], result_3[i]); + } + caffe_gpu_gemv(CblasTrans, 2, 3, 1., A.gpu_data(), + y.gpu_data(), 0., x.mutable_gpu_data()); + for (int i = 0; i < 3; ++i) { + EXPECT_EQ(x.cpu_data()[i], result_3[i]); + } + } else { + LOG(ERROR) << "Skipping test due to old architecture."; + } +} + +} // namespace caffe + +#endif // CPU_ONLY diff --git a/src/caffe/util/benchmark.cpp b/src/caffe/util/benchmark.cpp new file mode 100755 index 0000000..1d269c3 --- /dev/null +++ b/src/caffe/util/benchmark.cpp @@ -0,0 +1,168 @@ +#include + +#include "caffe/common.hpp" +#include "caffe/util/benchmark.hpp" + +namespace caffe { + +Timer::Timer() + : initted_(false), + running_(false), + has_run_at_least_once_(false) { + Init(); +} + +Timer::~Timer() { + if (Caffe::mode() == Caffe::GPU) { +#ifndef CPU_ONLY + CUDA_CHECK(cudaEventDestroy(start_gpu_)); + CUDA_CHECK(cudaEventDestroy(stop_gpu_)); +#else + NO_GPU; +#endif + } +} + +void Timer::Start() { + if (!running()) { + if 
(Caffe::mode() == Caffe::GPU) { +#ifndef CPU_ONLY + CUDA_CHECK(cudaEventRecord(start_gpu_, 0)); +#else + NO_GPU; +#endif + } else { + start_cpu_ = boost::posix_time::microsec_clock::local_time(); + } + running_ = true; + has_run_at_least_once_ = true; + } +} + +void Timer::Stop() { + if (running()) { + if (Caffe::mode() == Caffe::GPU) { +#ifndef CPU_ONLY + CUDA_CHECK(cudaEventRecord(stop_gpu_, 0)); + CUDA_CHECK(cudaEventSynchronize(stop_gpu_)); +#else + NO_GPU; +#endif + } else { + stop_cpu_ = boost::posix_time::microsec_clock::local_time(); + } + running_ = false; + } +} + + +float Timer::MicroSeconds() { + if (!has_run_at_least_once()) { + LOG(WARNING) << "Timer has never been run before reading time."; + return 0; + } + if (running()) { + Stop(); + } + if (Caffe::mode() == Caffe::GPU) { +#ifndef CPU_ONLY + CUDA_CHECK(cudaEventElapsedTime(&elapsed_milliseconds_, start_gpu_, + stop_gpu_)); + // Cuda only measure milliseconds + elapsed_microseconds_ = elapsed_milliseconds_ * 1000; +#else + NO_GPU; +#endif + } else { + elapsed_microseconds_ = (stop_cpu_ - start_cpu_).total_microseconds(); + } + return elapsed_microseconds_; +} + +float Timer::MilliSeconds() { + if (!has_run_at_least_once()) { + LOG(WARNING) << "Timer has never been run before reading time."; + return 0; + } + if (running()) { + Stop(); + } + if (Caffe::mode() == Caffe::GPU) { +#ifndef CPU_ONLY + CUDA_CHECK(cudaEventElapsedTime(&elapsed_milliseconds_, start_gpu_, + stop_gpu_)); +#else + NO_GPU; +#endif + } else { + elapsed_milliseconds_ = (stop_cpu_ - start_cpu_).total_milliseconds(); + } + return elapsed_milliseconds_; +} + +float Timer::Seconds() { + return MilliSeconds() / 1000.; +} + +void Timer::Init() { + if (!initted()) { + if (Caffe::mode() == Caffe::GPU) { +#ifndef CPU_ONLY + CUDA_CHECK(cudaEventCreate(&start_gpu_)); + CUDA_CHECK(cudaEventCreate(&stop_gpu_)); +#else + NO_GPU; +#endif + } + initted_ = true; + } +} + +CPUTimer::CPUTimer() { + this->initted_ = true; + this->running_ = false; + this->has_run_at_least_once_ = false; +} + +void CPUTimer::Start() { + if (!running()) { + this->start_cpu_ = boost::posix_time::microsec_clock::local_time(); + this->running_ = true; + this->has_run_at_least_once_ = true; + } +} + +void CPUTimer::Stop() { + if (running()) { + this->stop_cpu_ = boost::posix_time::microsec_clock::local_time(); + this->running_ = false; + } +} + +float CPUTimer::MilliSeconds() { + if (!has_run_at_least_once()) { + LOG(WARNING) << "Timer has never been run before reading time."; + return 0; + } + if (running()) { + Stop(); + } + this->elapsed_milliseconds_ = (this->stop_cpu_ - + this->start_cpu_).total_milliseconds(); + return this->elapsed_milliseconds_; +} + +float CPUTimer::MicroSeconds() { + if (!has_run_at_least_once()) { + LOG(WARNING) << "Timer has never been run before reading time."; + return 0; + } + if (running()) { + Stop(); + } + this->elapsed_microseconds_ = (this->stop_cpu_ - + this->start_cpu_).total_microseconds(); + return this->elapsed_microseconds_; +} + +} // namespace caffe diff --git a/src/caffe/util/blocking_queue.cpp b/src/caffe/util/blocking_queue.cpp new file mode 100755 index 0000000..d1d1fa8 --- /dev/null +++ b/src/caffe/util/blocking_queue.cpp @@ -0,0 +1,96 @@ +#include +#include + +#include "caffe/data_layers.hpp" +#include "caffe/data_reader.hpp" +#include "caffe/parallel.hpp" +#include "caffe/util/blocking_queue.hpp" + +namespace caffe { + +template +class BlockingQueue::sync { + public: + mutable boost::mutex mutex_; + boost::condition_variable condition_; +}; + 
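+// The nested sync struct bundles the mutex and condition variable in the
+// .cpp file, which keeps the boost synchronization types out of the public
+// header. push() notifies one waiter after releasing the lock, while pop()
+// and peek() block on the condition variable until an element is available.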
+template +BlockingQueue::BlockingQueue() + : sync_(new sync()) { +} + +template +void BlockingQueue::push(const T& t) { + boost::mutex::scoped_lock lock(sync_->mutex_); + queue_.push(t); + lock.unlock(); + sync_->condition_.notify_one(); +} + +template +bool BlockingQueue::try_pop(T* t) { + boost::mutex::scoped_lock lock(sync_->mutex_); + + if (queue_.empty()) { + return false; + } + + *t = queue_.front(); + queue_.pop(); + return true; +} + +template +T BlockingQueue::pop(const string& log_on_wait) { + boost::mutex::scoped_lock lock(sync_->mutex_); + + while (queue_.empty()) { + if (!log_on_wait.empty()) { + LOG_EVERY_N(INFO, 1000)<< log_on_wait; + } + sync_->condition_.wait(lock); + } + + T t = queue_.front(); + queue_.pop(); + return t; +} + +template +bool BlockingQueue::try_peek(T* t) { + boost::mutex::scoped_lock lock(sync_->mutex_); + + if (queue_.empty()) { + return false; + } + + *t = queue_.front(); + return true; +} + +template +T BlockingQueue::peek() { + boost::mutex::scoped_lock lock(sync_->mutex_); + + while (queue_.empty()) { + sync_->condition_.wait(lock); + } + + return queue_.front(); +} + +template +size_t BlockingQueue::size() const { + boost::mutex::scoped_lock lock(sync_->mutex_); + return queue_.size(); +} + +template class BlockingQueue*>; +template class BlockingQueue*>; +template class BlockingQueue; +template class BlockingQueue >; +template class BlockingQueue*>; +template class BlockingQueue*>; + +} // namespace caffe diff --git a/src/caffe/util/cudnn.cpp b/src/caffe/util/cudnn.cpp new file mode 100755 index 0000000..1772f00 --- /dev/null +++ b/src/caffe/util/cudnn.cpp @@ -0,0 +1,23 @@ +#ifdef USE_CUDNN +#include "caffe/util/cudnn.hpp" + +namespace caffe { +namespace cudnn { + +float dataType::oneval = 1.0; +float dataType::zeroval = 0.0; +const void* dataType::one = + static_cast(&dataType::oneval); +const void* dataType::zero = + static_cast(&dataType::zeroval); + +double dataType::oneval = 1.0; +double dataType::zeroval = 0.0; +const void* dataType::one = + static_cast(&dataType::oneval); +const void* dataType::zero = + static_cast(&dataType::zeroval); + +} // namespace cudnn +} // namespace caffe +#endif diff --git a/src/caffe/util/db.cpp b/src/caffe/util/db.cpp new file mode 100755 index 0000000..f55420e --- /dev/null +++ b/src/caffe/util/db.cpp @@ -0,0 +1,31 @@ +#include "caffe/util/db.hpp" +#include "caffe/util/db_leveldb.hpp" +#include "caffe/util/db_lmdb.hpp" + +#include + +namespace caffe { namespace db { + +DB* GetDB(DataParameter::DB backend) { + switch (backend) { + case DataParameter_DB_LEVELDB: + return new LevelDB(); + case DataParameter_DB_LMDB: + return new LMDB(); + default: + LOG(FATAL) << "Unknown database backend"; + } +} + +DB* GetDB(const string& backend) { + if (backend == "leveldb") { + return new LevelDB(); + } else if (backend == "lmdb") { + return new LMDB(); + } else { + LOG(FATAL) << "Unknown database backend"; + } +} + +} // namespace db +} // namespace caffe diff --git a/src/caffe/util/db_leveldb.cpp b/src/caffe/util/db_leveldb.cpp new file mode 100755 index 0000000..06c4662 --- /dev/null +++ b/src/caffe/util/db_leveldb.cpp @@ -0,0 +1,21 @@ +#include "caffe/util/db_leveldb.hpp" + +#include + +namespace caffe { namespace db { + +void LevelDB::Open(const string& source, Mode mode) { + leveldb::Options options; + options.block_size = 65536; + options.write_buffer_size = 268435456; + options.max_open_files = 100; + options.error_if_exists = mode == NEW; + options.create_if_missing = mode != READ; + leveldb::Status status = 
leveldb::DB::Open(options, source, &db_); + CHECK(status.ok()) << "Failed to open leveldb " << source + << std::endl << status.ToString(); + LOG(INFO) << "Opened leveldb " << source; +} + +} // namespace db +} // namespace caffe diff --git a/src/caffe/util/db_lmdb.cpp b/src/caffe/util/db_lmdb.cpp new file mode 100755 index 0000000..a054b79 --- /dev/null +++ b/src/caffe/util/db_lmdb.cpp @@ -0,0 +1,51 @@ +#include "caffe/util/db_lmdb.hpp" + +#include + +#include + +namespace caffe { namespace db { + +const size_t LMDB_MAP_SIZE = 1099511627776; // 1 TB + +void LMDB::Open(const string& source, Mode mode) { + MDB_CHECK(mdb_env_create(&mdb_env_)); + MDB_CHECK(mdb_env_set_mapsize(mdb_env_, LMDB_MAP_SIZE)); + if (mode == NEW) { + CHECK_EQ(mkdir(source.c_str(), 0744), 0) << "mkdir " << source << "failed"; + } + int flags = 0; + if (mode == READ) { + flags = MDB_RDONLY | MDB_NOTLS; + } + MDB_CHECK(mdb_env_open(mdb_env_, source.c_str(), flags, 0664)); + LOG(INFO) << "Opened lmdb " << source; +} + +LMDBCursor* LMDB::NewCursor() { + MDB_txn* mdb_txn; + MDB_cursor* mdb_cursor; + MDB_CHECK(mdb_txn_begin(mdb_env_, NULL, MDB_RDONLY, &mdb_txn)); + MDB_CHECK(mdb_dbi_open(mdb_txn, NULL, 0, &mdb_dbi_)); + MDB_CHECK(mdb_cursor_open(mdb_txn, mdb_dbi_, &mdb_cursor)); + return new LMDBCursor(mdb_txn, mdb_cursor); +} + +LMDBTransaction* LMDB::NewTransaction() { + MDB_txn* mdb_txn; + MDB_CHECK(mdb_txn_begin(mdb_env_, NULL, 0, &mdb_txn)); + MDB_CHECK(mdb_dbi_open(mdb_txn, NULL, 0, &mdb_dbi_)); + return new LMDBTransaction(&mdb_dbi_, mdb_txn); +} + +void LMDBTransaction::Put(const string& key, const string& value) { + MDB_val mdb_key, mdb_value; + mdb_key.mv_data = const_cast(key.data()); + mdb_key.mv_size = key.size(); + mdb_value.mv_data = const_cast(value.data()); + mdb_value.mv_size = value.size(); + MDB_CHECK(mdb_put(mdb_txn_, *mdb_dbi_, &mdb_key, &mdb_value, 0)); +} + +} // namespace db +} // namespace caffe diff --git a/src/caffe/util/hdf5.cpp b/src/caffe/util/hdf5.cpp new file mode 100755 index 0000000..d0d05f7 --- /dev/null +++ b/src/caffe/util/hdf5.cpp @@ -0,0 +1,160 @@ +#include "caffe/util/hdf5.hpp" + +#include +#include + +namespace caffe { + +// Verifies format of data stored in HDF5 file and reshapes blob accordingly. +template +void hdf5_load_nd_dataset_helper( + hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, + Blob* blob) { + // Verify that the dataset exists. + CHECK(H5LTfind_dataset(file_id, dataset_name_)) + << "Failed to find HDF5 dataset " << dataset_name_; + // Verify that the number of dimensions is in the accepted range. + herr_t status; + int ndims; + status = H5LTget_dataset_ndims(file_id, dataset_name_, &ndims); + CHECK_GE(status, 0) << "Failed to get dataset ndims for " << dataset_name_; + CHECK_GE(ndims, min_dim); + CHECK_LE(ndims, max_dim); + + // Verify that the data format is what we expect: float or double. 
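+  // Read the dataset extents and element class, then reshape the blob so its
+  // shape matches the on-disk dataset before the caller reads the data.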
+ std::vector dims(ndims); + H5T_class_t class_; + status = H5LTget_dataset_info( + file_id, dataset_name_, dims.data(), &class_, NULL); + CHECK_GE(status, 0) << "Failed to get dataset info for " << dataset_name_; + CHECK_EQ(class_, H5T_FLOAT) << "Expected float or double data"; + + vector blob_dims(dims.size()); + for (int i = 0; i < dims.size(); ++i) { + blob_dims[i] = dims[i]; + } + blob->Reshape(blob_dims); +} + +template <> +void hdf5_load_nd_dataset(hid_t file_id, const char* dataset_name_, + int min_dim, int max_dim, Blob* blob) { + hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob); + herr_t status = H5LTread_dataset_float( + file_id, dataset_name_, blob->mutable_cpu_data()); + CHECK_GE(status, 0) << "Failed to read float dataset " << dataset_name_; +} + +template <> +void hdf5_load_nd_dataset(hid_t file_id, const char* dataset_name_, + int min_dim, int max_dim, Blob* blob) { + hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob); + herr_t status = H5LTread_dataset_double( + file_id, dataset_name_, blob->mutable_cpu_data()); + CHECK_GE(status, 0) << "Failed to read double dataset " << dataset_name_; +} + +template <> +void hdf5_save_nd_dataset( + const hid_t file_id, const string& dataset_name, const Blob& blob, + bool write_diff) { + int num_axes = blob.num_axes(); + hsize_t *dims = new hsize_t[num_axes]; + for (int i = 0; i < num_axes; ++i) { + dims[i] = blob.shape(i); + } + const float* data; + if (write_diff) { + data = blob.cpu_diff(); + } else { + data = blob.cpu_data(); + } + herr_t status = H5LTmake_dataset_float( + file_id, dataset_name.c_str(), num_axes, dims, data); + CHECK_GE(status, 0) << "Failed to make float dataset " << dataset_name; + delete[] dims; +} + +template <> +void hdf5_save_nd_dataset( + hid_t file_id, const string& dataset_name, const Blob& blob, + bool write_diff) { + int num_axes = blob.num_axes(); + hsize_t *dims = new hsize_t[num_axes]; + for (int i = 0; i < num_axes; ++i) { + dims[i] = blob.shape(i); + } + const double* data; + if (write_diff) { + data = blob.cpu_diff(); + } else { + data = blob.cpu_data(); + } + herr_t status = H5LTmake_dataset_double( + file_id, dataset_name.c_str(), num_axes, dims, data); + CHECK_GE(status, 0) << "Failed to make double dataset " << dataset_name; + delete[] dims; +} + +string hdf5_load_string(hid_t loc_id, const string& dataset_name) { + // Get size of dataset + size_t size; + H5T_class_t class_; + herr_t status = \ + H5LTget_dataset_info(loc_id, dataset_name.c_str(), NULL, &class_, &size); + CHECK_GE(status, 0) << "Failed to get dataset info for " << dataset_name; + char *buf = new char[size]; + status = H5LTread_dataset_string(loc_id, dataset_name.c_str(), buf); + CHECK_GE(status, 0) + << "Failed to load int dataset with name " << dataset_name; + string val(buf); + delete[] buf; + return val; +} + +void hdf5_save_string(hid_t loc_id, const string& dataset_name, + const string& s) { + herr_t status = \ + H5LTmake_dataset_string(loc_id, dataset_name.c_str(), s.c_str()); + CHECK_GE(status, 0) + << "Failed to save string dataset with name " << dataset_name; +} + +int hdf5_load_int(hid_t loc_id, const string& dataset_name) { + int val; + herr_t status = H5LTread_dataset_int(loc_id, dataset_name.c_str(), &val); + CHECK_GE(status, 0) + << "Failed to load int dataset with name " << dataset_name; + return val; +} + +void hdf5_save_int(hid_t loc_id, const string& dataset_name, int i) { + hsize_t one = 1; + herr_t status = \ + H5LTmake_dataset_int(loc_id, dataset_name.c_str(), 
1, &one, &i); + CHECK_GE(status, 0) + << "Failed to save int dataset with name " << dataset_name; +} + +int hdf5_get_num_links(hid_t loc_id) { + H5G_info_t info; + herr_t status = H5Gget_info(loc_id, &info); + CHECK_GE(status, 0) << "Error while counting HDF5 links."; + return info.nlinks; +} + +string hdf5_get_name_by_idx(hid_t loc_id, int idx) { + ssize_t str_size = H5Lget_name_by_idx( + loc_id, ".", H5_INDEX_NAME, H5_ITER_NATIVE, idx, NULL, 0, H5P_DEFAULT); + CHECK_GE(str_size, 0) << "Error retrieving HDF5 dataset at index " << idx; + char *c_str = new char[str_size+1]; + ssize_t status = H5Lget_name_by_idx( + loc_id, ".", H5_INDEX_NAME, H5_ITER_NATIVE, idx, c_str, str_size+1, + H5P_DEFAULT); + CHECK_GE(status, 0) << "Error retrieving HDF5 dataset at index " << idx; + string result(c_str); + delete[] c_str; + return result; +} + +} // namespace caffe diff --git a/src/caffe/util/im2col.cpp b/src/caffe/util/im2col.cpp new file mode 100755 index 0000000..c48f31f --- /dev/null +++ b/src/caffe/util/im2col.cpp @@ -0,0 +1,83 @@ +#include +#include +#include + +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void im2col_cpu(const Dtype* data_im, const int channels, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + Dtype* data_col) { + int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; + int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; + int channels_col = channels * kernel_h * kernel_w; + for (int c = 0; c < channels_col; ++c) { + int w_offset = c % kernel_w; + int h_offset = (c / kernel_w) % kernel_h; + int c_im = c / kernel_h / kernel_w; + for (int h = 0; h < height_col; ++h) { + for (int w = 0; w < width_col; ++w) { + int h_pad = h * stride_h - pad_h + h_offset; + int w_pad = w * stride_w - pad_w + w_offset; + if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width) + data_col[(c * height_col + h) * width_col + w] = + data_im[(c_im * height + h_pad) * width + w_pad]; + else + data_col[(c * height_col + h) * width_col + w] = 0; + } + } + } +} + +// Explicit instantiation +template void im2col_cpu(const float* data_im, const int channels, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, float* data_col); +template void im2col_cpu(const double* data_im, const int channels, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, double* data_col); + +template +void col2im_cpu(const Dtype* data_col, const int channels, + const int height, const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + Dtype* data_im) { + caffe_set(height * width * channels, Dtype(0), data_im); + int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; + int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; + int channels_col = channels * patch_h * patch_w; + for (int c = 0; c < channels_col; ++c) { + int w_offset = c % patch_w; + int h_offset = (c / patch_w) % patch_h; + int c_im = c / patch_h / patch_w; + for (int h = 0; h < height_col; ++h) { + for (int w = 0; w < width_col; ++w) { + int h_pad = h * stride_h - pad_h + h_offset; + int w_pad = w * stride_w - pad_w + w_offset; + if (h_pad >= 0 && h_pad < height && w_pad >= 0 && 
w_pad < width) + data_im[(c_im * height + h_pad) * width + w_pad] += + data_col[(c * height_col + h) * width_col + w]; + } + } + } +} + +// Explicit instantiation +template void col2im_cpu(const float* data_col, const int channels, + const int height, const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, float* data_im); +template void col2im_cpu(const double* data_col, const int channels, + const int height, const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, double* data_im); + +} // namespace caffe diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu new file mode 100755 index 0000000..c90f93e --- /dev/null +++ b/src/caffe/util/im2col.cu @@ -0,0 +1,144 @@ +#include +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/util/im2col.hpp" + +namespace caffe { + +template +__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + Dtype* data_col) { + CUDA_KERNEL_LOOP(index, n) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + Dtype* data_col_ptr = data_col; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + const Dtype* data_im_ptr = data_im; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + int h = h_in + i; + int w = w_in + j; + *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } +} + +template +void im2col_gpu(const Dtype* data_im, const int channels, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + Dtype* data_col) { + // We are going to launch channels * height_col * width_col kernels, each + // kernel responsible for copying a single-channel grid. 
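+  // The column buffer has (channels * kernel_h * kernel_w) rows and
+  // (height_col * width_col) columns, so the convolution reduces to a GEMM.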
+ int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; + int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; + int num_kernels = channels * height_col * width_col; + // NOLINT_NEXT_LINE(whitespace/operators) + im2col_gpu_kernel<<>>( + num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h, + pad_w, stride_h, stride_w, height_col, + width_col, data_col); + CUDA_POST_KERNEL_CHECK; +} + + +// Explicit instantiation +template void im2col_gpu(const float* data_im, const int channels, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + float* data_col); +template void im2col_gpu(const double* data_im, const int channels, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + double* data_col); + +template +__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col, + const int height, const int width, const int channels, + const int patch_h, const int patch_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + Dtype* data_im) { + CUDA_KERNEL_LOOP(index, n) { + Dtype val = 0; + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = index / (width * height); + // compute the start and end of the output + int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; + int w_col_end = min(w / stride_w + 1, width_col); + int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; + int h_col_end = min(h / stride_h + 1, height_col); + /* + for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + // the col location: [c * width * height + h_out, w_out] + int c_col = c * patch_h * patch_w + (h - h_col * stride_h) * ksize + + (w - w_col * stride_w); + val += data_col[(c_col * height_col + h_col) * width_col + w_col]; + } + } + */ + // equivalent implementation + int offset = + (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col; + int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col; + int coeff_w_col = (1 - stride_w * height_col * width_col); + for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col]; + } + } + data_im[index] = val; + } +} + +template +void col2im_gpu(const Dtype* data_col, const int channels, + const int height, const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, Dtype* data_im) { + int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; + int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; + int num_kernels = channels * height * width; + // To avoid involving atomic operations, we will launch one kernel per + // bottom dimension, and then in the kernel add up the top dimensions. 
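+  // Each thread therefore owns a single image element and accumulates every
+  // column entry that was copied from it, so no atomic operations are needed.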
+ // NOLINT_NEXT_LINE(whitespace/operators) + col2im_gpu_kernel<<>>( + num_kernels, data_col, height, width, channels, patch_h, patch_w, + pad_h, pad_w, stride_h, stride_w, + height_col, width_col, data_im); + CUDA_POST_KERNEL_CHECK; +} + +// Explicit instantiation +template void col2im_gpu(const float* data_col, const int channels, + const int height, const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, float* data_im); +template void col2im_gpu(const double* data_col, const int channels, + const int height, const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, double* data_im); + +} // namespace caffe diff --git a/src/caffe/util/insert_splits.cpp b/src/caffe/util/insert_splits.cpp new file mode 100755 index 0000000..416f80a --- /dev/null +++ b/src/caffe/util/insert_splits.cpp @@ -0,0 +1,144 @@ +#include +#include +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/util/insert_splits.hpp" + +namespace caffe { + +void InsertSplits(const NetParameter& param, NetParameter* param_split) { + // Initialize by copying from the input NetParameter. + param_split->CopyFrom(param); + param_split->clear_layer(); + map > blob_name_to_last_top_idx; + map, pair > bottom_idx_to_source_top_idx; + map, int> top_idx_to_bottom_count; + map, float> top_idx_to_loss_weight; + map, int> top_idx_to_bottom_split_idx; + map layer_idx_to_layer_name; + layer_idx_to_layer_name[-1] = "input"; + // Determine the number of times each blob is used as an input (bottom) blob. + for (int i = 0; i < param.input_size(); ++i) { + const string& blob_name = param.input(i); + blob_name_to_last_top_idx[blob_name] = make_pair(-1, i); + } + for (int i = 0; i < param.layer_size(); ++i) { + const LayerParameter& layer_param = param.layer(i); + layer_idx_to_layer_name[i] = layer_param.name(); + for (int j = 0; j < layer_param.bottom_size(); ++j) { + const string& blob_name = layer_param.bottom(j); + if (blob_name_to_last_top_idx.find(blob_name) == + blob_name_to_last_top_idx.end()) { + LOG(FATAL) << "Unknown blob input " << blob_name << " to layer " << j; + } + const pair& bottom_idx = make_pair(i, j); + const pair& top_idx = blob_name_to_last_top_idx[blob_name]; + bottom_idx_to_source_top_idx[bottom_idx] = top_idx; + ++top_idx_to_bottom_count[top_idx]; + } + for (int j = 0; j < layer_param.top_size(); ++j) { + const string& blob_name = layer_param.top(j); + blob_name_to_last_top_idx[blob_name] = make_pair(i, j); + } + // A use of a top blob as a loss should be handled similarly to the use of + // a top blob as an input (bottom) blob to another layer. + const int last_loss = + std::min(layer_param.loss_weight_size(), layer_param.top_size()); + for (int j = 0; j < last_loss; ++j) { + const string& blob_name = layer_param.top(j); + const pair& top_idx = blob_name_to_last_top_idx[blob_name]; + top_idx_to_loss_weight[top_idx] = layer_param.loss_weight(j); + if (top_idx_to_loss_weight[top_idx]) { + ++top_idx_to_bottom_count[top_idx]; + } + } + } + // Create split layer for any input blobs used by other layer as bottom + // blobs more than once. 
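+  // The Split layer gives each consumer its own top blob, so the diffs from
+  // all consumers are accumulated correctly in the backward pass.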
+ for (int i = 0; i < param.input_size(); ++i) { + const int split_count = top_idx_to_bottom_count[make_pair(-1, i)]; + if (split_count > 1) { + const string& layer_name = layer_idx_to_layer_name[-1]; + const string& blob_name = param.input(i); + LayerParameter* split_layer_param = param_split->add_layer(); + const float kZeroLossWeight = 0; + ConfigureSplitLayer(layer_name, blob_name, i, split_count, + kZeroLossWeight, split_layer_param); + } + } + for (int i = 0; i < param.layer_size(); ++i) { + LayerParameter* layer_param = param_split->add_layer(); + layer_param->CopyFrom(param.layer(i)); + // Replace any shared bottom blobs with split layer outputs. + for (int j = 0; j < layer_param->bottom_size(); ++j) { + const pair& top_idx = + bottom_idx_to_source_top_idx[make_pair(i, j)]; + const int split_count = top_idx_to_bottom_count[top_idx]; + if (split_count > 1) { + const string& layer_name = layer_idx_to_layer_name[top_idx.first]; + const string& blob_name = layer_param->bottom(j); + layer_param->set_bottom(j, SplitBlobName(layer_name, + blob_name, top_idx.second, top_idx_to_bottom_split_idx[top_idx]++)); + } + } + // Create split layer for any top blobs used by other layer as bottom + // blobs more than once. + for (int j = 0; j < layer_param->top_size(); ++j) { + const pair& top_idx = make_pair(i, j); + const int split_count = top_idx_to_bottom_count[top_idx]; + if (split_count > 1) { + const string& layer_name = layer_idx_to_layer_name[i]; + const string& blob_name = layer_param->top(j); + LayerParameter* split_layer_param = param_split->add_layer(); + const float loss_weight = top_idx_to_loss_weight[top_idx]; + ConfigureSplitLayer(layer_name, blob_name, j, split_count, + loss_weight, split_layer_param); + if (loss_weight) { + layer_param->clear_loss_weight(); + top_idx_to_bottom_split_idx[top_idx]++; + } + } + } + } +} + +void ConfigureSplitLayer(const string& layer_name, const string& blob_name, + const int blob_idx, const int split_count, const float loss_weight, + LayerParameter* split_layer_param) { + split_layer_param->Clear(); + split_layer_param->add_bottom(blob_name); + split_layer_param->set_name(SplitLayerName(layer_name, blob_name, blob_idx)); + split_layer_param->set_type("Split"); + for (int k = 0; k < split_count; ++k) { + split_layer_param->add_top( + SplitBlobName(layer_name, blob_name, blob_idx, k)); + if (loss_weight) { + if (k == 0) { + split_layer_param->add_loss_weight(loss_weight); + } else { + split_layer_param->add_loss_weight(0); + } + } + } +} + +string SplitLayerName(const string& layer_name, const string& blob_name, + const int blob_idx) { + ostringstream split_layer_name; + split_layer_name << blob_name << "_" << layer_name << "_" << blob_idx + << "_split"; + return split_layer_name.str(); +} + +string SplitBlobName(const string& layer_name, const string& blob_name, + const int blob_idx, const int split_idx) { + ostringstream split_blob_name; + split_blob_name << blob_name << "_" << layer_name << "_" << blob_idx + << "_split_" << split_idx; + return split_blob_name.str(); +} + +} // namespace caffe diff --git a/src/caffe/util/io.cpp b/src/caffe/util/io.cpp new file mode 100755 index 0000000..6f03314 --- /dev/null +++ b/src/caffe/util/io.cpp @@ -0,0 +1,232 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include // NOLINT(readability/streams) +#include +#include + +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/io.hpp" + +const int kProtoReadBytesLimit = 
INT_MAX; // Max size of 2 GB minus 1 byte. + +namespace caffe { + +using google::protobuf::io::FileInputStream; +using google::protobuf::io::FileOutputStream; +using google::protobuf::io::ZeroCopyInputStream; +using google::protobuf::io::CodedInputStream; +using google::protobuf::io::ZeroCopyOutputStream; +using google::protobuf::io::CodedOutputStream; +using google::protobuf::Message; + +bool ReadProtoFromTextFile(const char* filename, Message* proto) { + int fd = open(filename, O_RDONLY); + CHECK_NE(fd, -1) << "File not found: " << filename; + FileInputStream* input = new FileInputStream(fd); + bool success = google::protobuf::TextFormat::Parse(input, proto); + delete input; + close(fd); + return success; +} + +void WriteProtoToTextFile(const Message& proto, const char* filename) { + int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0644); + FileOutputStream* output = new FileOutputStream(fd); + CHECK(google::protobuf::TextFormat::Print(proto, output)); + delete output; + close(fd); +} + +bool ReadProtoFromBinaryFile(const char* filename, Message* proto) { + int fd = open(filename, O_RDONLY); + CHECK_NE(fd, -1) << "File not found: " << filename; + ZeroCopyInputStream* raw_input = new FileInputStream(fd); + CodedInputStream* coded_input = new CodedInputStream(raw_input); + coded_input->SetTotalBytesLimit(kProtoReadBytesLimit, 536870912); + + bool success = proto->ParseFromCodedStream(coded_input); + + delete coded_input; + delete raw_input; + close(fd); + return success; +} + +void WriteProtoToBinaryFile(const Message& proto, const char* filename) { + fstream output(filename, ios::out | ios::trunc | ios::binary); + CHECK(proto.SerializeToOstream(&output)); +} + +cv::Mat ReadImageToCVMat(const string& filename, + const int height, const int width, const bool is_color) { + cv::Mat cv_img; + int cv_read_flag = (is_color ? CV_LOAD_IMAGE_COLOR : + CV_LOAD_IMAGE_GRAYSCALE); + cv::Mat cv_img_origin = cv::imread(filename, cv_read_flag); + if (!cv_img_origin.data) { + LOG(ERROR) << "Could not open or find file " << filename; + return cv_img_origin; + } + if (height > 0 && width > 0) { + cv::resize(cv_img_origin, cv_img, cv::Size(width, height)); + } else { + cv_img = cv_img_origin; + } + return cv_img; +} + +cv::Mat ReadImageToCVMat(const string& filename, + const int height, const int width) { + return ReadImageToCVMat(filename, height, width, true); +} + +cv::Mat ReadImageToCVMat(const string& filename, + const bool is_color) { + return ReadImageToCVMat(filename, 0, 0, is_color); +} + +cv::Mat ReadImageToCVMat(const string& filename) { + return ReadImageToCVMat(filename, 0, 0, true); +} +// Do the file extension and encoding match? +static bool matchExt(const std::string & fn, + std::string en) { + size_t p = fn.rfind('.'); + std::string ext = p != fn.npos ? 
fn.substr(p) : fn; + std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower); + std::transform(en.begin(), en.end(), en.begin(), ::tolower); + if ( ext == en ) + return true; + if ( en == "jpg" && ext == "jpeg" ) + return true; + return false; +} +bool ReadImageToDatum(const string& filename, const int label, + const int height, const int width, const bool is_color, + const std::string & encoding, Datum* datum) { + cv::Mat cv_img = ReadImageToCVMat(filename, height, width, is_color); + if (cv_img.data) { + if (encoding.size()) { + if ( (cv_img.channels() == 3) == is_color && !height && !width && + matchExt(filename, encoding) ) + return ReadFileToDatum(filename, label, datum); + std::vector buf; + cv::imencode("."+encoding, cv_img, buf); + datum->set_data(std::string(reinterpret_cast(&buf[0]), + buf.size())); + datum->set_label(label); + datum->set_encoded(true); + return true; + } + CVMatToDatum(cv_img, datum); + datum->set_label(label); + return true; + } else { + return false; + } +} + +bool ReadFileToDatum(const string& filename, const int label, + Datum* datum) { + std::streampos size; + + fstream file(filename.c_str(), ios::in|ios::binary|ios::ate); + if (file.is_open()) { + size = file.tellg(); + std::string buffer(size, ' '); + file.seekg(0, ios::beg); + file.read(&buffer[0], size); + file.close(); + datum->set_data(buffer); + datum->set_label(label); + datum->set_encoded(true); + return true; + } else { + return false; + } +} + +cv::Mat DecodeDatumToCVMatNative(const Datum& datum) { + cv::Mat cv_img; + CHECK(datum.encoded()) << "Datum not encoded"; + const string& data = datum.data(); + std::vector vec_data(data.c_str(), data.c_str() + data.size()); + cv_img = cv::imdecode(vec_data, -1); + if (!cv_img.data) { + LOG(ERROR) << "Could not decode datum "; + } + return cv_img; +} +cv::Mat DecodeDatumToCVMat(const Datum& datum, bool is_color) { + cv::Mat cv_img; + CHECK(datum.encoded()) << "Datum not encoded"; + const string& data = datum.data(); + std::vector vec_data(data.c_str(), data.c_str() + data.size()); + int cv_read_flag = (is_color ? 
CV_LOAD_IMAGE_COLOR : + CV_LOAD_IMAGE_GRAYSCALE); + cv_img = cv::imdecode(vec_data, cv_read_flag); + if (!cv_img.data) { + LOG(ERROR) << "Could not decode datum "; + } + return cv_img; +} + +// If Datum is encoded will decoded using DecodeDatumToCVMat and CVMatToDatum +// If Datum is not encoded will do nothing +bool DecodeDatumNative(Datum* datum) { + if (datum->encoded()) { + cv::Mat cv_img = DecodeDatumToCVMatNative((*datum)); + CVMatToDatum(cv_img, datum); + return true; + } else { + return false; + } +} +bool DecodeDatum(Datum* datum, bool is_color) { + if (datum->encoded()) { + cv::Mat cv_img = DecodeDatumToCVMat((*datum), is_color); + CVMatToDatum(cv_img, datum); + return true; + } else { + return false; + } +} + +void CVMatToDatum(const cv::Mat& cv_img, Datum* datum) { + CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte"; + datum->set_channels(cv_img.channels()); + datum->set_height(cv_img.rows); + datum->set_width(cv_img.cols); + datum->clear_data(); + datum->clear_float_data(); + datum->set_encoded(false); + int datum_channels = datum->channels(); + int datum_height = datum->height(); + int datum_width = datum->width(); + int datum_size = datum_channels * datum_height * datum_width; + std::string buffer(datum_size, ' '); + for (int h = 0; h < datum_height; ++h) { + const uchar* ptr = cv_img.ptr(h); + int img_index = 0; + for (int w = 0; w < datum_width; ++w) { + for (int c = 0; c < datum_channels; ++c) { + int datum_index = (c * datum_height + h) * datum_width + w; + buffer[datum_index] = static_cast(ptr[img_index++]); + } + } + } + datum->set_data(buffer); +} + + +} // namespace caffe diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp new file mode 100755 index 0000000..0aab6b1 --- /dev/null +++ b/src/caffe/util/math_functions.cpp @@ -0,0 +1,397 @@ +#include +#include + +#include + +#include "caffe/common.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/rng.hpp" + +namespace caffe { + +template<> +void caffe_cpu_gemm(const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, + const float alpha, const float* A, const float* B, const float beta, + float* C) { + int lda = (TransA == CblasNoTrans) ? K : M; + int ldb = (TransB == CblasNoTrans) ? N : K; + cblas_sgemm(CblasRowMajor, TransA, TransB, M, N, K, alpha, A, lda, B, + ldb, beta, C, N); +} + +template<> +void caffe_cpu_gemm(const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, + const double alpha, const double* A, const double* B, const double beta, + double* C) { + int lda = (TransA == CblasNoTrans) ? K : M; + int ldb = (TransB == CblasNoTrans) ? 
N : K; + cblas_dgemm(CblasRowMajor, TransA, TransB, M, N, K, alpha, A, lda, B, + ldb, beta, C, N); +} + +template <> +void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, + const int N, const float alpha, const float* A, const float* x, + const float beta, float* y) { + cblas_sgemv(CblasRowMajor, TransA, M, N, alpha, A, N, x, 1, beta, y, 1); +} + +template <> +void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, + const int N, const double alpha, const double* A, const double* x, + const double beta, double* y) { + cblas_dgemv(CblasRowMajor, TransA, M, N, alpha, A, N, x, 1, beta, y, 1); +} + +template <> +void caffe_axpy(const int N, const float alpha, const float* X, + float* Y) { cblas_saxpy(N, alpha, X, 1, Y, 1); } + +template <> +void caffe_axpy(const int N, const double alpha, const double* X, + double* Y) { cblas_daxpy(N, alpha, X, 1, Y, 1); } + +template +void caffe_set(const int N, const Dtype alpha, Dtype* Y) { + if (alpha == 0) { + memset(Y, 0, sizeof(Dtype) * N); // NOLINT(caffe/alt_fn) + return; + } + for (int i = 0; i < N; ++i) { + Y[i] = alpha; + } +} + +template void caffe_set(const int N, const int alpha, int* Y); +template void caffe_set(const int N, const float alpha, float* Y); +template void caffe_set(const int N, const double alpha, double* Y); + +template <> +void caffe_add_scalar(const int N, const float alpha, float* Y) { + for (int i = 0; i < N; ++i) { + Y[i] += alpha; + } +} + +template <> +void caffe_add_scalar(const int N, const double alpha, double* Y) { + for (int i = 0; i < N; ++i) { + Y[i] += alpha; + } +} + +template +void caffe_copy(const int N, const Dtype* X, Dtype* Y) { + if (X != Y) { + if (Caffe::mode() == Caffe::GPU) { +#ifndef CPU_ONLY + // NOLINT_NEXT_LINE(caffe/alt_fn) + CUDA_CHECK(cudaMemcpy(Y, X, sizeof(Dtype) * N, cudaMemcpyDefault)); +#else + NO_GPU; +#endif + } else { + memcpy(Y, X, sizeof(Dtype) * N); // NOLINT(caffe/alt_fn) + } + } +} + +template void caffe_copy(const int N, const int* X, int* Y); +template void caffe_copy(const int N, const unsigned int* X, + unsigned int* Y); +template void caffe_copy(const int N, const float* X, float* Y); +template void caffe_copy(const int N, const double* X, double* Y); + +template <> +void caffe_scal(const int N, const float alpha, float *X) { + cblas_sscal(N, alpha, X, 1); +} + +template <> +void caffe_scal(const int N, const double alpha, double *X) { + cblas_dscal(N, alpha, X, 1); +} + +template <> +void caffe_cpu_axpby(const int N, const float alpha, const float* X, + const float beta, float* Y) { + cblas_saxpby(N, alpha, X, 1, beta, Y, 1); +} + +template <> +void caffe_cpu_axpby(const int N, const double alpha, const double* X, + const double beta, double* Y) { + cblas_daxpby(N, alpha, X, 1, beta, Y, 1); +} + +template <> +void caffe_add(const int n, const float* a, const float* b, + float* y) { + vsAdd(n, a, b, y); +} + +template <> +void caffe_add(const int n, const double* a, const double* b, + double* y) { + vdAdd(n, a, b, y); +} + +template <> +void caffe_sub(const int n, const float* a, const float* b, + float* y) { + vsSub(n, a, b, y); +} + +template <> +void caffe_sub(const int n, const double* a, const double* b, + double* y) { + vdSub(n, a, b, y); +} + +template <> +void caffe_mul(const int n, const float* a, const float* b, + float* y) { + vsMul(n, a, b, y); +} + +template <> +void caffe_mul(const int n, const double* a, const double* b, + double* y) { + vdMul(n, a, b, y); +} + +template <> +void caffe_div(const int n, const float* a, const float* b, + float* y) { 
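+  // Element-wise quotient: y[i] = a[i] / b[i].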
+ vsDiv(n, a, b, y); +} + +template <> +void caffe_div(const int n, const double* a, const double* b, + double* y) { + vdDiv(n, a, b, y); +} + +template <> +void caffe_powx(const int n, const float* a, const float b, + float* y) { + vsPowx(n, a, b, y); +} + +template <> +void caffe_powx(const int n, const double* a, const double b, + double* y) { + vdPowx(n, a, b, y); +} + +template <> +void caffe_sqr(const int n, const float* a, float* y) { + vsSqr(n, a, y); +} + +template <> +void caffe_sqr(const int n, const double* a, double* y) { + vdSqr(n, a, y); +} + +template <> +void caffe_exp(const int n, const float* a, float* y) { + vsExp(n, a, y); +} + +template <> +void caffe_exp(const int n, const double* a, double* y) { + vdExp(n, a, y); +} + +template <> +void caffe_log(const int n, const float* a, float* y) { + vsLn(n, a, y); +} + +template <> +void caffe_log(const int n, const double* a, double* y) { + vdLn(n, a, y); +} + +template <> +void caffe_abs(const int n, const float* a, float* y) { + vsAbs(n, a, y); +} + +template <> +void caffe_abs(const int n, const double* a, double* y) { + vdAbs(n, a, y); +} + +unsigned int caffe_rng_rand() { + return (*caffe_rng())(); +} + +template +Dtype caffe_nextafter(const Dtype b) { + return boost::math::nextafter( + b, std::numeric_limits::max()); +} + +template +float caffe_nextafter(const float b); + +template +double caffe_nextafter(const double b); + +template +void caffe_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r) { + CHECK_GE(n, 0); + CHECK(r); + CHECK_LE(a, b); + boost::uniform_real random_distribution(a, caffe_nextafter(b)); + boost::variate_generator > + variate_generator(caffe_rng(), random_distribution); + for (int i = 0; i < n; ++i) { + r[i] = variate_generator(); + } +} + +template +void caffe_rng_uniform(const int n, const float a, const float b, + float* r); + +template +void caffe_rng_uniform(const int n, const double a, const double b, + double* r); + +template +void caffe_rng_gaussian(const int n, const Dtype a, + const Dtype sigma, Dtype* r) { + CHECK_GE(n, 0); + CHECK(r); + CHECK_GT(sigma, 0); + boost::normal_distribution random_distribution(a, sigma); + boost::variate_generator > + variate_generator(caffe_rng(), random_distribution); + for (int i = 0; i < n; ++i) { + r[i] = variate_generator(); + } +} + +template +void caffe_rng_gaussian(const int n, const float mu, + const float sigma, float* r); + +template +void caffe_rng_gaussian(const int n, const double mu, + const double sigma, double* r); + +template +void caffe_rng_bernoulli(const int n, const Dtype p, int* r) { + CHECK_GE(n, 0); + CHECK(r); + CHECK_GE(p, 0); + CHECK_LE(p, 1); + boost::bernoulli_distribution random_distribution(p); + boost::variate_generator > + variate_generator(caffe_rng(), random_distribution); + for (int i = 0; i < n; ++i) { + r[i] = variate_generator(); + } +} + +template +void caffe_rng_bernoulli(const int n, const double p, int* r); + +template +void caffe_rng_bernoulli(const int n, const float p, int* r); + +template +void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r) { + CHECK_GE(n, 0); + CHECK(r); + CHECK_GE(p, 0); + CHECK_LE(p, 1); + boost::bernoulli_distribution random_distribution(p); + boost::variate_generator > + variate_generator(caffe_rng(), random_distribution); + for (int i = 0; i < n; ++i) { + r[i] = static_cast(variate_generator()); + } +} + +template +void caffe_rng_bernoulli(const int n, const double p, unsigned int* r); + +template +void caffe_rng_bernoulli(const int n, const float p, 
unsigned int* r); + +template <> +float caffe_cpu_strided_dot(const int n, const float* x, const int incx, + const float* y, const int incy) { + return cblas_sdot(n, x, incx, y, incy); +} + +template <> +double caffe_cpu_strided_dot(const int n, const double* x, + const int incx, const double* y, const int incy) { + return cblas_ddot(n, x, incx, y, incy); +} + +template +Dtype caffe_cpu_dot(const int n, const Dtype* x, const Dtype* y) { + return caffe_cpu_strided_dot(n, x, 1, y, 1); +} + +template +float caffe_cpu_dot(const int n, const float* x, const float* y); + +template +double caffe_cpu_dot(const int n, const double* x, const double* y); + +template <> +int caffe_cpu_hamming_distance(const int n, const float* x, + const float* y) { + int dist = 0; + for (int i = 0; i < n; ++i) { + dist += __builtin_popcount(static_cast(x[i]) ^ + static_cast(y[i])); + } + return dist; +} + +template <> +int caffe_cpu_hamming_distance(const int n, const double* x, + const double* y) { + int dist = 0; + for (int i = 0; i < n; ++i) { + dist += __builtin_popcountl(static_cast(x[i]) ^ + static_cast(y[i])); + } + return dist; +} + +template <> +float caffe_cpu_asum(const int n, const float* x) { + return cblas_sasum(n, x, 1); +} + +template <> +double caffe_cpu_asum(const int n, const double* x) { + return cblas_dasum(n, x, 1); +} + +template <> +void caffe_cpu_scale(const int n, const float alpha, const float *x, + float* y) { + cblas_scopy(n, x, 1, y, 1); + cblas_sscal(n, alpha, y, 1); +} + +template <> +void caffe_cpu_scale(const int n, const double alpha, const double *x, + double* y) { + cblas_dcopy(n, x, 1, y, 1); + cblas_dscal(n, alpha, y, 1); +} + +} // namespace caffe diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu new file mode 100755 index 0000000..2631a07 --- /dev/null +++ b/src/caffe/util/math_functions.cu @@ -0,0 +1,465 @@ +#include // CUDA's, not caffe's, for fabs, signbit +#include +#include // thrust::plus +#include + +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template <> +void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, + const float alpha, const float* A, const float* B, const float beta, + float* C) { + // Note that cublas follows fortran order. + int lda = (TransA == CblasNoTrans) ? K : M; + int ldb = (TransB == CblasNoTrans) ? N : K; + cublasOperation_t cuTransA = + (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; + cublasOperation_t cuTransB = + (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; + CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, + N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); +} + +template <> +void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, + const double alpha, const double* A, const double* B, const double beta, + double* C) { + // Note that cublas follows fortran order. + int lda = (TransA == CblasNoTrans) ? K : M; + int ldb = (TransB == CblasNoTrans) ? N : K; + cublasOperation_t cuTransA = + (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; + cublasOperation_t cuTransB = + (TransB == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; + CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, + N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); +} + +template <> +void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, + const int N, const float alpha, const float* A, const float* x, + const float beta, float* y) { + cublasOperation_t cuTransA = + (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; + CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, + A, N, x, 1, &beta, y, 1)); +} + +template <> +void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, + const int N, const double alpha, const double* A, const double* x, + const double beta, double* y) { + cublasOperation_t cuTransA = + (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; + CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, + A, N, x, 1, &beta, y, 1)); +} + +template <> +void caffe_gpu_axpy(const int N, const float alpha, const float* X, + float* Y) { + CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); +} + +template <> +void caffe_gpu_axpy(const int N, const double alpha, const double* X, + double* Y) { + CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); +} + +void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { + if (X != Y) { + CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) + } +} + +template <> +void caffe_gpu_scal(const int N, const float alpha, float *X) { + CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); +} + +template <> +void caffe_gpu_scal(const int N, const double alpha, double *X) { + CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); +} + +template <> +void caffe_gpu_axpby(const int N, const float alpha, const float* X, + const float beta, float* Y) { + caffe_gpu_scal(N, beta, Y); + caffe_gpu_axpy(N, alpha, X, Y); +} + +template <> +void caffe_gpu_axpby(const int N, const double alpha, const double* X, + const double beta, double* Y) { + caffe_gpu_scal(N, beta, Y); + caffe_gpu_axpy(N, alpha, X, Y); +} + +template <> +void caffe_gpu_dot(const int n, const float* x, const float* y, + float* out) { + CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); +} + +template <> +void caffe_gpu_dot(const int n, const double* x, const double* y, + double * out) { + CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); +} + +template <> +void caffe_gpu_asum(const int n, const float* x, float* y) { + CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); +} + +template <> +void caffe_gpu_asum(const int n, const double* x, double* y) { + CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); +} + +template <> +void caffe_gpu_scale(const int n, const float alpha, const float *x, + float* y) { + CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); + CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); +} + +template <> +void caffe_gpu_scale(const int n, const double alpha, const double *x, + double* y) { + CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); + CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); +} + +template +__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = alpha; + } +} + +template +void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { + if (alpha == 0) { + CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) + return; + } + 
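+  // Otherwise fill Y on the device with a simple element-wise kernel.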
// NOLINT_NEXT_LINE(whitespace/operators) + set_kernel<<>>( + N, alpha, Y); +} + +template void caffe_gpu_set(const int N, const int alpha, int* Y); +template void caffe_gpu_set(const int N, const float alpha, float* Y); +template void caffe_gpu_set(const int N, const double alpha, double* Y); + +template +__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] += alpha; + } +} + +template <> +void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { + // NOLINT_NEXT_LINE(whitespace/operators) + add_scalar_kernel<<>>( + N, alpha, Y); +} + +template <> +void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { + // NOLINT_NEXT_LINE(whitespace/operators) + add_scalar_kernel<<>>( + N, alpha, Y); +} + +template +__global__ void add_kernel(const int n, const Dtype* a, + const Dtype* b, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = a[index] + b[index]; + } +} + +template <> +void caffe_gpu_add(const int N, const float* a, const float* b, + float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + add_kernel<<>>( + N, a, b, y); +} + +template <> +void caffe_gpu_add(const int N, const double* a, const double* b, + double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + add_kernel<<>>( + N, a, b, y); +} + +template +__global__ void sub_kernel(const int n, const Dtype* a, + const Dtype* b, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = a[index] - b[index]; + } +} + +template <> +void caffe_gpu_sub(const int N, const float* a, const float* b, + float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + sub_kernel<<>>( + N, a, b, y); +} + +template <> +void caffe_gpu_sub(const int N, const double* a, const double* b, + double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + sub_kernel<<>>( + N, a, b, y); +} + +template +__global__ void mul_kernel(const int n, const Dtype* a, + const Dtype* b, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = a[index] * b[index]; + } +} + +template <> +void caffe_gpu_mul(const int N, const float* a, + const float* b, float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + mul_kernel<<>>( + N, a, b, y); +} + +template <> +void caffe_gpu_mul(const int N, const double* a, + const double* b, double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + mul_kernel<<>>( + N, a, b, y); +} + +template +__global__ void div_kernel(const int n, const Dtype* a, + const Dtype* b, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = a[index] / b[index]; + } +} + +template <> +void caffe_gpu_div(const int N, const float* a, + const float* b, float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + div_kernel<<>>( + N, a, b, y); +} + +template <> +void caffe_gpu_div(const int N, const double* a, + const double* b, double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + div_kernel<<>>( + N, a, b, y); +} + +template +__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = abs(a[index]); + } +} + +template <> +void caffe_gpu_abs(const int N, const float* a, float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + abs_kernel<<>>( + N, a, y); +} + +template <> +void caffe_gpu_abs(const int N, const double* a, double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + abs_kernel<<>>( + N, a, y); +} + + +template +__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = exp(a[index]); + } +} + +template <> +void caffe_gpu_exp(const int N, const float* a, 
float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + exp_kernel<<>>( + N, a, y); +} + +template <> +void caffe_gpu_exp(const int N, const double* a, double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + exp_kernel<<>>( + N, a, y); +} + +template +__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = log(a[index]); + } +} + +template <> +void caffe_gpu_log(const int N, const float* a, float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + log_kernel<<>>( + N, a, y); +} + +template <> +void caffe_gpu_log(const int N, const double* a, double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + log_kernel<<>>( + N, a, y); +} + +template +__global__ void powx_kernel(const int n, const Dtype* a, + const Dtype alpha, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = pow(a[index], alpha); + } +} + +template <> +void caffe_gpu_powx(const int N, const float* a, + const float alpha, float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + powx_kernel<<>>( + N, a, alpha, y); +} + +template <> +void caffe_gpu_powx(const int N, const double* a, + const double alpha, double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + powx_kernel<<>>( + N, a, alpha, y); +} + +DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) + - (x[index] < Dtype(0))); +DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); + +__global__ void popc_kernel(const int n, const float* a, + const float* b, uint8_t* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = __popc(static_cast(a[index]) ^ + static_cast(b[index])); + } +} + +__global__ void popcll_kernel(const int n, const double* a, + const double* b, uint8_t* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = __popcll(static_cast(a[index]) ^ + static_cast(b[index])); + } +} + +template <> +uint32_t caffe_gpu_hamming_distance(const int n, const float* x, + const float* y) { + // TODO: Fix caffe_gpu_hamming_distance (see failing unit test + // TestHammingDistanceGPU in test_math_functions.cpp). + NOT_IMPLEMENTED; + thrust::device_vector popcounts(n); + // NOLINT_NEXT_LINE(whitespace/operators) + popc_kernel<<>>( + n, x, y, thrust::raw_pointer_cast(popcounts.data())); + return thrust::reduce(popcounts.begin(), popcounts.end(), + (uint32_t) 0, thrust::plus()); +} + +template <> +uint32_t caffe_gpu_hamming_distance(const int n, const double* x, + const double* y) { + // TODO: Fix caffe_gpu_hamming_distance (see failing unit test + // TestHammingDistanceGPU in test_math_functions.cpp). 
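+  // Disabled for the same reason as the float version above.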
+ NOT_IMPLEMENTED; + thrust::device_vector popcounts(n); + // NOLINT_NEXT_LINE(whitespace/operators) + popcll_kernel<<>>( + n, x, y, thrust::raw_pointer_cast(popcounts.data())); + return thrust::reduce(popcounts.begin(), popcounts.end(), + /* NOLINT_NEXT_LINE(build/include_what_you_use) */ + (uint32_t) 0, thrust::plus()); +} + +void caffe_gpu_rng_uniform(const int n, unsigned int* r) { + CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); +} + +template <> +void caffe_gpu_rng_uniform(const int n, const float a, const float b, + float* r) { + CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); + const float range = b - a; + if (range != static_cast(1)) { + caffe_gpu_scal(n, range, r); + } + if (a != static_cast(0)) { + caffe_gpu_add_scalar(n, a, r); + } +} + +template <> +void caffe_gpu_rng_uniform(const int n, const double a, const double b, + double* r) { + CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); + const double range = b - a; + if (range != static_cast(1)) { + caffe_gpu_scal(n, range, r); + } + if (a != static_cast(0)) { + caffe_gpu_add_scalar(n, a, r); + } +} + +template <> +void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, + float* r) { + CURAND_CHECK( + curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); +} + +template <> +void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, + double* r) { + CURAND_CHECK( + curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); +} + +} // namespace caffe diff --git a/src/caffe/util/upgrade_proto.cpp b/src/caffe/util/upgrade_proto.cpp new file mode 100755 index 0000000..92e5cf5 --- /dev/null +++ b/src/caffe/util/upgrade_proto.cpp @@ -0,0 +1,940 @@ +#include +#include +#include + +#include +#include + +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/io.hpp" +#include "caffe/util/upgrade_proto.hpp" + +namespace caffe { + +bool NetNeedsUpgrade(const NetParameter& net_param) { + return NetNeedsV0ToV1Upgrade(net_param) || NetNeedsV1ToV2Upgrade(net_param); +} + +bool NetNeedsV0ToV1Upgrade(const NetParameter& net_param) { + for (int i = 0; i < net_param.layers_size(); ++i) { + if (net_param.layers(i).has_layer()) { + return true; + } + } + return false; +} + +bool NetNeedsV1ToV2Upgrade(const NetParameter& net_param) { + return net_param.layers_size() > 0; +} + +bool UpgradeV0Net(const NetParameter& v0_net_param_padding_layers, + NetParameter* net_param) { + // First upgrade padding layers to padded conv layers. + NetParameter v0_net_param; + UpgradeV0PaddingLayers(v0_net_param_padding_layers, &v0_net_param); + // Now upgrade layer parameters. + bool is_fully_compatible = true; + net_param->Clear(); + if (v0_net_param.has_name()) { + net_param->set_name(v0_net_param.name()); + } + for (int i = 0; i < v0_net_param.layers_size(); ++i) { + is_fully_compatible &= UpgradeV0LayerParameter(v0_net_param.layers(i), + net_param->add_layers()); + } + for (int i = 0; i < v0_net_param.input_size(); ++i) { + net_param->add_input(v0_net_param.input(i)); + } + for (int i = 0; i < v0_net_param.input_dim_size(); ++i) { + net_param->add_input_dim(v0_net_param.input_dim(i)); + } + if (v0_net_param.has_force_backward()) { + net_param->set_force_backward(v0_net_param.force_backward()); + } + return is_fully_compatible; +} + +void UpgradeV0PaddingLayers(const NetParameter& param, + NetParameter* param_upgraded_pad) { + // Copy everything other than the layers from the original param. 
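+  // Padding layers themselves are dropped below; their pad value is folded
+  // into the conv/pool layer that consumes their output.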
+ param_upgraded_pad->Clear(); + param_upgraded_pad->CopyFrom(param); + param_upgraded_pad->clear_layers(); + // Figure out which layer each bottom blob comes from. + map blob_name_to_last_top_idx; + for (int i = 0; i < param.input_size(); ++i) { + const string& blob_name = param.input(i); + blob_name_to_last_top_idx[blob_name] = -1; + } + for (int i = 0; i < param.layers_size(); ++i) { + const V1LayerParameter& layer_connection = param.layers(i); + const V0LayerParameter& layer_param = layer_connection.layer(); + // Add the layer to the new net, unless it's a padding layer. + if (layer_param.type() != "padding") { + param_upgraded_pad->add_layers()->CopyFrom(layer_connection); + } + for (int j = 0; j < layer_connection.bottom_size(); ++j) { + const string& blob_name = layer_connection.bottom(j); + if (blob_name_to_last_top_idx.find(blob_name) == + blob_name_to_last_top_idx.end()) { + LOG(FATAL) << "Unknown blob input " << blob_name << " to layer " << j; + } + const int top_idx = blob_name_to_last_top_idx[blob_name]; + if (top_idx == -1) { + continue; + } + const V1LayerParameter& source_layer = param.layers(top_idx); + if (source_layer.layer().type() == "padding") { + // This layer has a padding layer as input -- check that it is a conv + // layer or a pooling layer and takes only one input. Also check that + // the padding layer input has only one input and one output. Other + // cases have undefined behavior in Caffe. + CHECK((layer_param.type() == "conv") || (layer_param.type() == "pool")) + << "Padding layer input to " + "non-convolutional / non-pooling layer type " + << layer_param.type(); + CHECK_EQ(layer_connection.bottom_size(), 1) + << "Conv Layer takes a single blob as input."; + CHECK_EQ(source_layer.bottom_size(), 1) + << "Padding Layer takes a single blob as input."; + CHECK_EQ(source_layer.top_size(), 1) + << "Padding Layer produces a single blob as output."; + int layer_index = param_upgraded_pad->layers_size() - 1; + param_upgraded_pad->mutable_layers(layer_index)->mutable_layer() + ->set_pad(source_layer.layer().pad()); + param_upgraded_pad->mutable_layers(layer_index) + ->set_bottom(j, source_layer.bottom(0)); + } + } + for (int j = 0; j < layer_connection.top_size(); ++j) { + const string& blob_name = layer_connection.top(j); + blob_name_to_last_top_idx[blob_name] = i; + } + } +} + +bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, + V1LayerParameter* layer_param) { + bool is_fully_compatible = true; + layer_param->Clear(); + for (int i = 0; i < v0_layer_connection.bottom_size(); ++i) { + layer_param->add_bottom(v0_layer_connection.bottom(i)); + } + for (int i = 0; i < v0_layer_connection.top_size(); ++i) { + layer_param->add_top(v0_layer_connection.top(i)); + } + if (v0_layer_connection.has_layer()) { + const V0LayerParameter& v0_layer_param = v0_layer_connection.layer(); + if (v0_layer_param.has_name()) { + layer_param->set_name(v0_layer_param.name()); + } + const string& type = v0_layer_param.type(); + if (v0_layer_param.has_type()) { + layer_param->set_type(UpgradeV0LayerType(type)); + } + for (int i = 0; i < v0_layer_param.blobs_size(); ++i) { + layer_param->add_blobs()->CopyFrom(v0_layer_param.blobs(i)); + } + for (int i = 0; i < v0_layer_param.blobs_lr_size(); ++i) { + layer_param->add_blobs_lr(v0_layer_param.blobs_lr(i)); + } + for (int i = 0; i < v0_layer_param.weight_decay_size(); ++i) { + layer_param->add_weight_decay(v0_layer_param.weight_decay(i)); + } + if (v0_layer_param.has_num_output()) { + if (type == "conv") { + 
layer_param->mutable_convolution_param()->set_num_output( + v0_layer_param.num_output()); + } else if (type == "innerproduct") { + layer_param->mutable_inner_product_param()->set_num_output( + v0_layer_param.num_output()); + } else { + LOG(ERROR) << "Unknown parameter num_output for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_biasterm()) { + if (type == "conv") { + layer_param->mutable_convolution_param()->set_bias_term( + v0_layer_param.biasterm()); + } else if (type == "innerproduct") { + layer_param->mutable_inner_product_param()->set_bias_term( + v0_layer_param.biasterm()); + } else { + LOG(ERROR) << "Unknown parameter biasterm for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_weight_filler()) { + if (type == "conv") { + layer_param->mutable_convolution_param()-> + mutable_weight_filler()->CopyFrom(v0_layer_param.weight_filler()); + } else if (type == "innerproduct") { + layer_param->mutable_inner_product_param()-> + mutable_weight_filler()->CopyFrom(v0_layer_param.weight_filler()); + } else { + LOG(ERROR) << "Unknown parameter weight_filler for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_bias_filler()) { + if (type == "conv") { + layer_param->mutable_convolution_param()-> + mutable_bias_filler()->CopyFrom(v0_layer_param.bias_filler()); + } else if (type == "innerproduct") { + layer_param->mutable_inner_product_param()-> + mutable_bias_filler()->CopyFrom(v0_layer_param.bias_filler()); + } else { + LOG(ERROR) << "Unknown parameter bias_filler for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_pad()) { + if (type == "conv") { + layer_param->mutable_convolution_param()->set_pad(v0_layer_param.pad()); + } else if (type == "pool") { + layer_param->mutable_pooling_param()->set_pad(v0_layer_param.pad()); + } else { + LOG(ERROR) << "Unknown parameter pad for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_kernelsize()) { + if (type == "conv") { + layer_param->mutable_convolution_param()->set_kernel_size( + v0_layer_param.kernelsize()); + } else if (type == "pool") { + layer_param->mutable_pooling_param()->set_kernel_size( + v0_layer_param.kernelsize()); + } else { + LOG(ERROR) << "Unknown parameter kernelsize for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_group()) { + if (type == "conv") { + layer_param->mutable_convolution_param()->set_group( + v0_layer_param.group()); + } else { + LOG(ERROR) << "Unknown parameter group for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_stride()) { + if (type == "conv") { + layer_param->mutable_convolution_param()->set_stride( + v0_layer_param.stride()); + } else if (type == "pool") { + layer_param->mutable_pooling_param()->set_stride( + v0_layer_param.stride()); + } else { + LOG(ERROR) << "Unknown parameter stride for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_pool()) { + if (type == "pool") { + V0LayerParameter_PoolMethod pool = v0_layer_param.pool(); + switch (pool) { + case V0LayerParameter_PoolMethod_MAX: + layer_param->mutable_pooling_param()->set_pool( + PoolingParameter_PoolMethod_MAX); + break; + case V0LayerParameter_PoolMethod_AVE: + layer_param->mutable_pooling_param()->set_pool( + PoolingParameter_PoolMethod_AVE); + break; + case V0LayerParameter_PoolMethod_STOCHASTIC: + layer_param->mutable_pooling_param()->set_pool( + 
PoolingParameter_PoolMethod_STOCHASTIC); + break; + default: + LOG(ERROR) << "Unknown pool method " << pool; + is_fully_compatible = false; + } + } else { + LOG(ERROR) << "Unknown parameter pool for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_dropout_ratio()) { + if (type == "dropout") { + layer_param->mutable_dropout_param()->set_dropout_ratio( + v0_layer_param.dropout_ratio()); + } else { + LOG(ERROR) << "Unknown parameter dropout_ratio for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_local_size()) { + if (type == "lrn") { + layer_param->mutable_lrn_param()->set_local_size( + v0_layer_param.local_size()); + } else { + LOG(ERROR) << "Unknown parameter local_size for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_alpha()) { + if (type == "lrn") { + layer_param->mutable_lrn_param()->set_alpha(v0_layer_param.alpha()); + } else { + LOG(ERROR) << "Unknown parameter alpha for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_beta()) { + if (type == "lrn") { + layer_param->mutable_lrn_param()->set_beta(v0_layer_param.beta()); + } else { + LOG(ERROR) << "Unknown parameter beta for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_k()) { + if (type == "lrn") { + layer_param->mutable_lrn_param()->set_k(v0_layer_param.k()); + } else { + LOG(ERROR) << "Unknown parameter k for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_source()) { + if (type == "data") { + layer_param->mutable_data_param()->set_source(v0_layer_param.source()); + } else if (type == "hdf5_data") { + layer_param->mutable_hdf5_data_param()->set_source( + v0_layer_param.source()); + } else if (type == "images") { + layer_param->mutable_image_data_param()->set_source( + v0_layer_param.source()); + } else if (type == "window_data") { + layer_param->mutable_window_data_param()->set_source( + v0_layer_param.source()); + } else if (type == "infogain_loss") { + layer_param->mutable_infogain_loss_param()->set_source( + v0_layer_param.source()); + } else { + LOG(ERROR) << "Unknown parameter source for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_scale()) { + layer_param->mutable_transform_param()-> + set_scale(v0_layer_param.scale()); + } + if (v0_layer_param.has_meanfile()) { + layer_param->mutable_transform_param()-> + set_mean_file(v0_layer_param.meanfile()); + } + if (v0_layer_param.has_batchsize()) { + if (type == "data") { + layer_param->mutable_data_param()->set_batch_size( + v0_layer_param.batchsize()); + } else if (type == "hdf5_data") { + layer_param->mutable_hdf5_data_param()->set_batch_size( + v0_layer_param.batchsize()); + } else if (type == "images") { + layer_param->mutable_image_data_param()->set_batch_size( + v0_layer_param.batchsize()); + } else if (type == "window_data") { + layer_param->mutable_window_data_param()->set_batch_size( + v0_layer_param.batchsize()); + } else { + LOG(ERROR) << "Unknown parameter batchsize for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_cropsize()) { + layer_param->mutable_transform_param()-> + set_crop_size(v0_layer_param.cropsize()); + } + if (v0_layer_param.has_mirror()) { + layer_param->mutable_transform_param()-> + set_mirror(v0_layer_param.mirror()); + } + if (v0_layer_param.has_rand_skip()) { + if (type == "data") { + layer_param->mutable_data_param()->set_rand_skip( + 
v0_layer_param.rand_skip()); + } else if (type == "images") { + layer_param->mutable_image_data_param()->set_rand_skip( + v0_layer_param.rand_skip()); + } else { + LOG(ERROR) << "Unknown parameter rand_skip for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_shuffle_images()) { + if (type == "images") { + layer_param->mutable_image_data_param()->set_shuffle( + v0_layer_param.shuffle_images()); + } else { + LOG(ERROR) << "Unknown parameter shuffle for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_new_height()) { + if (type == "images") { + layer_param->mutable_image_data_param()->set_new_height( + v0_layer_param.new_height()); + } else { + LOG(ERROR) << "Unknown parameter new_height for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_new_width()) { + if (type == "images") { + layer_param->mutable_image_data_param()->set_new_width( + v0_layer_param.new_width()); + } else { + LOG(ERROR) << "Unknown parameter new_width for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_concat_dim()) { + if (type == "concat") { + layer_param->mutable_concat_param()->set_concat_dim( + v0_layer_param.concat_dim()); + } else { + LOG(ERROR) << "Unknown parameter concat_dim for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_det_fg_threshold()) { + if (type == "window_data") { + layer_param->mutable_window_data_param()->set_fg_threshold( + v0_layer_param.det_fg_threshold()); + } else { + LOG(ERROR) << "Unknown parameter det_fg_threshold for layer type " + << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_det_bg_threshold()) { + if (type == "window_data") { + layer_param->mutable_window_data_param()->set_bg_threshold( + v0_layer_param.det_bg_threshold()); + } else { + LOG(ERROR) << "Unknown parameter det_bg_threshold for layer type " + << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_det_fg_fraction()) { + if (type == "window_data") { + layer_param->mutable_window_data_param()->set_fg_fraction( + v0_layer_param.det_fg_fraction()); + } else { + LOG(ERROR) << "Unknown parameter det_fg_fraction for layer type " + << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_det_context_pad()) { + if (type == "window_data") { + layer_param->mutable_window_data_param()->set_context_pad( + v0_layer_param.det_context_pad()); + } else { + LOG(ERROR) << "Unknown parameter det_context_pad for layer type " + << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_det_crop_mode()) { + if (type == "window_data") { + layer_param->mutable_window_data_param()->set_crop_mode( + v0_layer_param.det_crop_mode()); + } else { + LOG(ERROR) << "Unknown parameter det_crop_mode for layer type " + << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_hdf5_output_param()) { + if (type == "hdf5_output") { + layer_param->mutable_hdf5_output_param()->CopyFrom( + v0_layer_param.hdf5_output_param()); + } else { + LOG(ERROR) << "Unknown parameter hdf5_output_param for layer type " + << type; + is_fully_compatible = false; + } + } + } + return is_fully_compatible; +} + +V1LayerParameter_LayerType UpgradeV0LayerType(const string& type) { + if (type == "accuracy") { + return V1LayerParameter_LayerType_ACCURACY; + } else if (type == "bnll") { + return V1LayerParameter_LayerType_BNLL; + } else if (type == "concat") { + return V1LayerParameter_LayerType_CONCAT; + } else if 
(type == "conv") { + return V1LayerParameter_LayerType_CONVOLUTION; + } else if (type == "data") { + return V1LayerParameter_LayerType_DATA; + } else if (type == "dropout") { + return V1LayerParameter_LayerType_DROPOUT; + } else if (type == "euclidean_loss") { + return V1LayerParameter_LayerType_EUCLIDEAN_LOSS; + } else if (type == "flatten") { + return V1LayerParameter_LayerType_FLATTEN; + } else if (type == "hdf5_data") { + return V1LayerParameter_LayerType_HDF5_DATA; + } else if (type == "hdf5_output") { + return V1LayerParameter_LayerType_HDF5_OUTPUT; + } else if (type == "im2col") { + return V1LayerParameter_LayerType_IM2COL; + } else if (type == "images") { + return V1LayerParameter_LayerType_IMAGE_DATA; + } else if (type == "infogain_loss") { + return V1LayerParameter_LayerType_INFOGAIN_LOSS; + } else if (type == "innerproduct") { + return V1LayerParameter_LayerType_INNER_PRODUCT; + } else if (type == "lrn") { + return V1LayerParameter_LayerType_LRN; + } else if (type == "multinomial_logistic_loss") { + return V1LayerParameter_LayerType_MULTINOMIAL_LOGISTIC_LOSS; + } else if (type == "pool") { + return V1LayerParameter_LayerType_POOLING; + } else if (type == "relu") { + return V1LayerParameter_LayerType_RELU; + } else if (type == "sigmoid") { + return V1LayerParameter_LayerType_SIGMOID; + } else if (type == "softmax") { + return V1LayerParameter_LayerType_SOFTMAX; + } else if (type == "softmax_loss") { + return V1LayerParameter_LayerType_SOFTMAX_LOSS; + } else if (type == "split") { + return V1LayerParameter_LayerType_SPLIT; + } else if (type == "tanh") { + return V1LayerParameter_LayerType_TANH; + } else if (type == "window_data") { + return V1LayerParameter_LayerType_WINDOW_DATA; + } else { + LOG(FATAL) << "Unknown layer name: " << type; + return V1LayerParameter_LayerType_NONE; + } +} + +bool NetNeedsDataUpgrade(const NetParameter& net_param) { + for (int i = 0; i < net_param.layers_size(); ++i) { + if (net_param.layers(i).type() == V1LayerParameter_LayerType_DATA) { + DataParameter layer_param = net_param.layers(i).data_param(); + if (layer_param.has_scale()) { return true; } + if (layer_param.has_mean_file()) { return true; } + if (layer_param.has_crop_size()) { return true; } + if (layer_param.has_mirror()) { return true; } + } + if (net_param.layers(i).type() == V1LayerParameter_LayerType_IMAGE_DATA) { + ImageDataParameter layer_param = net_param.layers(i).image_data_param(); + if (layer_param.has_scale()) { return true; } + if (layer_param.has_mean_file()) { return true; } + if (layer_param.has_crop_size()) { return true; } + if (layer_param.has_mirror()) { return true; } + } + if (net_param.layers(i).type() == V1LayerParameter_LayerType_WINDOW_DATA) { + WindowDataParameter layer_param = net_param.layers(i).window_data_param(); + if (layer_param.has_scale()) { return true; } + if (layer_param.has_mean_file()) { return true; } + if (layer_param.has_crop_size()) { return true; } + if (layer_param.has_mirror()) { return true; } + } + } + return false; +} + +#define CONVERT_LAYER_TRANSFORM_PARAM(TYPE, Name, param_name) \ + do { \ + if (net_param->layers(i).type() == V1LayerParameter_LayerType_##TYPE) { \ + Name##Parameter* layer_param = \ + net_param->mutable_layers(i)->mutable_##param_name##_param(); \ + TransformationParameter* transform_param = \ + net_param->mutable_layers(i)->mutable_transform_param(); \ + if (layer_param->has_scale()) { \ + transform_param->set_scale(layer_param->scale()); \ + layer_param->clear_scale(); \ + } \ + if (layer_param->has_mean_file()) { \ + 
transform_param->set_mean_file(layer_param->mean_file()); \ + layer_param->clear_mean_file(); \ + } \ + if (layer_param->has_crop_size()) { \ + transform_param->set_crop_size(layer_param->crop_size()); \ + layer_param->clear_crop_size(); \ + } \ + if (layer_param->has_mirror()) { \ + transform_param->set_mirror(layer_param->mirror()); \ + layer_param->clear_mirror(); \ + } \ + } \ + } while (0) + +void UpgradeNetDataTransformation(NetParameter* net_param) { + for (int i = 0; i < net_param->layers_size(); ++i) { + CONVERT_LAYER_TRANSFORM_PARAM(DATA, Data, data); + CONVERT_LAYER_TRANSFORM_PARAM(IMAGE_DATA, ImageData, image_data); + CONVERT_LAYER_TRANSFORM_PARAM(WINDOW_DATA, WindowData, window_data); + } +} + +bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) { + bool success = true; + if (NetNeedsV0ToV1Upgrade(*param)) { + // NetParameter was specified using the old style (V0LayerParameter); try to + // upgrade it. + LOG(INFO) << "Attempting to upgrade input file specified using deprecated " + << "V0LayerParameter: " << param_file; + NetParameter original_param(*param); + if (!UpgradeV0Net(original_param, param)) { + success = false; + LOG(ERROR) << "Warning: had one or more problems upgrading " + << "V0NetParameter to NetParameter (see above); continuing anyway."; + } else { + LOG(INFO) << "Successfully upgraded file specified using deprecated " + << "V0LayerParameter"; + } + LOG(WARNING) << "Note that future Caffe releases will not support " + << "V0NetParameter; use ./build/tools/upgrade_net_proto_text for " + << "prototxt and ./build/tools/upgrade_net_proto_binary for model " + << "weights upgrade this and any other net protos to the new format."; + } + // NetParameter uses old style data transformation fields; try to upgrade it. 
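Editor's aside (an illustrative sketch, not part of the upgrade_proto.cpp listing): the CONVERT_LAYER_TRANSFORM_PARAM macro above moves the four deprecated per-layer fields (scale, mean_file, crop_size, mirror) out of data_param / image_data_param / window_data_param and into the layer's shared transform_param. The field values below are made up; the message and function names are the ones used in this file.

#include "caffe/proto/caffe.pb.h"
#include "caffe/util/upgrade_proto.hpp"

void DataTransformUpgradeSketch() {
  caffe::NetParameter net;
  caffe::V1LayerParameter* layer = net.add_layers();
  layer->set_type(caffe::V1LayerParameter_LayerType_DATA);
  layer->mutable_data_param()->set_scale(0.00390625f);  // deprecated location
  layer->mutable_data_param()->set_crop_size(227);      // deprecated location
  // NetNeedsDataUpgrade(net) is now true; after the call below the values
  // live in layer->transform_param() and the deprecated fields are cleared.
  caffe::UpgradeNetDataTransformation(&net);
}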
+ if (NetNeedsDataUpgrade(*param)) { + LOG(INFO) << "Attempting to upgrade input file specified using deprecated " + << "transformation parameters: " << param_file; + UpgradeNetDataTransformation(param); + LOG(INFO) << "Successfully upgraded file specified using deprecated " + << "data transformation parameters."; + LOG(WARNING) << "Note that future Caffe releases will only support " + << "transform_param messages for transformation fields."; + } + if (NetNeedsV1ToV2Upgrade(*param)) { + LOG(INFO) << "Attempting to upgrade input file specified using deprecated " + << "V1LayerParameter: " << param_file; + NetParameter original_param(*param); + if (!UpgradeV1Net(original_param, param)) { + success = false; + LOG(ERROR) << "Warning: had one or more problems upgrading " + << "V1LayerParameter (see above); continuing anyway."; + } else { + LOG(INFO) << "Successfully upgraded file specified using deprecated " + << "V1LayerParameter"; + } + } + return success; +} + +bool UpgradeV1Net(const NetParameter& v1_net_param, NetParameter* net_param) { + bool is_fully_compatible = true; + if (v1_net_param.layer_size() > 0) { + LOG(ERROR) << "Input NetParameter to be upgraded already specifies 'layer' " + << "fields; these will be ignored for the upgrade."; + is_fully_compatible = false; + } + net_param->CopyFrom(v1_net_param); + net_param->clear_layers(); + net_param->clear_layer(); + for (int i = 0; i < v1_net_param.layers_size(); ++i) { + if (!UpgradeV1LayerParameter(v1_net_param.layers(i), + net_param->add_layer())) { + LOG(ERROR) << "Upgrade of input layer " << i << " failed."; + is_fully_compatible = false; + } + } + return is_fully_compatible; +} + +bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param, + LayerParameter* layer_param) { + layer_param->Clear(); + bool is_fully_compatible = true; + for (int i = 0; i < v1_layer_param.bottom_size(); ++i) { + layer_param->add_bottom(v1_layer_param.bottom(i)); + } + for (int i = 0; i < v1_layer_param.top_size(); ++i) { + layer_param->add_top(v1_layer_param.top(i)); + } + if (v1_layer_param.has_name()) { + layer_param->set_name(v1_layer_param.name()); + } + for (int i = 0; i < v1_layer_param.include_size(); ++i) { + layer_param->add_include()->CopyFrom(v1_layer_param.include(i)); + } + for (int i = 0; i < v1_layer_param.exclude_size(); ++i) { + layer_param->add_exclude()->CopyFrom(v1_layer_param.exclude(i)); + } + if (v1_layer_param.has_type()) { + layer_param->set_type(UpgradeV1LayerType(v1_layer_param.type())); + } + for (int i = 0; i < v1_layer_param.blobs_size(); ++i) { + layer_param->add_blobs()->CopyFrom(v1_layer_param.blobs(i)); + } + for (int i = 0; i < v1_layer_param.param_size(); ++i) { + while (layer_param->param_size() <= i) { layer_param->add_param(); } + layer_param->mutable_param(i)->set_name(v1_layer_param.param(i)); + } + ParamSpec_DimCheckMode mode; + for (int i = 0; i < v1_layer_param.blob_share_mode_size(); ++i) { + while (layer_param->param_size() <= i) { layer_param->add_param(); } + switch (v1_layer_param.blob_share_mode(i)) { + case V1LayerParameter_DimCheckMode_STRICT: + mode = ParamSpec_DimCheckMode_STRICT; + break; + case V1LayerParameter_DimCheckMode_PERMISSIVE: + mode = ParamSpec_DimCheckMode_PERMISSIVE; + break; + default: + LOG(FATAL) << "Unknown blob_share_mode: " + << v1_layer_param.blob_share_mode(i); + break; + } + layer_param->mutable_param(i)->set_share_mode(mode); + } + for (int i = 0; i < v1_layer_param.blobs_lr_size(); ++i) { + while (layer_param->param_size() <= i) { layer_param->add_param(); } + 
layer_param->mutable_param(i)->set_lr_mult(v1_layer_param.blobs_lr(i)); + } + for (int i = 0; i < v1_layer_param.weight_decay_size(); ++i) { + while (layer_param->param_size() <= i) { layer_param->add_param(); } + layer_param->mutable_param(i)->set_decay_mult( + v1_layer_param.weight_decay(i)); + } + for (int i = 0; i < v1_layer_param.loss_weight_size(); ++i) { + layer_param->add_loss_weight(v1_layer_param.loss_weight(i)); + } + if (v1_layer_param.has_accuracy_param()) { + layer_param->mutable_accuracy_param()->CopyFrom( + v1_layer_param.accuracy_param()); + } + if (v1_layer_param.has_argmax_param()) { + layer_param->mutable_argmax_param()->CopyFrom( + v1_layer_param.argmax_param()); + } + if (v1_layer_param.has_concat_param()) { + layer_param->mutable_concat_param()->CopyFrom( + v1_layer_param.concat_param()); + } + if (v1_layer_param.has_contrastive_loss_param()) { + layer_param->mutable_contrastive_loss_param()->CopyFrom( + v1_layer_param.contrastive_loss_param()); + } + if (v1_layer_param.has_convolution_param()) { + layer_param->mutable_convolution_param()->CopyFrom( + v1_layer_param.convolution_param()); + } + if (v1_layer_param.has_data_param()) { + layer_param->mutable_data_param()->CopyFrom( + v1_layer_param.data_param()); + } + if (v1_layer_param.has_dropout_param()) { + layer_param->mutable_dropout_param()->CopyFrom( + v1_layer_param.dropout_param()); + } + if (v1_layer_param.has_dummy_data_param()) { + layer_param->mutable_dummy_data_param()->CopyFrom( + v1_layer_param.dummy_data_param()); + } + if (v1_layer_param.has_eltwise_param()) { + layer_param->mutable_eltwise_param()->CopyFrom( + v1_layer_param.eltwise_param()); + } + if (v1_layer_param.has_exp_param()) { + layer_param->mutable_exp_param()->CopyFrom( + v1_layer_param.exp_param()); + } + if (v1_layer_param.has_hdf5_data_param()) { + layer_param->mutable_hdf5_data_param()->CopyFrom( + v1_layer_param.hdf5_data_param()); + } + if (v1_layer_param.has_hdf5_output_param()) { + layer_param->mutable_hdf5_output_param()->CopyFrom( + v1_layer_param.hdf5_output_param()); + } + if (v1_layer_param.has_hinge_loss_param()) { + layer_param->mutable_hinge_loss_param()->CopyFrom( + v1_layer_param.hinge_loss_param()); + } + if (v1_layer_param.has_image_data_param()) { + layer_param->mutable_image_data_param()->CopyFrom( + v1_layer_param.image_data_param()); + } + if (v1_layer_param.has_infogain_loss_param()) { + layer_param->mutable_infogain_loss_param()->CopyFrom( + v1_layer_param.infogain_loss_param()); + } + if (v1_layer_param.has_inner_product_param()) { + layer_param->mutable_inner_product_param()->CopyFrom( + v1_layer_param.inner_product_param()); + } + if (v1_layer_param.has_lrn_param()) { + layer_param->mutable_lrn_param()->CopyFrom( + v1_layer_param.lrn_param()); + } + if (v1_layer_param.has_memory_data_param()) { + layer_param->mutable_memory_data_param()->CopyFrom( + v1_layer_param.memory_data_param()); + } + if (v1_layer_param.has_mvn_param()) { + layer_param->mutable_mvn_param()->CopyFrom( + v1_layer_param.mvn_param()); + } + if (v1_layer_param.has_pooling_param()) { + layer_param->mutable_pooling_param()->CopyFrom( + v1_layer_param.pooling_param()); + } + if (v1_layer_param.has_power_param()) { + layer_param->mutable_power_param()->CopyFrom( + v1_layer_param.power_param()); + } + if (v1_layer_param.has_relu_param()) { + layer_param->mutable_relu_param()->CopyFrom( + v1_layer_param.relu_param()); + } + if (v1_layer_param.has_sigmoid_param()) { + layer_param->mutable_sigmoid_param()->CopyFrom( + v1_layer_param.sigmoid_param()); 
+ } + if (v1_layer_param.has_softmax_param()) { + layer_param->mutable_softmax_param()->CopyFrom( + v1_layer_param.softmax_param()); + } + if (v1_layer_param.has_slice_param()) { + layer_param->mutable_slice_param()->CopyFrom( + v1_layer_param.slice_param()); + } + if (v1_layer_param.has_tanh_param()) { + layer_param->mutable_tanh_param()->CopyFrom( + v1_layer_param.tanh_param()); + } + if (v1_layer_param.has_threshold_param()) { + layer_param->mutable_threshold_param()->CopyFrom( + v1_layer_param.threshold_param()); + } + if (v1_layer_param.has_window_data_param()) { + layer_param->mutable_window_data_param()->CopyFrom( + v1_layer_param.window_data_param()); + } + if (v1_layer_param.has_transform_param()) { + layer_param->mutable_transform_param()->CopyFrom( + v1_layer_param.transform_param()); + } + if (v1_layer_param.has_loss_param()) { + layer_param->mutable_loss_param()->CopyFrom( + v1_layer_param.loss_param()); + } + if (v1_layer_param.has_layer()) { + LOG(ERROR) << "Input NetParameter has V0 layer -- ignoring."; + is_fully_compatible = false; + } + return is_fully_compatible; +} + +const char* UpgradeV1LayerType(const V1LayerParameter_LayerType type) { + switch (type) { + case V1LayerParameter_LayerType_NONE: + return ""; + case V1LayerParameter_LayerType_ABSVAL: + return "AbsVal"; + case V1LayerParameter_LayerType_ACCURACY: + return "Accuracy"; + case V1LayerParameter_LayerType_ARGMAX: + return "ArgMax"; + case V1LayerParameter_LayerType_BNLL: + return "BNLL"; + case V1LayerParameter_LayerType_CONCAT: + return "Concat"; + case V1LayerParameter_LayerType_CONTRASTIVE_LOSS: + return "ContrastiveLoss"; + case V1LayerParameter_LayerType_CONVOLUTION: + return "Convolution"; + case V1LayerParameter_LayerType_DECONVOLUTION: + return "Deconvolution"; + case V1LayerParameter_LayerType_DATA: + return "Data"; + case V1LayerParameter_LayerType_DROPOUT: + return "Dropout"; + case V1LayerParameter_LayerType_DUMMY_DATA: + return "DummyData"; + case V1LayerParameter_LayerType_EUCLIDEAN_LOSS: + return "EuclideanLoss"; + case V1LayerParameter_LayerType_ELTWISE: + return "Eltwise"; + case V1LayerParameter_LayerType_EXP: + return "Exp"; + case V1LayerParameter_LayerType_FLATTEN: + return "Flatten"; + case V1LayerParameter_LayerType_HDF5_DATA: + return "HDF5Data"; + case V1LayerParameter_LayerType_HDF5_OUTPUT: + return "HDF5Output"; + case V1LayerParameter_LayerType_HINGE_LOSS: + return "HingeLoss"; + case V1LayerParameter_LayerType_IM2COL: + return "Im2col"; + case V1LayerParameter_LayerType_IMAGE_DATA: + return "ImageData"; + case V1LayerParameter_LayerType_INFOGAIN_LOSS: + return "InfogainLoss"; + case V1LayerParameter_LayerType_INNER_PRODUCT: + return "InnerProduct"; + case V1LayerParameter_LayerType_LRN: + return "LRN"; + case V1LayerParameter_LayerType_MEMORY_DATA: + return "MemoryData"; + case V1LayerParameter_LayerType_MULTINOMIAL_LOGISTIC_LOSS: + return "MultinomialLogisticLoss"; + case V1LayerParameter_LayerType_MVN: + return "MVN"; + case V1LayerParameter_LayerType_POOLING: + return "Pooling"; + case V1LayerParameter_LayerType_POWER: + return "Power"; + case V1LayerParameter_LayerType_RELU: + return "ReLU"; + case V1LayerParameter_LayerType_SIGMOID: + return "Sigmoid"; + case V1LayerParameter_LayerType_SIGMOID_CROSS_ENTROPY_LOSS: + return "SigmoidCrossEntropyLoss"; + case V1LayerParameter_LayerType_SILENCE: + return "Silence"; + case V1LayerParameter_LayerType_SOFTMAX: + return "Softmax"; + case V1LayerParameter_LayerType_SOFTMAX_LOSS: + return "SoftmaxWithLoss"; + case 
V1LayerParameter_LayerType_SPLIT: + return "Split"; + case V1LayerParameter_LayerType_SLICE: + return "Slice"; + case V1LayerParameter_LayerType_TANH: + return "TanH"; + case V1LayerParameter_LayerType_WINDOW_DATA: + return "WindowData"; + case V1LayerParameter_LayerType_THRESHOLD: + return "Threshold"; + default: + LOG(FATAL) << "Unknown V1LayerParameter layer type: " << type; + return ""; + } +} + +void ReadNetParamsFromTextFileOrDie(const string& param_file, + NetParameter* param) { + CHECK(ReadProtoFromTextFile(param_file, param)) + << "Failed to parse NetParameter file: " << param_file; + UpgradeNetAsNeeded(param_file, param); +} + +void ReadNetParamsFromBinaryFileOrDie(const string& param_file, + NetParameter* param) { + CHECK(ReadProtoFromBinaryFile(param_file, param)) + << "Failed to parse NetParameter file: " << param_file; + UpgradeNetAsNeeded(param_file, param); +} + +} // namespace caffe diff --git a/src/gtest/CMakeLists.txt b/src/gtest/CMakeLists.txt new file mode 100755 index 0000000..ef7ff7e --- /dev/null +++ b/src/gtest/CMakeLists.txt @@ -0,0 +1,5 @@ +add_library(gtest STATIC EXCLUDE_FROM_ALL gtest.h gtest-all.cpp) +caffe_default_properties(gtest) + +#add_library(gtest_main gtest_main.cc) +#target_link_libraries(gtest_main gtest) diff --git a/src/gtest/gtest-all.cpp b/src/gtest/gtest-all.cpp new file mode 100755 index 0000000..9261974 --- /dev/null +++ b/src/gtest/gtest-all.cpp @@ -0,0 +1,9117 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: mheule@google.com (Markus Heule) +// +// Google C++ Testing Framework (Google Test) +// +// Sometimes it's desirable to build Google Test by compiling a single file. +// This file serves this purpose. + +// This line ensures that gtest.h can be compiled on its own, even +// when it's fused. +#include "gtest/gtest.h" + +// The following lines pull in the real gtest *.cc files. +// Copyright 2005, Google Inc. +// All rights reserved. 
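Editor's aside (an illustrative sketch, not part of the patch): the UpgradeV0*/UpgradeV1* helpers above are typically reached through the ReadNetParamsFrom*FileOrDie entry points at the end of upgrade_proto.cpp, which parse the file and then run UpgradeNetAsNeeded in place before the caller ever sees the message. The file names here are placeholders.

#include <string>

#include "caffe/proto/caffe.pb.h"
#include "caffe/util/upgrade_proto.hpp"

void LoadPossiblyOldNet() {
  caffe::NetParameter net_param;
  // Upgrades V0/V1 layer definitions and deprecated transformation fields
  // transparently, logging warnings for anything that is not fully compatible.
  caffe::ReadNetParamsFromTextFileOrDie("models/old_net.prototxt", &net_param);

  caffe::NetParameter trained;
  caffe::ReadNetParamsFromBinaryFileOrDie("models/old_net.caffemodel", &trained);
}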
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) + +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// Utilities for testing Google Test itself and code that uses Google Test +// (e.g. frameworks built on top of Google Test). 
+ +#ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_ +#define GTEST_INCLUDE_GTEST_GTEST_SPI_H_ + + +namespace testing { + +// This helper class can be used to mock out Google Test failure reporting +// so that we can test Google Test or code that builds on Google Test. +// +// An object of this class appends a TestPartResult object to the +// TestPartResultArray object given in the constructor whenever a Google Test +// failure is reported. It can either intercept only failures that are +// generated in the same thread that created this object or it can intercept +// all generated failures. The scope of this mock object can be controlled with +// the second argument to the two arguments constructor. +class GTEST_API_ ScopedFakeTestPartResultReporter + : public TestPartResultReporterInterface { + public: + // The two possible mocking modes of this object. + enum InterceptMode { + INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures. + INTERCEPT_ALL_THREADS // Intercepts all failures. + }; + + // The c'tor sets this object as the test part result reporter used + // by Google Test. The 'result' parameter specifies where to report the + // results. This reporter will only catch failures generated in the current + // thread. DEPRECATED + explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result); + + // Same as above, but you can choose the interception scope of this object. + ScopedFakeTestPartResultReporter(InterceptMode intercept_mode, + TestPartResultArray* result); + + // The d'tor restores the previous test part result reporter. + virtual ~ScopedFakeTestPartResultReporter(); + + // Appends the TestPartResult object to the TestPartResultArray + // received in the constructor. + // + // This method is from the TestPartResultReporterInterface + // interface. + virtual void ReportTestPartResult(const TestPartResult& result); + private: + void Init(); + + const InterceptMode intercept_mode_; + TestPartResultReporterInterface* old_reporter_; + TestPartResultArray* const result_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter); +}; + +namespace internal { + +// A helper class for implementing EXPECT_FATAL_FAILURE() and +// EXPECT_NONFATAL_FAILURE(). Its destructor verifies that the given +// TestPartResultArray contains exactly one failure that has the given +// type and contains the given substring. If that's not the case, a +// non-fatal failure will be generated. +class GTEST_API_ SingleFailureChecker { + public: + // The constructor remembers the arguments. + SingleFailureChecker(const TestPartResultArray* results, + TestPartResult::Type type, + const string& substr); + ~SingleFailureChecker(); + private: + const TestPartResultArray* const results_; + const TestPartResult::Type type_; + const string substr_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker); +}; + +} // namespace internal + +} // namespace testing + +// A set of macros for testing Google Test assertions or code that's expected +// to generate Google Test fatal failures. It verifies that the given +// statement will cause exactly one fatal Google Test failure with 'substr' +// being part of the failure message. +// +// There are two different versions of this macro. EXPECT_FATAL_FAILURE only +// affects and considers failures generated in the current thread and +// EXPECT_FATAL_FAILURE_ON_ALL_THREADS does the same but for all threads. +// +// The verification of the assertion is done correctly even when the statement +// throws an exception or aborts the current function. 
+//
+// Known restrictions:
+//   - 'statement' cannot reference local non-static variables or
+//     non-static members of the current object.
+//   - 'statement' cannot return a value.
+//   - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works. The AcceptsMacroThatExpandsToUnprotectedComma test in
+// gtest_unittest.cc will fail to compile if we do that.
+#define EXPECT_FATAL_FAILURE(statement, substr) \
+  do { \
+    class GTestExpectFatalFailureHelper {\
+     public:\
+      static void Execute() { statement; }\
+    };\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter:: \
+          INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+      GTestExpectFatalFailureHelper::Execute();\
+    }\
+  } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+  do { \
+    class GTestExpectFatalFailureHelper {\
+     public:\
+      static void Execute() { statement; }\
+    };\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter:: \
+          INTERCEPT_ALL_THREADS, &gtest_failures);\
+      GTestExpectFatalFailureHelper::Execute();\
+    }\
+  } while (::testing::internal::AlwaysFalse())
+
+// A macro for testing Google Test assertions or code that's expected to
+// generate Google Test non-fatal failures. It asserts that the given
+// statement will cause exactly one non-fatal Google Test failure with 'substr'
+// being part of the failure message.
+//
+// There are two different versions of this macro. EXPECT_NONFATAL_FAILURE only
+// affects and considers failures generated in the current thread and
+// EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
+//
+// 'statement' is allowed to reference local variables and members of
+// the current object.
+//
+// The verification of the assertion is done correctly even when the statement
+// throws an exception or aborts the current function.
+//
+// Known restrictions:
+//   - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works. If we do that, the code won't compile when the user gives
+// EXPECT_NONFATAL_FAILURE() a statement that contains a macro that
+// expands to code containing an unprotected comma. The
+// AcceptsMacroThatExpandsToUnprotectedComma test in gtest_unittest.cc
+// catches that.
+//
+// For the same reason, we have to write
+//   if (::testing::internal::AlwaysTrue()) { statement; }
+// instead of
+//   GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+// to avoid an MSVC warning on unreachable code.
+#define EXPECT_NONFATAL_FAILURE(statement, substr) \
+  do {\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+        (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter:: \
+          INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+      if (::testing::internal::AlwaysTrue()) { statement; }\
+    }\
+  } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+  do {\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+        (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS,\
+          &gtest_failures);\
+      if (::testing::internal::AlwaysTrue()) { statement; }\
+    }\
+  } while (::testing::internal::AlwaysFalse())
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include  // NOLINT
+#include
+#include
+
+#if GTEST_OS_LINUX
+
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+
+# include  // NOLINT
+# include  // NOLINT
+# include  // NOLINT
+// Declares vsnprintf(). This header is not available on Windows.
+# include  // NOLINT
+# include  // NOLINT
+# include  // NOLINT
+# include  // NOLINT
+# include
+
+#elif GTEST_OS_SYMBIAN
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include  // NOLINT
+
+#elif GTEST_OS_ZOS
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include  // NOLINT
+
+// On z/OS we additionally need strings.h for strcasecmp.
+# include  // NOLINT
+
+#elif GTEST_OS_WINDOWS_MOBILE  // We are on Windows CE.
+
+# include  // NOLINT
+
+#elif GTEST_OS_WINDOWS  // We are on Windows proper.
+
+# include  // NOLINT
+# include  // NOLINT
+# include  // NOLINT
+# include  // NOLINT
+
+# if GTEST_OS_WINDOWS_MINGW
+// MinGW has gettimeofday() but not _ftime64().
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+// TODO(kenton@google.com): There are other ways to get the time on
+// Windows, like GetTickCount() or GetSystemTimeAsFileTime(). MinGW
+// supports these. consider using them instead.
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include  // NOLINT
+# endif  // GTEST_OS_WINDOWS_MINGW
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+# include  // NOLINT
+
+#else
+
+// Assume other platforms have gettimeofday().
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+# include  // NOLINT
+# include  // NOLINT
+
+#endif  // GTEST_OS_LINUX
+
+#if GTEST_HAS_EXCEPTIONS
+# include
+#endif
+
+#if GTEST_CAN_STREAM_RESULTS_
+# include  // NOLINT
+# include  // NOLINT
+#endif
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+// Copyright 2005, Google Inc.
+// All rights reserved.
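Editor's aside (an illustrative usage sketch, not part of the bundled gtest sources): EXPECT_NONFATAL_FAILURE above runs a statement under a ScopedFakeTestPartResultReporter, intercepts the failures it produces, and then asserts via SingleFailureChecker that exactly one non-fatal failure containing the substring occurred. In a standalone gtest installation these macros come from gtest/gtest-spi.h; the helper function below is hypothetical.

#include "gtest/gtest-spi.h"

namespace {

// Hypothetical helper that deliberately reports one non-fatal failure.
void ReportOneMismatch() {
  EXPECT_EQ(1, 2) << "expected mismatch";
}

TEST(SpiExample, CapturesExactlyOneNonFatalFailure) {
  // Passes iff ReportOneMismatch() produces exactly one non-fatal failure
  // whose message contains "expected mismatch"; the failure is intercepted
  // instead of failing this test.
  EXPECT_NONFATAL_FAILURE(ReportOneMismatch(), "expected mismatch");
}

}  // namespace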
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Utility functions and classes used by the Google C++ testing framework. +// +// Author: wan@google.com (Zhanyong Wan) +// +// This file contains purely Google Test's internal implementation. Please +// DO NOT #INCLUDE IT IN A USER PROGRAM. + +#ifndef GTEST_SRC_GTEST_INTERNAL_INL_H_ +#define GTEST_SRC_GTEST_INTERNAL_INL_H_ + +// GTEST_IMPLEMENTATION_ is defined to 1 iff the current translation unit is +// part of Google Test's implementation; otherwise it's undefined. +#if !GTEST_IMPLEMENTATION_ +// A user is trying to include this from his code - just say no. +# error "gtest-internal-inl.h is part of Google Test's internal implementation." +# error "It must not be included except by Google Test itself." +#endif // GTEST_IMPLEMENTATION_ + +#ifndef _WIN32_WCE +# include +#endif // !_WIN32_WCE +#include +#include // For strtoll/_strtoul64/malloc/free. +#include // For memmove. + +#include +#include +#include + + +#if GTEST_OS_WINDOWS +# include // NOLINT +#endif // GTEST_OS_WINDOWS + + +namespace testing { + +// Declares the flags. +// +// We don't want the users to modify this flag in the code, but want +// Google Test's own unit tests to be able to access it. Therefore we +// declare it here as opposed to in gtest.h. +GTEST_DECLARE_bool_(death_test_use_fork); + +namespace internal { + +// The value of GetTestTypeId() as seen from within the Google Test +// library. This is solely for testing GetTestTypeId(). +GTEST_API_ extern const TypeId kTestTypeIdInGoogleTest; + +// Names of the flags (needed for parsing Google Test flags). 
+const char kAlsoRunDisabledTestsFlag[] = "also_run_disabled_tests"; +const char kBreakOnFailureFlag[] = "break_on_failure"; +const char kCatchExceptionsFlag[] = "catch_exceptions"; +const char kColorFlag[] = "color"; +const char kFilterFlag[] = "filter"; +const char kListTestsFlag[] = "list_tests"; +const char kOutputFlag[] = "output"; +const char kPrintTimeFlag[] = "print_time"; +const char kRandomSeedFlag[] = "random_seed"; +const char kRepeatFlag[] = "repeat"; +const char kShuffleFlag[] = "shuffle"; +const char kStackTraceDepthFlag[] = "stack_trace_depth"; +const char kStreamResultToFlag[] = "stream_result_to"; +const char kThrowOnFailureFlag[] = "throw_on_failure"; + +// A valid random seed must be in [1, kMaxRandomSeed]. +const int kMaxRandomSeed = 99999; + +// g_help_flag is true iff the --help flag or an equivalent form is +// specified on the command line. +GTEST_API_ extern bool g_help_flag; + +// Returns the current time in milliseconds. +GTEST_API_ TimeInMillis GetTimeInMillis(); + +// Returns true iff Google Test should use colors in the output. +GTEST_API_ bool ShouldUseColor(bool stdout_is_tty); + +// Formats the given time in milliseconds as seconds. +GTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms); + +// Parses a string for an Int32 flag, in the form of "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +GTEST_API_ bool ParseInt32Flag( + const char* str, const char* flag, Int32* value); + +// Returns a random seed in range [1, kMaxRandomSeed] based on the +// given --gtest_random_seed flag value. +inline int GetRandomSeedFromFlag(Int32 random_seed_flag) { + const unsigned int raw_seed = (random_seed_flag == 0) ? + static_cast(GetTimeInMillis()) : + static_cast(random_seed_flag); + + // Normalizes the actual seed to range [1, kMaxRandomSeed] such that + // it's easy to type. + const int normalized_seed = + static_cast((raw_seed - 1U) % + static_cast(kMaxRandomSeed)) + 1; + return normalized_seed; +} + +// Returns the first valid random seed after 'seed'. The behavior is +// undefined if 'seed' is invalid. The seed after kMaxRandomSeed is +// considered to be 1. +inline int GetNextRandomSeed(int seed) { + GTEST_CHECK_(1 <= seed && seed <= kMaxRandomSeed) + << "Invalid random seed " << seed << " - must be in [1, " + << kMaxRandomSeed << "]."; + const int next_seed = seed + 1; + return (next_seed > kMaxRandomSeed) ? 1 : next_seed; +} + +// This class saves the values of all Google Test flags in its c'tor, and +// restores them in its d'tor. +class GTestFlagSaver { + public: + // The c'tor. + GTestFlagSaver() { + also_run_disabled_tests_ = GTEST_FLAG(also_run_disabled_tests); + break_on_failure_ = GTEST_FLAG(break_on_failure); + catch_exceptions_ = GTEST_FLAG(catch_exceptions); + color_ = GTEST_FLAG(color); + death_test_style_ = GTEST_FLAG(death_test_style); + death_test_use_fork_ = GTEST_FLAG(death_test_use_fork); + filter_ = GTEST_FLAG(filter); + internal_run_death_test_ = GTEST_FLAG(internal_run_death_test); + list_tests_ = GTEST_FLAG(list_tests); + output_ = GTEST_FLAG(output); + print_time_ = GTEST_FLAG(print_time); + random_seed_ = GTEST_FLAG(random_seed); + repeat_ = GTEST_FLAG(repeat); + shuffle_ = GTEST_FLAG(shuffle); + stack_trace_depth_ = GTEST_FLAG(stack_trace_depth); + stream_result_to_ = GTEST_FLAG(stream_result_to); + throw_on_failure_ = GTEST_FLAG(throw_on_failure); + } + + // The d'tor is not virtual. DO NOT INHERIT FROM THIS CLASS. 
+ ~GTestFlagSaver() { + GTEST_FLAG(also_run_disabled_tests) = also_run_disabled_tests_; + GTEST_FLAG(break_on_failure) = break_on_failure_; + GTEST_FLAG(catch_exceptions) = catch_exceptions_; + GTEST_FLAG(color) = color_; + GTEST_FLAG(death_test_style) = death_test_style_; + GTEST_FLAG(death_test_use_fork) = death_test_use_fork_; + GTEST_FLAG(filter) = filter_; + GTEST_FLAG(internal_run_death_test) = internal_run_death_test_; + GTEST_FLAG(list_tests) = list_tests_; + GTEST_FLAG(output) = output_; + GTEST_FLAG(print_time) = print_time_; + GTEST_FLAG(random_seed) = random_seed_; + GTEST_FLAG(repeat) = repeat_; + GTEST_FLAG(shuffle) = shuffle_; + GTEST_FLAG(stack_trace_depth) = stack_trace_depth_; + GTEST_FLAG(stream_result_to) = stream_result_to_; + GTEST_FLAG(throw_on_failure) = throw_on_failure_; + } + private: + // Fields for saving the original values of flags. + bool also_run_disabled_tests_; + bool break_on_failure_; + bool catch_exceptions_; + String color_; + String death_test_style_; + bool death_test_use_fork_; + String filter_; + String internal_run_death_test_; + bool list_tests_; + String output_; + bool print_time_; + internal::Int32 random_seed_; + internal::Int32 repeat_; + bool shuffle_; + internal::Int32 stack_trace_depth_; + String stream_result_to_; + bool throw_on_failure_; +} GTEST_ATTRIBUTE_UNUSED_; + +// Converts a Unicode code point to a narrow string in UTF-8 encoding. +// code_point parameter is of type UInt32 because wchar_t may not be +// wide enough to contain a code point. +// The output buffer str must containt at least 32 characters. +// The function returns the address of the output buffer. +// If the code_point is not a valid Unicode code point +// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be output +// as '(Invalid Unicode 0xXXXXXXXX)'. +GTEST_API_ char* CodePointToUtf8(UInt32 code_point, char* str); + +// Converts a wide string to a narrow string in UTF-8 encoding. +// The wide string is assumed to have the following encoding: +// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS) +// UTF-32 if sizeof(wchar_t) == 4 (on Linux) +// Parameter str points to a null-terminated wide string. +// Parameter num_chars may additionally limit the number +// of wchar_t characters processed. -1 is used when the entire string +// should be processed. +// If the string contains code points that are not valid Unicode code points +// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output +// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding +// and contains invalid UTF-16 surrogate pairs, values in those pairs +// will be encoded as individual Unicode characters from Basic Normal Plane. +GTEST_API_ String WideStringToUtf8(const wchar_t* str, int num_chars); + +// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file +// if the variable is present. If a file already exists at this location, this +// function will write over it. If the variable is present, but the file cannot +// be created, prints an error and exits. +void WriteToShardStatusFileIfNeeded(); + +// Checks whether sharding is enabled by examining the relevant +// environment variable values. If the variables are present, +// but inconsistent (e.g., shard_index >= total_shards), prints +// an error and exits. If in_subprocess_for_death_test, sharding is +// disabled because it must only be applied to the original test +// process. Otherwise, we could filter out death tests we intended to execute. 
+GTEST_API_ bool ShouldShard(const char* total_shards_str, + const char* shard_index_str, + bool in_subprocess_for_death_test); + +// Parses the environment variable var as an Int32. If it is unset, +// returns default_val. If it is not an Int32, prints an error and +// and aborts. +GTEST_API_ Int32 Int32FromEnvOrDie(const char* env_var, Int32 default_val); + +// Given the total number of shards, the shard index, and the test id, +// returns true iff the test should be run on this shard. The test id is +// some arbitrary but unique non-negative integer assigned to each test +// method. Assumes that 0 <= shard_index < total_shards. +GTEST_API_ bool ShouldRunTestOnShard( + int total_shards, int shard_index, int test_id); + +// STL container utilities. + +// Returns the number of elements in the given container that satisfy +// the given predicate. +template +inline int CountIf(const Container& c, Predicate predicate) { + // Implemented as an explicit loop since std::count_if() in libCstd on + // Solaris has a non-standard signature. + int count = 0; + for (typename Container::const_iterator it = c.begin(); it != c.end(); ++it) { + if (predicate(*it)) + ++count; + } + return count; +} + +// Applies a function/functor to each element in the container. +template +void ForEach(const Container& c, Functor functor) { + std::for_each(c.begin(), c.end(), functor); +} + +// Returns the i-th element of the vector, or default_value if i is not +// in range [0, v.size()). +template +inline E GetElementOr(const std::vector& v, int i, E default_value) { + return (i < 0 || i >= static_cast(v.size())) ? default_value : v[i]; +} + +// Performs an in-place shuffle of a range of the vector's elements. +// 'begin' and 'end' are element indices as an STL-style range; +// i.e. [begin, end) are shuffled, where 'end' == size() means to +// shuffle to the end of the vector. +template +void ShuffleRange(internal::Random* random, int begin, int end, + std::vector* v) { + const int size = static_cast(v->size()); + GTEST_CHECK_(0 <= begin && begin <= size) + << "Invalid shuffle range start " << begin << ": must be in range [0, " + << size << "]."; + GTEST_CHECK_(begin <= end && end <= size) + << "Invalid shuffle range finish " << end << ": must be in range [" + << begin << ", " << size << "]."; + + // Fisher-Yates shuffle, from + // http://en.wikipedia.org/wiki/Fisher-Yates_shuffle + for (int range_width = end - begin; range_width >= 2; range_width--) { + const int last_in_range = begin + range_width - 1; + const int selected = begin + random->Generate(range_width); + std::swap((*v)[selected], (*v)[last_in_range]); + } +} + +// Performs an in-place shuffle of the vector's elements. +template +inline void Shuffle(internal::Random* random, std::vector* v) { + ShuffleRange(random, 0, static_cast(v->size()), v); +} + +// A function for deleting an object. Handy for being used as a +// functor. +template +static void Delete(T* x) { + delete x; +} + +// A predicate that checks the key of a TestProperty against a known key. +// +// TestPropertyKeyIs is copyable. +class TestPropertyKeyIs { + public: + // Constructor. + // + // TestPropertyKeyIs has NO default constructor. + explicit TestPropertyKeyIs(const char* key) + : key_(key) {} + + // Returns true iff the test name of test property matches on key_. + bool operator()(const TestProperty& test_property) const { + return String(test_property.key()).Compare(key_) == 0; + } + + private: + String key_; +}; + +// Class UnitTestOptions. 
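Editor's aside (an illustrative sketch, not part of the bundled gtest sources) on the sharding contract declared above: gtest reads the total shard count and this process's shard index from the environment (GTEST_TOTAL_SHARDS / GTEST_SHARD_INDEX), and ShouldRunTestOnShard then keeps each sequential test id on exactly one shard. The modulo rule below is an assumption made only to make that contract concrete.

#include <cstdio>

// Assumed partitioning rule: test i runs on shard (i % total_shards).
static bool RunsOnThisShard(int total_shards, int shard_index, int test_id) {
  return (test_id % total_shards) == shard_index;
}

int main() {
  const int total_shards = 3;  // e.g. GTEST_TOTAL_SHARDS=3
  const int shard_index = 1;   // e.g. GTEST_SHARD_INDEX=1
  for (int test_id = 0; test_id < 9; ++test_id) {
    std::printf("test %d -> %s\n", test_id,
                RunsOnThisShard(total_shards, shard_index, test_id)
                    ? "runs here" : "skipped");
  }
  return 0;
}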
+// +// This class contains functions for processing options the user +// specifies when running the tests. It has only static members. +// +// In most cases, the user can specify an option using either an +// environment variable or a command line flag. E.g. you can set the +// test filter using either GTEST_FILTER or --gtest_filter. If both +// the variable and the flag are present, the latter overrides the +// former. +class GTEST_API_ UnitTestOptions { + public: + // Functions for processing the gtest_output flag. + + // Returns the output format, or "" for normal printed output. + static String GetOutputFormat(); + + // Returns the absolute path of the requested output file, or the + // default (test_detail.xml in the original working directory) if + // none was explicitly specified. + static String GetAbsolutePathToOutputFile(); + + // Functions for processing the gtest_filter flag. + + // Returns true iff the wildcard pattern matches the string. The + // first ':' or '\0' character in pattern marks the end of it. + // + // This recursive algorithm isn't very efficient, but is clear and + // works well enough for matching test names, which are short. + static bool PatternMatchesString(const char *pattern, const char *str); + + // Returns true iff the user-specified filter matches the test case + // name and the test name. + static bool FilterMatchesTest(const String &test_case_name, + const String &test_name); + +#if GTEST_OS_WINDOWS + // Function for supporting the gtest_catch_exception flag. + + // Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the + // given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise. + // This function is useful as an __except condition. + static int GTestShouldProcessSEH(DWORD exception_code); +#endif // GTEST_OS_WINDOWS + + // Returns true if "name" matches the ':' separated list of glob-style + // filters in "filter". + static bool MatchesFilter(const String& name, const char* filter); +}; + +// Returns the current application's name, removing directory path if that +// is present. Used by UnitTestOptions::GetOutputFile. +GTEST_API_ FilePath GetCurrentExecutableName(); + +// The role interface for getting the OS stack trace as a string. +class OsStackTraceGetterInterface { + public: + OsStackTraceGetterInterface() {} + virtual ~OsStackTraceGetterInterface() {} + + // Returns the current OS stack trace as a String. Parameters: + // + // max_depth - the maximum number of stack frames to be included + // in the trace. + // skip_count - the number of top frames to be skipped; doesn't count + // against max_depth. + virtual String CurrentStackTrace(int max_depth, int skip_count) = 0; + + // UponLeavingGTest() should be called immediately before Google Test calls + // user code. It saves some information about the current stack that + // CurrentStackTrace() will use to find and hide Google Test stack frames. + virtual void UponLeavingGTest() = 0; + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface); +}; + +// A working implementation of the OsStackTraceGetterInterface interface. +class OsStackTraceGetter : public OsStackTraceGetterInterface { + public: + OsStackTraceGetter() : caller_frame_(NULL) {} + virtual String CurrentStackTrace(int max_depth, int skip_count); + virtual void UponLeavingGTest(); + + // This string is inserted in place of stack frames that are part of + // Google Test's implementation. 
+ static const char* const kElidedFramesMarker; + + private: + Mutex mutex_; // protects all internal state + + // We save the stack frame below the frame that calls user code. + // We do this because the address of the frame immediately below + // the user code changes between the call to UponLeavingGTest() + // and any calls to CurrentStackTrace() from within the user code. + void* caller_frame_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter); +}; + +// Information about a Google Test trace point. +struct TraceInfo { + const char* file; + int line; + String message; +}; + +// This is the default global test part result reporter used in UnitTestImpl. +// This class should only be used by UnitTestImpl. +class DefaultGlobalTestPartResultReporter + : public TestPartResultReporterInterface { + public: + explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test); + // Implements the TestPartResultReporterInterface. Reports the test part + // result in the current test. + virtual void ReportTestPartResult(const TestPartResult& result); + + private: + UnitTestImpl* const unit_test_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter); +}; + +// This is the default per thread test part result reporter used in +// UnitTestImpl. This class should only be used by UnitTestImpl. +class DefaultPerThreadTestPartResultReporter + : public TestPartResultReporterInterface { + public: + explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test); + // Implements the TestPartResultReporterInterface. The implementation just + // delegates to the current global test part result reporter of *unit_test_. + virtual void ReportTestPartResult(const TestPartResult& result); + + private: + UnitTestImpl* const unit_test_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter); +}; + +// The private implementation of the UnitTest class. We don't protect +// the methods under a mutex, as this class is not accessible by a +// user and the UnitTest class that delegates work to this class does +// proper locking. +class GTEST_API_ UnitTestImpl { + public: + explicit UnitTestImpl(UnitTest* parent); + virtual ~UnitTestImpl(); + + // There are two different ways to register your own TestPartResultReporter. + // You can register your own repoter to listen either only for test results + // from the current thread or for results from all threads. + // By default, each per-thread test result repoter just passes a new + // TestPartResult to the global test result reporter, which registers the + // test part result for the currently running test. + + // Returns the global test part result reporter. + TestPartResultReporterInterface* GetGlobalTestPartResultReporter(); + + // Sets the global test part result reporter. + void SetGlobalTestPartResultReporter( + TestPartResultReporterInterface* reporter); + + // Returns the test part result reporter for the current thread. + TestPartResultReporterInterface* GetTestPartResultReporterForCurrentThread(); + + // Sets the test part result reporter for the current thread. + void SetTestPartResultReporterForCurrentThread( + TestPartResultReporterInterface* reporter); + + // Gets the number of successful test cases. + int successful_test_case_count() const; + + // Gets the number of failed test cases. + int failed_test_case_count() const; + + // Gets the number of all test cases. + int total_test_case_count() const; + + // Gets the number of all test cases that contain at least one test + // that should run. 
+ int test_case_to_run_count() const; + + // Gets the number of successful tests. + int successful_test_count() const; + + // Gets the number of failed tests. + int failed_test_count() const; + + // Gets the number of disabled tests. + int disabled_test_count() const; + + // Gets the number of all tests. + int total_test_count() const; + + // Gets the number of tests that should run. + int test_to_run_count() const; + + // Gets the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Returns true iff the unit test passed (i.e. all test cases passed). + bool Passed() const { return !Failed(); } + + // Returns true iff the unit test failed (i.e. some test case failed + // or something outside of all tests failed). + bool Failed() const { + return failed_test_case_count() > 0 || ad_hoc_test_result()->Failed(); + } + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + const TestCase* GetTestCase(int i) const { + const int index = GetElementOr(test_case_indices_, i, -1); + return index < 0 ? NULL : test_cases_[i]; + } + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + TestCase* GetMutableTestCase(int i) { + const int index = GetElementOr(test_case_indices_, i, -1); + return index < 0 ? NULL : test_cases_[index]; + } + + // Provides access to the event listener list. + TestEventListeners* listeners() { return &listeners_; } + + // Returns the TestResult for the test that's currently running, or + // the TestResult for the ad hoc test if no test is running. + TestResult* current_test_result(); + + // Returns the TestResult for the ad hoc test. + const TestResult* ad_hoc_test_result() const { return &ad_hoc_test_result_; } + + // Sets the OS stack trace getter. + // + // Does nothing if the input and the current OS stack trace getter + // are the same; otherwise, deletes the old getter and makes the + // input the current getter. + void set_os_stack_trace_getter(OsStackTraceGetterInterface* getter); + + // Returns the current OS stack trace getter if it is not NULL; + // otherwise, creates an OsStackTraceGetter, makes it the current + // getter, and returns it. + OsStackTraceGetterInterface* os_stack_trace_getter(); + + // Returns the current OS stack trace as a String. + // + // The maximum number of stack frames to be included is specified by + // the gtest_stack_trace_depth flag. The skip_count parameter + // specifies the number of top frames to be skipped, which doesn't + // count against the number of frames to be included. + // + // For example, if Foo() calls Bar(), which in turn calls + // CurrentOsStackTraceExceptTop(1), Foo() will be included in the + // trace but Bar() and CurrentOsStackTraceExceptTop() won't. + String CurrentOsStackTraceExceptTop(int skip_count); + + // Finds and returns a TestCase with the given name. If one doesn't + // exist, creates one and returns it. + // + // Arguments: + // + // test_case_name: name of the test case + // type_param: the name of the test's type parameter, or NULL if + // this is not a typed or a type-parameterized test. 
+ // set_up_tc: pointer to the function that sets up the test case + // tear_down_tc: pointer to the function that tears down the test case + TestCase* GetTestCase(const char* test_case_name, + const char* type_param, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc); + + // Adds a TestInfo to the unit test. + // + // Arguments: + // + // set_up_tc: pointer to the function that sets up the test case + // tear_down_tc: pointer to the function that tears down the test case + // test_info: the TestInfo object + void AddTestInfo(Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc, + TestInfo* test_info) { + // In order to support thread-safe death tests, we need to + // remember the original working directory when the test program + // was first invoked. We cannot do this in RUN_ALL_TESTS(), as + // the user may have changed the current directory before calling + // RUN_ALL_TESTS(). Therefore we capture the current directory in + // AddTestInfo(), which is called to register a TEST or TEST_F + // before main() is reached. + if (original_working_dir_.IsEmpty()) { + original_working_dir_.Set(FilePath::GetCurrentDir()); + GTEST_CHECK_(!original_working_dir_.IsEmpty()) + << "Failed to get the current working directory."; + } + + GetTestCase(test_info->test_case_name(), + test_info->type_param(), + set_up_tc, + tear_down_tc)->AddTestInfo(test_info); + } + +#if GTEST_HAS_PARAM_TEST + // Returns ParameterizedTestCaseRegistry object used to keep track of + // value-parameterized tests and instantiate and register them. + internal::ParameterizedTestCaseRegistry& parameterized_test_registry() { + return parameterized_test_registry_; + } +#endif // GTEST_HAS_PARAM_TEST + + // Sets the TestCase object for the test that's currently running. + void set_current_test_case(TestCase* a_current_test_case) { + current_test_case_ = a_current_test_case; + } + + // Sets the TestInfo object for the test that's currently running. If + // current_test_info is NULL, the assertion results will be stored in + // ad_hoc_test_result_. + void set_current_test_info(TestInfo* a_current_test_info) { + current_test_info_ = a_current_test_info; + } + + // Registers all parameterized tests defined using TEST_P and + // INSTANTIATE_TEST_CASE_P, creating regular tests for each test/parameter + // combination. This method can be called more then once; it has guards + // protecting from registering the tests more then once. If + // value-parameterized tests are disabled, RegisterParameterizedTests is + // present but does nothing. + void RegisterParameterizedTests(); + + // Runs all tests in this UnitTest object, prints the result, and + // returns true if all tests are successful. If any exception is + // thrown during a test, this test is considered to be failed, but + // the rest of the tests will still be run. + bool RunAllTests(); + + // Clears the results of all tests, except the ad hoc tests. + void ClearNonAdHocTestResult() { + ForEach(test_cases_, TestCase::ClearTestCaseResult); + } + + // Clears the results of ad-hoc test assertions. + void ClearAdHocTestResult() { + ad_hoc_test_result_.Clear(); + } + + enum ReactionToSharding { + HONOR_SHARDING_PROTOCOL, + IGNORE_SHARDING_PROTOCOL + }; + + // Matches the full name of each test against the user-specified + // filter to decide whether the test should run, then records the + // result in each TestCase and TestInfo object. 
+ // If shard_tests == HONOR_SHARDING_PROTOCOL, further filters tests + // based on sharding variables in the environment. + // Returns the number of tests that should run. + int FilterTests(ReactionToSharding shard_tests); + + // Prints the names of the tests matching the user-specified filter flag. + void ListTestsMatchingFilter(); + + const TestCase* current_test_case() const { return current_test_case_; } + TestInfo* current_test_info() { return current_test_info_; } + const TestInfo* current_test_info() const { return current_test_info_; } + + // Returns the vector of environments that need to be set-up/torn-down + // before/after the tests are run. + std::vector& environments() { return environments_; } + + // Getters for the per-thread Google Test trace stack. + std::vector& gtest_trace_stack() { + return *(gtest_trace_stack_.pointer()); + } + const std::vector& gtest_trace_stack() const { + return gtest_trace_stack_.get(); + } + +#if GTEST_HAS_DEATH_TEST + void InitDeathTestSubprocessControlInfo() { + internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag()); + } + // Returns a pointer to the parsed --gtest_internal_run_death_test + // flag, or NULL if that flag was not specified. + // This information is useful only in a death test child process. + // Must not be called before a call to InitGoogleTest. + const InternalRunDeathTestFlag* internal_run_death_test_flag() const { + return internal_run_death_test_flag_.get(); + } + + // Returns a pointer to the current death test factory. + internal::DeathTestFactory* death_test_factory() { + return death_test_factory_.get(); + } + + void SuppressTestEventsIfInSubprocess(); + + friend class ReplaceDeathTestFactory; +#endif // GTEST_HAS_DEATH_TEST + + // Initializes the event listener performing XML output as specified by + // UnitTestOptions. Must not be called before InitGoogleTest. + void ConfigureXmlOutput(); + +#if GTEST_CAN_STREAM_RESULTS_ + // Initializes the event listener for streaming test results to a socket. + // Must not be called before InitGoogleTest. + void ConfigureStreamingOutput(); +#endif + + // Performs initialization dependent upon flag values obtained in + // ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to + // ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest + // this function is also called from RunAllTests. Since this function can be + // called more than once, it has to be idempotent. + void PostFlagParsingInit(); + + // Gets the random seed used at the start of the current test iteration. + int random_seed() const { return random_seed_; } + + // Gets the random number generator. + internal::Random* random() { return &random_; } + + // Shuffles all test cases, and the tests within each test case, + // making sure that death tests are still run first. + void ShuffleTests(); + + // Restores the test cases and tests to their order before the first shuffle. + void UnshuffleTests(); + + // Returns the value of GTEST_FLAG(catch_exceptions) at the moment + // UnitTest::Run() starts. + bool catch_exceptions() const { return catch_exceptions_; } + + private: + friend class ::testing::UnitTest; + + // Used by UnitTest::Run() to capture the state of + // GTEST_FLAG(catch_exceptions) at the moment it starts. + void set_catch_exceptions(bool value) { catch_exceptions_ = value; } + + // The UnitTest object that owns this implementation object. + UnitTest* const parent_; + + // The working directory when the first TEST() or TEST_F() was + // executed. 
+  internal::FilePath original_working_dir_;
+
+  // The default test part result reporters.
+  DefaultGlobalTestPartResultReporter default_global_test_part_result_reporter_;
+  DefaultPerThreadTestPartResultReporter
+      default_per_thread_test_part_result_reporter_;
+
+  // Points to (but doesn't own) the global test part result reporter.
+  TestPartResultReporterInterface* global_test_part_result_repoter_;
+
+  // Protects read and write access to global_test_part_result_reporter_.
+  internal::Mutex global_test_part_result_reporter_mutex_;
+
+  // Points to (but doesn't own) the per-thread test part result reporter.
+  internal::ThreadLocal<TestPartResultReporterInterface*>
+      per_thread_test_part_result_reporter_;
+
+  // The vector of environments that need to be set-up/torn-down
+  // before/after the tests are run.
+  std::vector<Environment*> environments_;
+
+  // The vector of TestCases in their original order. It owns the
+  // elements in the vector.
+  std::vector<TestCase*> test_cases_;
+
+  // Provides a level of indirection for the test case list to allow
+  // easy shuffling and restoring the test case order. The i-th
+  // element of this vector is the index of the i-th test case in the
+  // shuffled order.
+  std::vector<int> test_case_indices_;
+
+#if GTEST_HAS_PARAM_TEST
+  // ParameterizedTestRegistry object used to register value-parameterized
+  // tests.
+  internal::ParameterizedTestCaseRegistry parameterized_test_registry_;
+
+  // Indicates whether RegisterParameterizedTests() has been called already.
+  bool parameterized_tests_registered_;
+#endif  // GTEST_HAS_PARAM_TEST
+
+  // Index of the last death test case registered. Initially -1.
+  int last_death_test_case_;
+
+  // This points to the TestCase for the currently running test. It
+  // changes as Google Test goes through one test case after another.
+  // When no test is running, this is set to NULL and Google Test
+  // stores assertion results in ad_hoc_test_result_. Initially NULL.
+  TestCase* current_test_case_;
+
+  // This points to the TestInfo for the currently running test. It
+  // changes as Google Test goes through one test after another. When
+  // no test is running, this is set to NULL and Google Test stores
+  // assertion results in ad_hoc_test_result_. Initially NULL.
+  TestInfo* current_test_info_;
+
+  // Normally, a user only writes assertions inside a TEST or TEST_F,
+  // or inside a function called by a TEST or TEST_F. Since Google
+  // Test keeps track of which test is currently running, it can
+  // associate such an assertion with the test it belongs to.
+  //
+  // If an assertion is encountered when no TEST or TEST_F is running,
+  // Google Test attributes the assertion result to an imaginary "ad hoc"
+  // test, and records the result in ad_hoc_test_result_.
+  TestResult ad_hoc_test_result_;
+
+  // The list of event listeners that can be used to track events inside
+  // Google Test.
+  TestEventListeners listeners_;
+
+  // The OS stack trace getter. Will be deleted when the UnitTest
+  // object is destructed. By default, an OsStackTraceGetter is used,
+  // but the user can set this field to use a custom getter if that is
+  // desired.
+  OsStackTraceGetterInterface* os_stack_trace_getter_;
+
+  // True iff PostFlagParsingInit() has been called.
+  bool post_flag_parse_init_performed_;
+
+  // The random number seed used at the beginning of the test run.
+  int random_seed_;
+
+  // Our random number generator.
+  internal::Random random_;
+
+  // How long the test took to run, in milliseconds.
+  TimeInMillis elapsed_time_;
+
+#if GTEST_HAS_DEATH_TEST
+  // The decomposed components of the gtest_internal_run_death_test flag,
+  // parsed when RUN_ALL_TESTS is called.
+  internal::scoped_ptr<InternalRunDeathTestFlag> internal_run_death_test_flag_;
+  internal::scoped_ptr<internal::DeathTestFactory> death_test_factory_;
+#endif  // GTEST_HAS_DEATH_TEST
+
+  // A per-thread stack of traces created by the SCOPED_TRACE() macro.
+  internal::ThreadLocal<std::vector<TraceInfo> > gtest_trace_stack_;
+
+  // The value of GTEST_FLAG(catch_exceptions) at the moment RunAllTests()
+  // starts.
+  bool catch_exceptions_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl);
+};  // class UnitTestImpl
+
+// Convenience function for accessing the global UnitTest
+// implementation object.
+inline UnitTestImpl* GetUnitTestImpl() {
+  return UnitTest::GetInstance()->impl();
+}
+
+#if GTEST_USES_SIMPLE_RE
+
+// Internal helper functions for implementing the simple regular
+// expression matcher.
+GTEST_API_ bool IsInSet(char ch, const char* str);
+GTEST_API_ bool IsAsciiDigit(char ch);
+GTEST_API_ bool IsAsciiPunct(char ch);
+GTEST_API_ bool IsRepeat(char ch);
+GTEST_API_ bool IsAsciiWhiteSpace(char ch);
+GTEST_API_ bool IsAsciiWordChar(char ch);
+GTEST_API_ bool IsValidEscape(char ch);
+GTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch);
+GTEST_API_ bool ValidateRegex(const char* regex);
+GTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str);
+GTEST_API_ bool MatchRepetitionAndRegexAtHead(
+    bool escaped, char ch, char repeat, const char* regex, const char* str);
+GTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str);
+
+#endif  // GTEST_USES_SIMPLE_RE
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, char** argv);
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv);
+
+#if GTEST_HAS_DEATH_TEST
+
+// Returns the message describing the last system error, regardless of the
+// platform.
+GTEST_API_ String GetLastErrnoDescription();
+
+# if GTEST_OS_WINDOWS
+// Provides leak-safe Windows kernel handle ownership.
+class AutoHandle {
+ public:
+  AutoHandle() : handle_(INVALID_HANDLE_VALUE) {}
+  explicit AutoHandle(HANDLE handle) : handle_(handle) {}
+
+  ~AutoHandle() { Reset(); }
+
+  HANDLE Get() const { return handle_; }
+  void Reset() { Reset(INVALID_HANDLE_VALUE); }
+  void Reset(HANDLE handle) {
+    if (handle != handle_) {
+      if (handle_ != INVALID_HANDLE_VALUE)
+        ::CloseHandle(handle_);
+      handle_ = handle;
+    }
+  }
+
+ private:
+  HANDLE handle_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle);
+};
+# endif  // GTEST_OS_WINDOWS
+
+// Attempts to parse a string into a positive integer pointed to by the
+// number parameter. Returns true if that is possible.
+// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can use
+// it here.
+template <typename Integer>
+bool ParseNaturalNumber(const ::std::string& str, Integer* number) {
+  // Fail fast if the given string does not begin with a digit;
+  // this bypasses strtoXXX's "optional leading whitespace and plus
+  // or minus sign" semantics, which are undesirable here.
+  if (str.empty() || !IsDigit(str[0])) {
+    return false;
+  }
+  errno = 0;
+
+  char* end;
+  // BiggestConvertible is the largest integer type that system-provided
+  // string-to-number conversion routines can return.
+
+# if GTEST_OS_WINDOWS && !defined(__GNUC__)
+
+  // MSVC and C++ Builder define __int64 instead of the standard long long.
+  typedef unsigned __int64 BiggestConvertible;
+  const BiggestConvertible parsed = _strtoui64(str.c_str(), &end, 10);
+
+# else
+
+  typedef unsigned long long BiggestConvertible;  // NOLINT
+  const BiggestConvertible parsed = strtoull(str.c_str(), &end, 10);
+
+# endif  // GTEST_OS_WINDOWS && !defined(__GNUC__)
+
+  const bool parse_success = *end == '\0' && errno == 0;
+
+  // TODO(vladl@google.com): Convert this to compile time assertion when it is
+  // available.
+  GTEST_CHECK_(sizeof(Integer) <= sizeof(parsed));
+
+  const Integer result = static_cast<Integer>(parsed);
+  if (parse_success && static_cast<BiggestConvertible>(result) == parsed) {
+    *number = result;
+    return true;
+  }
+  return false;
+}
+#endif  // GTEST_HAS_DEATH_TEST
+
+// TestResult contains some private methods that should be hidden from
+// Google Test users but are required for testing. This class allows our
+// tests to access them.
+//
+// This class is supplied only for the purpose of testing Google Test's own
+// constructs. Do not use it in user tests, either directly or indirectly.
+class TestResultAccessor {
+ public:
+  static void RecordProperty(TestResult* test_result,
+                             const TestProperty& property) {
+    test_result->RecordProperty(property);
+  }
+
+  static void ClearTestPartResults(TestResult* test_result) {
+    test_result->ClearTestPartResults();
+  }
+
+  static const std::vector<testing::TestPartResult>& test_part_results(
+      const TestResult& test_result) {
+    return test_result.test_part_results();
+  }
+};
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_SRC_GTEST_INTERNAL_INL_H_
+#undef GTEST_IMPLEMENTATION_
+
+#if GTEST_OS_WINDOWS
+# define vsnprintf _vsnprintf
+#endif  // GTEST_OS_WINDOWS
+
+namespace testing {
+
+using internal::CountIf;
+using internal::ForEach;
+using internal::GetElementOr;
+using internal::Shuffle;
+
+// Constants.
+
+// A test whose test case name or test name matches this filter is
+// disabled and not run.
+static const char kDisableTestFilter[] = "DISABLED_*:*/DISABLED_*";
+
+// A test case whose name matches this filter is considered a death
+// test case and will be run before test cases whose name doesn't
+// match this filter.
+static const char kDeathTestCaseFilter[] = "*DeathTest:*DeathTest/*";
+
+// A test filter that matches everything.
+static const char kUniversalFilter[] = "*";
+
+// The default output file for XML output.
+static const char kDefaultOutputFile[] = "test_detail.xml";
+
+// The environment variable name for the test shard index.
+static const char kTestShardIndex[] = "GTEST_SHARD_INDEX";
+// The environment variable name for the total number of test shards.
+static const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS";
+// The environment variable name for the test shard status file.
+static const char kTestShardStatusFile[] = "GTEST_SHARD_STATUS_FILE";
+
+namespace internal {
+
+// The text used in failure messages to indicate the start of the
+// stack trace.
+const char kStackTraceMarker[] = "\nStack trace:\n";
+
+// g_help_flag is true iff the --help flag or an equivalent form is
+// specified on the command line.
+bool g_help_flag = false; + +} // namespace internal + +GTEST_DEFINE_bool_( + also_run_disabled_tests, + internal::BoolFromGTestEnv("also_run_disabled_tests", false), + "Run disabled tests too, in addition to the tests normally being run."); + +GTEST_DEFINE_bool_( + break_on_failure, + internal::BoolFromGTestEnv("break_on_failure", false), + "True iff a failed assertion should be a debugger break-point."); + +GTEST_DEFINE_bool_( + catch_exceptions, + internal::BoolFromGTestEnv("catch_exceptions", true), + "True iff " GTEST_NAME_ + " should catch exceptions and treat them as test failures."); + +GTEST_DEFINE_string_( + color, + internal::StringFromGTestEnv("color", "auto"), + "Whether to use colors in the output. Valid values: yes, no, " + "and auto. 'auto' means to use colors if the output is " + "being sent to a terminal and the TERM environment variable " + "is set to xterm, xterm-color, xterm-256color, linux or cygwin."); + +GTEST_DEFINE_string_( + filter, + internal::StringFromGTestEnv("filter", kUniversalFilter), + "A colon-separated list of glob (not regex) patterns " + "for filtering the tests to run, optionally followed by a " + "'-' and a : separated list of negative patterns (tests to " + "exclude). A test is run if it matches one of the positive " + "patterns and does not match any of the negative patterns."); + +GTEST_DEFINE_bool_(list_tests, false, + "List all tests without running them."); + +GTEST_DEFINE_string_( + output, + internal::StringFromGTestEnv("output", ""), + "A format (currently must be \"xml\"), optionally followed " + "by a colon and an output file name or directory. A directory " + "is indicated by a trailing pathname separator. " + "Examples: \"xml:filename.xml\", \"xml::directoryname/\". " + "If a directory is specified, output files will be created " + "within that directory, with file-names based on the test " + "executable's name and, if necessary, made unique by adding " + "digits."); + +GTEST_DEFINE_bool_( + print_time, + internal::BoolFromGTestEnv("print_time", true), + "True iff " GTEST_NAME_ + " should display elapsed time in text output."); + +GTEST_DEFINE_int32_( + random_seed, + internal::Int32FromGTestEnv("random_seed", 0), + "Random number seed to use when shuffling test orders. Must be in range " + "[1, 99999], or 0 to use a seed based on the current time."); + +GTEST_DEFINE_int32_( + repeat, + internal::Int32FromGTestEnv("repeat", 1), + "How many times to repeat each test. Specify a negative number " + "for repeating forever. Useful for shaking out flaky tests."); + +GTEST_DEFINE_bool_( + show_internal_stack_frames, false, + "True iff " GTEST_NAME_ " should include internal stack frames when " + "printing test failure stack traces."); + +GTEST_DEFINE_bool_( + shuffle, + internal::BoolFromGTestEnv("shuffle", false), + "True iff " GTEST_NAME_ + " should randomize tests' order on every run."); + +GTEST_DEFINE_int32_( + stack_trace_depth, + internal::Int32FromGTestEnv("stack_trace_depth", kMaxStackTraceDepth), + "The maximum number of stack frames to print when an " + "assertion fails. The valid range is 0 through 100, inclusive."); + +GTEST_DEFINE_string_( + stream_result_to, + internal::StringFromGTestEnv("stream_result_to", ""), + "This flag specifies the host name and the port number on which to stream " + "test results. Example: \"localhost:555\". 
The flag is effective only on " + "Linux."); + +GTEST_DEFINE_bool_( + throw_on_failure, + internal::BoolFromGTestEnv("throw_on_failure", false), + "When this flag is specified, a failed assertion will throw an exception " + "if exceptions are enabled or exit the program with a non-zero code " + "otherwise."); + +namespace internal { + +// Generates a random number from [0, range), using a Linear +// Congruential Generator (LCG). Crashes if 'range' is 0 or greater +// than kMaxRange. +UInt32 Random::Generate(UInt32 range) { + // These constants are the same as are used in glibc's rand(3). + state_ = (1103515245U*state_ + 12345U) % kMaxRange; + + GTEST_CHECK_(range > 0) + << "Cannot generate a number in the range [0, 0)."; + GTEST_CHECK_(range <= kMaxRange) + << "Generation of a number in [0, " << range << ") was requested, " + << "but this can only generate numbers in [0, " << kMaxRange << ")."; + + // Converting via modulus introduces a bit of downward bias, but + // it's simple, and a linear congruential generator isn't too good + // to begin with. + return state_ % range; +} + +// GTestIsInitialized() returns true iff the user has initialized +// Google Test. Useful for catching the user mistake of not initializing +// Google Test before calling RUN_ALL_TESTS(). +// +// A user must call testing::InitGoogleTest() to initialize Google +// Test. g_init_gtest_count is set to the number of times +// InitGoogleTest() has been called. We don't protect this variable +// under a mutex as it is only accessed in the main thread. +int g_init_gtest_count = 0; +static bool GTestIsInitialized() { return g_init_gtest_count != 0; } + +// Iterates over a vector of TestCases, keeping a running sum of the +// results of calling a given int-returning method on each. +// Returns the sum. +static int SumOverTestCaseList(const std::vector& case_list, + int (TestCase::*method)() const) { + int sum = 0; + for (size_t i = 0; i < case_list.size(); i++) { + sum += (case_list[i]->*method)(); + } + return sum; +} + +// Returns true iff the test case passed. +static bool TestCasePassed(const TestCase* test_case) { + return test_case->should_run() && test_case->Passed(); +} + +// Returns true iff the test case failed. +static bool TestCaseFailed(const TestCase* test_case) { + return test_case->should_run() && test_case->Failed(); +} + +// Returns true iff test_case contains at least one test that should +// run. +static bool ShouldRunTestCase(const TestCase* test_case) { + return test_case->should_run(); +} + +// AssertHelper constructor. +AssertHelper::AssertHelper(TestPartResult::Type type, + const char* file, + int line, + const char* message) + : data_(new AssertHelperData(type, file, line, message)) { +} + +AssertHelper::~AssertHelper() { + delete data_; +} + +// Message assignment, for assertion streaming support. +void AssertHelper::operator=(const Message& message) const { + UnitTest::GetInstance()-> + AddTestPartResult(data_->type, data_->file, data_->line, + AppendUserMessage(data_->message, message), + UnitTest::GetInstance()->impl() + ->CurrentOsStackTraceExceptTop(1) + // Skips the stack frame for this function itself. + ); // NOLINT +} + +// Mutex for linked pointers. +GTEST_DEFINE_STATIC_MUTEX_(g_linked_ptr_mutex); + +// Application pathname gotten in InitGoogleTest. +String g_executable_path; + +// Returns the current application's name, removing directory path if that +// is present. 
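+// For example, an executable invoked as "/path/to/foo_test" yields
+// "foo_test"; on Windows the ".exe" extension is dropped as well, so
+// "foo_test.exe" also yields "foo_test". ("foo_test" is an arbitrary
+// example name.)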
+FilePath GetCurrentExecutableName() { + FilePath result; + +#if GTEST_OS_WINDOWS + result.Set(FilePath(g_executable_path).RemoveExtension("exe")); +#else + result.Set(FilePath(g_executable_path)); +#endif // GTEST_OS_WINDOWS + + return result.RemoveDirectoryName(); +} + +// Functions for processing the gtest_output flag. + +// Returns the output format, or "" for normal printed output. +String UnitTestOptions::GetOutputFormat() { + const char* const gtest_output_flag = GTEST_FLAG(output).c_str(); + if (gtest_output_flag == NULL) return String(""); + + const char* const colon = strchr(gtest_output_flag, ':'); + return (colon == NULL) ? + String(gtest_output_flag) : + String(gtest_output_flag, colon - gtest_output_flag); +} + +// Returns the name of the requested output file, or the default if none +// was explicitly specified. +String UnitTestOptions::GetAbsolutePathToOutputFile() { + const char* const gtest_output_flag = GTEST_FLAG(output).c_str(); + if (gtest_output_flag == NULL) + return String(""); + + const char* const colon = strchr(gtest_output_flag, ':'); + if (colon == NULL) + return String(internal::FilePath::ConcatPaths( + internal::FilePath( + UnitTest::GetInstance()->original_working_dir()), + internal::FilePath(kDefaultOutputFile)).ToString() ); + + internal::FilePath output_name(colon + 1); + if (!output_name.IsAbsolutePath()) + // TODO(wan@google.com): on Windows \some\path is not an absolute + // path (as its meaning depends on the current drive), yet the + // following logic for turning it into an absolute path is wrong. + // Fix it. + output_name = internal::FilePath::ConcatPaths( + internal::FilePath(UnitTest::GetInstance()->original_working_dir()), + internal::FilePath(colon + 1)); + + if (!output_name.IsDirectory()) + return output_name.ToString(); + + internal::FilePath result(internal::FilePath::GenerateUniqueFileName( + output_name, internal::GetCurrentExecutableName(), + GetOutputFormat().c_str())); + return result.ToString(); +} + +// Returns true iff the wildcard pattern matches the string. The +// first ':' or '\0' character in pattern marks the end of it. +// +// This recursive algorithm isn't very efficient, but is clear and +// works well enough for matching test names, which are short. +bool UnitTestOptions::PatternMatchesString(const char *pattern, + const char *str) { + switch (*pattern) { + case '\0': + case ':': // Either ':' or '\0' marks the end of the pattern. + return *str == '\0'; + case '?': // Matches any single character. + return *str != '\0' && PatternMatchesString(pattern + 1, str + 1); + case '*': // Matches any string (possibly empty) of characters. + return (*str != '\0' && PatternMatchesString(pattern, str + 1)) || + PatternMatchesString(pattern + 1, str); + default: // Non-special character. Matches itself. + return *pattern == *str && + PatternMatchesString(pattern + 1, str + 1); + } +} + +bool UnitTestOptions::MatchesFilter(const String& name, const char* filter) { + const char *cur_pattern = filter; + for (;;) { + if (PatternMatchesString(cur_pattern, name.c_str())) { + return true; + } + + // Finds the next pattern in the filter. + cur_pattern = strchr(cur_pattern, ':'); + + // Returns if no more pattern can be found. + if (cur_pattern == NULL) { + return false; + } + + // Skips the pattern separater (the ':' character). + cur_pattern++; + } +} + +// TODO(keithray): move String function implementations to gtest-string.cc. + +// Returns true iff the user-specified filter matches the test case +// name and the test name. 
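+// For example, with --gtest_filter=FooTest.*:BarTest.*-FooTest.Bar every
+// test whose full name begins with "FooTest." or "BarTest." matches, except
+// FooTest.Bar: the part after the '-' is the negative filter. (FooTest and
+// BarTest are arbitrary example names.)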
+bool UnitTestOptions::FilterMatchesTest(const String &test_case_name, + const String &test_name) { + const String& full_name = String::Format("%s.%s", + test_case_name.c_str(), + test_name.c_str()); + + // Split --gtest_filter at '-', if there is one, to separate into + // positive filter and negative filter portions + const char* const p = GTEST_FLAG(filter).c_str(); + const char* const dash = strchr(p, '-'); + String positive; + String negative; + if (dash == NULL) { + positive = GTEST_FLAG(filter).c_str(); // Whole string is a positive filter + negative = String(""); + } else { + positive = String(p, dash - p); // Everything up to the dash + negative = String(dash+1); // Everything after the dash + if (positive.empty()) { + // Treat '-test1' as the same as '*-test1' + positive = kUniversalFilter; + } + } + + // A filter is a colon-separated list of patterns. It matches a + // test if any pattern in it matches the test. + return (MatchesFilter(full_name, positive.c_str()) && + !MatchesFilter(full_name, negative.c_str())); +} + +#if GTEST_HAS_SEH +// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the +// given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise. +// This function is useful as an __except condition. +int UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) { + // Google Test should handle a SEH exception if: + // 1. the user wants it to, AND + // 2. this is not a breakpoint exception, AND + // 3. this is not a C++ exception (VC++ implements them via SEH, + // apparently). + // + // SEH exception code for C++ exceptions. + // (see http://support.microsoft.com/kb/185294 for more information). + const DWORD kCxxExceptionCode = 0xe06d7363; + + bool should_handle = true; + + if (!GTEST_FLAG(catch_exceptions)) + should_handle = false; + else if (exception_code == EXCEPTION_BREAKPOINT) + should_handle = false; + else if (exception_code == kCxxExceptionCode) + should_handle = false; + + return should_handle ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH; +} +#endif // GTEST_HAS_SEH + +} // namespace internal + +// The c'tor sets this object as the test part result reporter used by +// Google Test. The 'result' parameter specifies where to report the +// results. Intercepts only failures from the current thread. +ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter( + TestPartResultArray* result) + : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD), + result_(result) { + Init(); +} + +// The c'tor sets this object as the test part result reporter used by +// Google Test. The 'result' parameter specifies where to report the +// results. +ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter( + InterceptMode intercept_mode, TestPartResultArray* result) + : intercept_mode_(intercept_mode), + result_(result) { + Init(); +} + +void ScopedFakeTestPartResultReporter::Init() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + if (intercept_mode_ == INTERCEPT_ALL_THREADS) { + old_reporter_ = impl->GetGlobalTestPartResultReporter(); + impl->SetGlobalTestPartResultReporter(this); + } else { + old_reporter_ = impl->GetTestPartResultReporterForCurrentThread(); + impl->SetTestPartResultReporterForCurrentThread(this); + } +} + +// The d'tor restores the test part result reporter used by Google Test +// before. 
+ScopedFakeTestPartResultReporter::~ScopedFakeTestPartResultReporter() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + if (intercept_mode_ == INTERCEPT_ALL_THREADS) { + impl->SetGlobalTestPartResultReporter(old_reporter_); + } else { + impl->SetTestPartResultReporterForCurrentThread(old_reporter_); + } +} + +// Increments the test part result count and remembers the result. +// This method is from the TestPartResultReporterInterface interface. +void ScopedFakeTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + result_->Append(result); +} + +namespace internal { + +// Returns the type ID of ::testing::Test. We should always call this +// instead of GetTypeId< ::testing::Test>() to get the type ID of +// testing::Test. This is to work around a suspected linker bug when +// using Google Test as a framework on Mac OS X. The bug causes +// GetTypeId< ::testing::Test>() to return different values depending +// on whether the call is from the Google Test framework itself or +// from user test code. GetTestTypeId() is guaranteed to always +// return the same value, as it always calls GetTypeId<>() from the +// gtest.cc, which is within the Google Test framework. +TypeId GetTestTypeId() { + return GetTypeId(); +} + +// The value of GetTestTypeId() as seen from within the Google Test +// library. This is solely for testing GetTestTypeId(). +extern const TypeId kTestTypeIdInGoogleTest = GetTestTypeId(); + +// This predicate-formatter checks that 'results' contains a test part +// failure of the given type and that the failure message contains the +// given substring. +AssertionResult HasOneFailure(const char* /* results_expr */, + const char* /* type_expr */, + const char* /* substr_expr */, + const TestPartResultArray& results, + TestPartResult::Type type, + const string& substr) { + const String expected(type == TestPartResult::kFatalFailure ? + "1 fatal failure" : + "1 non-fatal failure"); + Message msg; + if (results.size() != 1) { + msg << "Expected: " << expected << "\n" + << " Actual: " << results.size() << " failures"; + for (int i = 0; i < results.size(); i++) { + msg << "\n" << results.GetTestPartResult(i); + } + return AssertionFailure() << msg; + } + + const TestPartResult& r = results.GetTestPartResult(0); + if (r.type() != type) { + return AssertionFailure() << "Expected: " << expected << "\n" + << " Actual:\n" + << r; + } + + if (strstr(r.message(), substr.c_str()) == NULL) { + return AssertionFailure() << "Expected: " << expected << " containing \"" + << substr << "\"\n" + << " Actual:\n" + << r; + } + + return AssertionSuccess(); +} + +// The constructor of SingleFailureChecker remembers where to look up +// test part results, what type of failure we expect, and what +// substring the failure message should contain. +SingleFailureChecker:: SingleFailureChecker( + const TestPartResultArray* results, + TestPartResult::Type type, + const string& substr) + : results_(results), + type_(type), + substr_(substr) {} + +// The destructor of SingleFailureChecker verifies that the given +// TestPartResultArray contains exactly one failure that has the given +// type and contains the given substring. If that's not the case, a +// non-fatal failure will be generated. 
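+// (This checker is what the EXPECT_FATAL_FAILURE and EXPECT_NONFATAL_FAILURE
+// macros declared in gtest-spi.h rely on to verify that the statement under
+// test produced exactly one failure of the expected kind.)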
+SingleFailureChecker::~SingleFailureChecker() { + EXPECT_PRED_FORMAT3(HasOneFailure, *results_, type_, substr_); +} + +DefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter( + UnitTestImpl* unit_test) : unit_test_(unit_test) {} + +void DefaultGlobalTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + unit_test_->current_test_result()->AddTestPartResult(result); + unit_test_->listeners()->repeater()->OnTestPartResult(result); +} + +DefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter( + UnitTestImpl* unit_test) : unit_test_(unit_test) {} + +void DefaultPerThreadTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + unit_test_->GetGlobalTestPartResultReporter()->ReportTestPartResult(result); +} + +// Returns the global test part result reporter. +TestPartResultReporterInterface* +UnitTestImpl::GetGlobalTestPartResultReporter() { + internal::MutexLock lock(&global_test_part_result_reporter_mutex_); + return global_test_part_result_repoter_; +} + +// Sets the global test part result reporter. +void UnitTestImpl::SetGlobalTestPartResultReporter( + TestPartResultReporterInterface* reporter) { + internal::MutexLock lock(&global_test_part_result_reporter_mutex_); + global_test_part_result_repoter_ = reporter; +} + +// Returns the test part result reporter for the current thread. +TestPartResultReporterInterface* +UnitTestImpl::GetTestPartResultReporterForCurrentThread() { + return per_thread_test_part_result_reporter_.get(); +} + +// Sets the test part result reporter for the current thread. +void UnitTestImpl::SetTestPartResultReporterForCurrentThread( + TestPartResultReporterInterface* reporter) { + per_thread_test_part_result_reporter_.set(reporter); +} + +// Gets the number of successful test cases. +int UnitTestImpl::successful_test_case_count() const { + return CountIf(test_cases_, TestCasePassed); +} + +// Gets the number of failed test cases. +int UnitTestImpl::failed_test_case_count() const { + return CountIf(test_cases_, TestCaseFailed); +} + +// Gets the number of all test cases. +int UnitTestImpl::total_test_case_count() const { + return static_cast(test_cases_.size()); +} + +// Gets the number of all test cases that contain at least one test +// that should run. +int UnitTestImpl::test_case_to_run_count() const { + return CountIf(test_cases_, ShouldRunTestCase); +} + +// Gets the number of successful tests. +int UnitTestImpl::successful_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::successful_test_count); +} + +// Gets the number of failed tests. +int UnitTestImpl::failed_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::failed_test_count); +} + +// Gets the number of disabled tests. +int UnitTestImpl::disabled_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::disabled_test_count); +} + +// Gets the number of all tests. +int UnitTestImpl::total_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::total_test_count); +} + +// Gets the number of tests that should run. +int UnitTestImpl::test_to_run_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::test_to_run_count); +} + +// Returns the current OS stack trace as a String. +// +// The maximum number of stack frames to be included is specified by +// the gtest_stack_trace_depth flag. 
The skip_count parameter +// specifies the number of top frames to be skipped, which doesn't +// count against the number of frames to be included. +// +// For example, if Foo() calls Bar(), which in turn calls +// CurrentOsStackTraceExceptTop(1), Foo() will be included in the +// trace but Bar() and CurrentOsStackTraceExceptTop() won't. +String UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) { + (void)skip_count; + return String(""); +} + +// Returns the current time in milliseconds. +TimeInMillis GetTimeInMillis() { +#if GTEST_OS_WINDOWS_MOBILE || defined(__BORLANDC__) + // Difference between 1970-01-01 and 1601-01-01 in milliseconds. + // http://analogous.blogspot.com/2005/04/epoch.html + const TimeInMillis kJavaEpochToWinFileTimeDelta = + static_cast(116444736UL) * 100000UL; + const DWORD kTenthMicrosInMilliSecond = 10000; + + SYSTEMTIME now_systime; + FILETIME now_filetime; + ULARGE_INTEGER now_int64; + // TODO(kenton@google.com): Shouldn't this just use + // GetSystemTimeAsFileTime()? + GetSystemTime(&now_systime); + if (SystemTimeToFileTime(&now_systime, &now_filetime)) { + now_int64.LowPart = now_filetime.dwLowDateTime; + now_int64.HighPart = now_filetime.dwHighDateTime; + now_int64.QuadPart = (now_int64.QuadPart / kTenthMicrosInMilliSecond) - + kJavaEpochToWinFileTimeDelta; + return now_int64.QuadPart; + } + return 0; +#elif GTEST_OS_WINDOWS && !GTEST_HAS_GETTIMEOFDAY_ + __timeb64 now; + +# ifdef _MSC_VER + + // MSVC 8 deprecates _ftime64(), so we want to suppress warning 4996 + // (deprecated function) there. + // TODO(kenton@google.com): Use GetTickCount()? Or use + // SystemTimeToFileTime() +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4996) // Temporarily disables warning 4996. + _ftime64(&now); +# pragma warning(pop) // Restores the warning state. +# else + + _ftime64(&now); + +# endif // _MSC_VER + + return static_cast(now.time) * 1000 + now.millitm; +#elif GTEST_HAS_GETTIMEOFDAY_ + struct timeval now; + gettimeofday(&now, NULL); + return static_cast(now.tv_sec) * 1000 + now.tv_usec / 1000; +#else +# error "Don't know how to get the current time on your system." +#endif +} + +// Utilities + +// class String + +// Returns the input enclosed in double quotes if it's not NULL; +// otherwise returns "(null)". For example, "\"Hello\"" is returned +// for input "Hello". +// +// This is useful for printing a C string in the syntax of a literal. +// +// Known issue: escape sequences are not handled yet. +String String::ShowCStringQuoted(const char* c_str) { + return c_str ? String::Format("\"%s\"", c_str) : String("(null)"); +} + +// Copies at most length characters from str into a newly-allocated +// piece of memory of size length+1. The memory is allocated with new[]. +// A terminating null byte is written to the memory, and a pointer to it +// is returned. If str is NULL, NULL is returned. +static char* CloneString(const char* str, size_t length) { + if (str == NULL) { + return NULL; + } else { + char* const clone = new char[length + 1]; + posix::StrNCpy(clone, str, length); + clone[length] = '\0'; + return clone; + } +} + +// Clones a 0-terminated C string, allocating memory using new. The +// caller is responsible for deleting[] the return value. Returns the +// cloned string, or NULL if the input is NULL. +const char * String::CloneCString(const char* c_str) { + return (c_str == NULL) ? 
+ NULL : CloneString(c_str, strlen(c_str)); +} + +#if GTEST_OS_WINDOWS_MOBILE +// Creates a UTF-16 wide string from the given ANSI string, allocating +// memory using new. The caller is responsible for deleting the return +// value using delete[]. Returns the wide string, or NULL if the +// input is NULL. +LPCWSTR String::AnsiToUtf16(const char* ansi) { + if (!ansi) return NULL; + const int length = strlen(ansi); + const int unicode_length = + MultiByteToWideChar(CP_ACP, 0, ansi, length, + NULL, 0); + WCHAR* unicode = new WCHAR[unicode_length + 1]; + MultiByteToWideChar(CP_ACP, 0, ansi, length, + unicode, unicode_length); + unicode[unicode_length] = 0; + return unicode; +} + +// Creates an ANSI string from the given wide string, allocating +// memory using new. The caller is responsible for deleting the return +// value using delete[]. Returns the ANSI string, or NULL if the +// input is NULL. +const char* String::Utf16ToAnsi(LPCWSTR utf16_str) { + if (!utf16_str) return NULL; + const int ansi_length = + WideCharToMultiByte(CP_ACP, 0, utf16_str, -1, + NULL, 0, NULL, NULL); + char* ansi = new char[ansi_length + 1]; + WideCharToMultiByte(CP_ACP, 0, utf16_str, -1, + ansi, ansi_length, NULL, NULL); + ansi[ansi_length] = 0; + return ansi; +} + +#endif // GTEST_OS_WINDOWS_MOBILE + +// Compares two C strings. Returns true iff they have the same content. +// +// Unlike strcmp(), this function can handle NULL argument(s). A NULL +// C string is considered different to any non-NULL C string, +// including the empty string. +bool String::CStringEquals(const char * lhs, const char * rhs) { + if ( lhs == NULL ) return rhs == NULL; + + if ( rhs == NULL ) return false; + + return strcmp(lhs, rhs) == 0; +} + +#if GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING + +// Converts an array of wide chars to a narrow string using the UTF-8 +// encoding, and streams the result to the given Message object. +static void StreamWideCharsToMessage(const wchar_t* wstr, size_t length, + Message* msg) { + // TODO(wan): consider allowing a testing::String object to + // contain '\0'. This will make it behave more like std::string, + // and will allow ToUtf8String() to return the correct encoding + // for '\0' s.t. we can get rid of the conditional here (and in + // several other places). + for (size_t i = 0; i != length; ) { // NOLINT + if (wstr[i] != L'\0') { + *msg << WideStringToUtf8(wstr + i, static_cast(length - i)); + while (i != length && wstr[i] != L'\0') + i++; + } else { + *msg << '\0'; + i++; + } + } +} + +#endif // GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING + +} // namespace internal + +#if GTEST_HAS_STD_WSTRING +// Converts the given wide string to a narrow string using the UTF-8 +// encoding, and streams the result to this Message object. +Message& Message::operator <<(const ::std::wstring& wstr) { + internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this); + return *this; +} +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_GLOBAL_WSTRING +// Converts the given wide string to a narrow string using the UTF-8 +// encoding, and streams the result to this Message object. +Message& Message::operator <<(const ::wstring& wstr) { + internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this); + return *this; +} +#endif // GTEST_HAS_GLOBAL_WSTRING + +// AssertionResult constructors. +// Used in EXPECT_TRUE/FALSE(assertion_result). +AssertionResult::AssertionResult(const AssertionResult& other) + : success_(other.success_), + message_(other.message_.get() != NULL ? 
+ new ::std::string(*other.message_) : + static_cast< ::std::string*>(NULL)) { +} + +// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. +AssertionResult AssertionResult::operator!() const { + AssertionResult negation(!success_); + if (message_.get() != NULL) + negation << *message_; + return negation; +} + +// Makes a successful assertion result. +AssertionResult AssertionSuccess() { + return AssertionResult(true); +} + +// Makes a failed assertion result. +AssertionResult AssertionFailure() { + return AssertionResult(false); +} + +// Makes a failed assertion result with the given failure message. +// Deprecated; use AssertionFailure() << message. +AssertionResult AssertionFailure(const Message& message) { + return AssertionFailure() << message; +} + +namespace internal { + +// Constructs and returns the message for an equality assertion +// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure. +// +// The first four parameters are the expressions used in the assertion +// and their values, as strings. For example, for ASSERT_EQ(foo, bar) +// where foo is 5 and bar is 6, we have: +// +// expected_expression: "foo" +// actual_expression: "bar" +// expected_value: "5" +// actual_value: "6" +// +// The ignoring_case parameter is true iff the assertion is a +// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will +// be inserted into the message. +AssertionResult EqFailure(const char* expected_expression, + const char* actual_expression, + const String& expected_value, + const String& actual_value, + bool ignoring_case) { + Message msg; + msg << "Value of: " << actual_expression; + if (actual_value != actual_expression) { + msg << "\n Actual: " << actual_value; + } + + msg << "\nExpected: " << expected_expression; + if (ignoring_case) { + msg << " (ignoring case)"; + } + if (expected_value != expected_expression) { + msg << "\nWhich is: " << expected_value; + } + + return AssertionFailure() << msg; +} + +// Constructs a failure message for Boolean assertions such as EXPECT_TRUE. +String GetBoolAssertionFailureMessage(const AssertionResult& assertion_result, + const char* expression_text, + const char* actual_predicate_value, + const char* expected_predicate_value) { + const char* actual_message = assertion_result.message(); + Message msg; + msg << "Value of: " << expression_text + << "\n Actual: " << actual_predicate_value; + if (actual_message[0] != '\0') + msg << " (" << actual_message << ")"; + msg << "\nExpected: " << expected_predicate_value; + return msg.GetString(); +} + +// Helper function for implementing ASSERT_NEAR. +AssertionResult DoubleNearPredFormat(const char* expr1, + const char* expr2, + const char* abs_error_expr, + double val1, + double val2, + double abs_error) { + const double diff = fabs(val1 - val2); + if (diff <= abs_error) return AssertionSuccess(); + + // TODO(wan): do not print the value of an expression if it's + // already a literal. + return AssertionFailure() + << "The difference between " << expr1 << " and " << expr2 + << " is " << diff << ", which exceeds " << abs_error_expr << ", where\n" + << expr1 << " evaluates to " << val1 << ",\n" + << expr2 << " evaluates to " << val2 << ", and\n" + << abs_error_expr << " evaluates to " << abs_error << "."; +} + + +// Helper template for implementing FloatLE() and DoubleLE(). 
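+// "Almost equal" here means equal to within a few ULPs (units in the last
+// place), as decided by FloatingPoint<RawType>::AlmostEquals(). The public
+// wrappers FloatLE()/DoubleLE() are predicate-formatters, typically used as,
+// e.g., EXPECT_PRED_FORMAT2(::testing::DoubleLE, low, high), where "low" and
+// "high" are arbitrary example names.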
+template <typename RawType>
+AssertionResult FloatingPointLE(const char* expr1,
+                                const char* expr2,
+                                RawType val1,
+                                RawType val2) {
+  // Returns success if val1 is less than val2,
+  if (val1 < val2) {
+    return AssertionSuccess();
+  }
+
+  // or if val1 is almost equal to val2.
+  const FloatingPoint<RawType> lhs(val1), rhs(val2);
+  if (lhs.AlmostEquals(rhs)) {
+    return AssertionSuccess();
+  }
+
+  // Note that the above two checks will both fail if either val1 or
+  // val2 is NaN, as the IEEE floating-point standard requires that
+  // any predicate involving a NaN must return false.
+
+  ::std::stringstream val1_ss;
+  val1_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+          << val1;
+
+  ::std::stringstream val2_ss;
+  val2_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+          << val2;
+
+  return AssertionFailure()
+      << "Expected: (" << expr1 << ") <= (" << expr2 << ")\n"
+      << "  Actual: " << StringStreamToString(&val1_ss) << " vs "
+      << StringStreamToString(&val2_ss);
+}
+
+}  // namespace internal
+
+// Asserts that val1 is less than, or almost equal to, val2.  Fails
+// otherwise.  In particular, it fails if either val1 or val2 is NaN.
+AssertionResult FloatLE(const char* expr1, const char* expr2,
+                        float val1, float val2) {
+  return internal::FloatingPointLE<float>(expr1, expr2, val1, val2);
+}
+
+// Asserts that val1 is less than, or almost equal to, val2.  Fails
+// otherwise.  In particular, it fails if either val1 or val2 is NaN.
+AssertionResult DoubleLE(const char* expr1, const char* expr2,
+                         double val1, double val2) {
+  return internal::FloatingPointLE<double>(expr1, expr2, val1, val2);
+}
+
+namespace internal {
+
+// The helper function for {ASSERT|EXPECT}_EQ with int or enum
+// arguments.
+AssertionResult CmpHelperEQ(const char* expected_expression,
+                            const char* actual_expression,
+                            BiggestInt expected,
+                            BiggestInt actual) {
+  if (expected == actual) {
+    return AssertionSuccess();
+  }
+
+  return EqFailure(expected_expression,
+                   actual_expression,
+                   FormatForComparisonFailureMessage(expected, actual),
+                   FormatForComparisonFailureMessage(actual, expected),
+                   false);
+}
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_?? with integer or enum arguments.  It is here
+// just to avoid copy-and-paste of similar code.
+#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
+AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
+                                   BiggestInt val1, BiggestInt val2) {\
+  if (val1 op val2) {\
+    return AssertionSuccess();\
+  } else {\
+    return AssertionFailure() \
+        << "Expected: (" << expr1 << ") " #op " (" << expr2\
+        << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\
+        << " vs " << FormatForComparisonFailureMessage(val2, val1);\
+  }\
+}
+
+// Implements the helper function for {ASSERT|EXPECT}_NE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(NE, !=)
+// Implements the helper function for {ASSERT|EXPECT}_LE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(LE, <=)
+// Implements the helper function for {ASSERT|EXPECT}_LT with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(LT, < )
+// Implements the helper function for {ASSERT|EXPECT}_GE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(GE, >=)
+// Implements the helper function for {ASSERT|EXPECT}_GT with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(GT, > )
+
+#undef GTEST_IMPL_CMP_HELPER_
+
+// The helper function for {ASSERT|EXPECT}_STREQ.
+AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const char* expected, + const char* actual) { + if (String::CStringEquals(expected, actual)) { + return AssertionSuccess(); + } + + return EqFailure(expected_expression, + actual_expression, + String::ShowCStringQuoted(expected), + String::ShowCStringQuoted(actual), + false); +} + +// The helper function for {ASSERT|EXPECT}_STRCASEEQ. +AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression, + const char* actual_expression, + const char* expected, + const char* actual) { + if (String::CaseInsensitiveCStringEquals(expected, actual)) { + return AssertionSuccess(); + } + + return EqFailure(expected_expression, + actual_expression, + String::ShowCStringQuoted(expected), + String::ShowCStringQuoted(actual), + true); +} + +// The helper function for {ASSERT|EXPECT}_STRNE. +AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2) { + if (!String::CStringEquals(s1, s2)) { + return AssertionSuccess(); + } else { + return AssertionFailure() << "Expected: (" << s1_expression << ") != (" + << s2_expression << "), actual: \"" + << s1 << "\" vs \"" << s2 << "\""; + } +} + +// The helper function for {ASSERT|EXPECT}_STRCASENE. +AssertionResult CmpHelperSTRCASENE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2) { + if (!String::CaseInsensitiveCStringEquals(s1, s2)) { + return AssertionSuccess(); + } else { + return AssertionFailure() + << "Expected: (" << s1_expression << ") != (" + << s2_expression << ") (ignoring case), actual: \"" + << s1 << "\" vs \"" << s2 << "\""; + } +} + +} // namespace internal + +namespace { + +// Helper functions for implementing IsSubString() and IsNotSubstring(). + +// This group of overloaded functions return true iff needle is a +// substring of haystack. NULL is considered a substring of itself +// only. + +bool IsSubstringPred(const char* needle, const char* haystack) { + if (needle == NULL || haystack == NULL) + return needle == haystack; + + return strstr(haystack, needle) != NULL; +} + +bool IsSubstringPred(const wchar_t* needle, const wchar_t* haystack) { + if (needle == NULL || haystack == NULL) + return needle == haystack; + + return wcsstr(haystack, needle) != NULL; +} + +// StringType here can be either ::std::string or ::std::wstring. +template +bool IsSubstringPred(const StringType& needle, + const StringType& haystack) { + return haystack.find(needle) != StringType::npos; +} + +// This function implements either IsSubstring() or IsNotSubstring(), +// depending on the value of the expected_to_be_substring parameter. +// StringType here can be const char*, const wchar_t*, ::std::string, +// or ::std::wstring. +template +AssertionResult IsSubstringImpl( + bool expected_to_be_substring, + const char* needle_expr, const char* haystack_expr, + const StringType& needle, const StringType& haystack) { + if (IsSubstringPred(needle, haystack) == expected_to_be_substring) + return AssertionSuccess(); + + const bool is_wide_string = sizeof(needle[0]) > 1; + const char* const begin_string_quote = is_wide_string ? "L\"" : "\""; + return AssertionFailure() + << "Value of: " << needle_expr << "\n" + << " Actual: " << begin_string_quote << needle << "\"\n" + << "Expected: " << (expected_to_be_substring ? 
"" : "not ") + << "a substring of " << haystack_expr << "\n" + << "Which is: " << begin_string_quote << haystack << "\""; +} + +} // namespace + +// IsSubstring() and IsNotSubstring() check whether needle is a +// substring of haystack (NULL is considered a substring of itself +// only), and return an appropriate error message when they fail. + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +#if GTEST_HAS_STD_WSTRING +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} +#endif // GTEST_HAS_STD_WSTRING + +namespace internal { + +#if GTEST_OS_WINDOWS + +namespace { + +// Helper function for IsHRESULT{SuccessFailure} predicates +AssertionResult HRESULTFailureHelper(const char* expr, + const char* expected, + long hr) { // NOLINT +# if GTEST_OS_WINDOWS_MOBILE + + // Windows CE doesn't support FormatMessage. + const char error_text[] = ""; + +# else + + // Looks up the human-readable system message for the HRESULT code + // and since we're not passing any params to FormatMessage, we don't + // want inserts expanded. + const DWORD kFlags = FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS; + const DWORD kBufSize = 4096; // String::Format can't exceed this length. + // Gets the system's human readable message string for this HRESULT. 
+  char error_text[kBufSize] = { '\0' };
+  DWORD message_length = ::FormatMessageA(kFlags,
+                                          0,  // no source, we're asking system
+                                          hr,  // the error
+                                          0,  // no line width restrictions
+                                          error_text,  // output buffer
+                                          kBufSize,  // buf size
+                                          NULL);  // no arguments for inserts
+  // Trims trailing white space (FormatMessage leaves a trailing cr-lf)
+  for (; message_length && IsSpace(error_text[message_length - 1]);
+          --message_length) {
+    error_text[message_length - 1] = '\0';
+  }
+
+# endif  // GTEST_OS_WINDOWS_MOBILE
+
+  const String error_hex(String::Format("0x%08X ", hr));
+  return ::testing::AssertionFailure()
+      << "Expected: " << expr << " " << expected << ".\n"
+      << "  Actual: " << error_hex << error_text << "\n";
+}
+
+}  // namespace
+
+AssertionResult IsHRESULTSuccess(const char* expr, long hr) {  // NOLINT
+  if (SUCCEEDED(hr)) {
+    return AssertionSuccess();
+  }
+  return HRESULTFailureHelper(expr, "succeeds", hr);
+}
+
+AssertionResult IsHRESULTFailure(const char* expr, long hr) {  // NOLINT
+  if (FAILED(hr)) {
+    return AssertionSuccess();
+  }
+  return HRESULTFailureHelper(expr, "fails", hr);
+}
+
+#endif  // GTEST_OS_WINDOWS
+
+// Utility functions for encoding Unicode text (wide strings) in
+// UTF-8.
+
+// A Unicode code-point can have up to 21 bits, and is encoded in UTF-8
+// like this:
+//
+// Code-point length   Encoding
+//   0 -  7 bits       0xxxxxxx
+//   8 - 11 bits       110xxxxx 10xxxxxx
+//  12 - 16 bits       1110xxxx 10xxxxxx 10xxxxxx
+//  17 - 21 bits       11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+
+// The maximum code-point a one-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint1 = (static_cast<UInt32>(1) <<  7) - 1;
+
+// The maximum code-point a two-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint2 = (static_cast<UInt32>(1) << (5 + 6)) - 1;
+
+// The maximum code-point a three-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint3 = (static_cast<UInt32>(1) << (4 + 2*6)) - 1;
+
+// The maximum code-point a four-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint4 = (static_cast<UInt32>(1) << (3 + 3*6)) - 1;
+
+// Chops off the n lowest bits from a bit pattern.  Returns the n
+// lowest bits.  As a side effect, the original bit pattern will be
+// shifted to the right by n bits.
+inline UInt32 ChopLowBits(UInt32* bits, int n) {
+  const UInt32 low_bits = *bits & ((static_cast<UInt32>(1) << n) - 1);
+  *bits >>= n;
+  return low_bits;
+}
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// code_point parameter is of type UInt32 because wchar_t may not be
+// wide enough to contain a code point.
+// The output buffer str must contain at least 32 characters.
+// The function returns the address of the output buffer.
+// If the code_point is not a valid Unicode code point
+// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'.
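+// For example, code point U+00E9 (LATIN SMALL LETTER E WITH ACUTE) needs
+// more than 7 bits, so it is emitted as the two-byte sequence 0xC3 0xA9.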
+char* CodePointToUtf8(UInt32 code_point, char* str) {
+  if (code_point <= kMaxCodePoint1) {
+    str[1] = '\0';
+    str[0] = static_cast<char>(code_point);                          // 0xxxxxxx
+  } else if (code_point <= kMaxCodePoint2) {
+    str[2] = '\0';
+    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[0] = static_cast<char>(0xC0 | code_point);                   // 110xxxxx
+  } else if (code_point <= kMaxCodePoint3) {
+    str[3] = '\0';
+    str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[0] = static_cast<char>(0xE0 | code_point);                   // 1110xxxx
+  } else if (code_point <= kMaxCodePoint4) {
+    str[4] = '\0';
+    str[3] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[0] = static_cast<char>(0xF0 | code_point);                   // 11110xxx
+  } else {
+    // The longest string String::Format can produce when invoked
+    // with these parameters is 28 characters long (not including
+    // the terminating nul character).  We are asking for a 32 character
+    // buffer just in case.  This is also enough for strncpy to
+    // null-terminate the destination string.
+    posix::StrNCpy(
+        str, String::Format("(Invalid Unicode 0x%X)", code_point).c_str(), 32);
+    str[31] = '\0';  // Makes sure no change in the format to strncpy leaves
+                     // the result unterminated.
+  }
+  return str;
+}
+
+// The following two functions only make sense if the system
+// uses UTF-16 for wide string encoding. All supported systems
+// with 16 bit wchar_t (Windows, Cygwin, Symbian OS) do use UTF-16.
+
+// Determines if the arguments constitute a UTF-16 surrogate pair
+// and thus should be combined into a single Unicode code point
+// using CreateCodePointFromUtf16SurrogatePair.
+inline bool IsUtf16SurrogatePair(wchar_t first, wchar_t second) {
+  return sizeof(wchar_t) == 2 &&
+      (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00;
+}
+
+// Creates a Unicode code point from a UTF-16 surrogate pair.
+inline UInt32 CreateCodePointFromUtf16SurrogatePair(wchar_t first,
+                                                    wchar_t second) {
+  const UInt32 mask = (1 << 10) - 1;
+  return (sizeof(wchar_t) == 2) ?
+      (((first & mask) << 10) | (second & mask)) + 0x10000 :
+      // This function should not be called when the condition is
+      // false, but we provide a sensible default in case it is.
+      static_cast<UInt32>(first);
+}
+
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+//   UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
+//   UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed. -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF-16 encoding
+// and contains invalid UTF-16 surrogate pairs, values in those pairs
+// will be encoded as individual Unicode characters from the Basic
+// Multilingual Plane.
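+// For example, on a UTF-16 platform the surrogate pair 0xD834 0xDD1E decodes
+// to code point U+1D11E, which is then emitted as the four-byte UTF-8
+// sequence 0xF0 0x9D 0x84 0x9E.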
+String WideStringToUtf8(const wchar_t* str, int num_chars) { + if (num_chars == -1) + num_chars = static_cast(wcslen(str)); + + ::std::stringstream stream; + for (int i = 0; i < num_chars; ++i) { + UInt32 unicode_code_point; + + if (str[i] == L'\0') { + break; + } else if (i + 1 < num_chars && IsUtf16SurrogatePair(str[i], str[i + 1])) { + unicode_code_point = CreateCodePointFromUtf16SurrogatePair(str[i], + str[i + 1]); + i++; + } else { + unicode_code_point = static_cast(str[i]); + } + + char buffer[32]; // CodePointToUtf8 requires a buffer this big. + stream << CodePointToUtf8(unicode_code_point, buffer); + } + return StringStreamToString(&stream); +} + +// Converts a wide C string to a String using the UTF-8 encoding. +// NULL will be converted to "(null)". +String String::ShowWideCString(const wchar_t * wide_c_str) { + if (wide_c_str == NULL) return String("(null)"); + + return String(internal::WideStringToUtf8(wide_c_str, -1).c_str()); +} + +// Similar to ShowWideCString(), except that this function encloses +// the converted string in double quotes. +String String::ShowWideCStringQuoted(const wchar_t* wide_c_str) { + if (wide_c_str == NULL) return String("(null)"); + + return String::Format("L\"%s\"", + String::ShowWideCString(wide_c_str).c_str()); +} + +// Compares two wide C strings. Returns true iff they have the same +// content. +// +// Unlike wcscmp(), this function can handle NULL argument(s). A NULL +// C string is considered different to any non-NULL C string, +// including the empty string. +bool String::WideCStringEquals(const wchar_t * lhs, const wchar_t * rhs) { + if (lhs == NULL) return rhs == NULL; + + if (rhs == NULL) return false; + + return wcscmp(lhs, rhs) == 0; +} + +// Helper function for *_STREQ on wide strings. +AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const wchar_t* expected, + const wchar_t* actual) { + if (String::WideCStringEquals(expected, actual)) { + return AssertionSuccess(); + } + + return EqFailure(expected_expression, + actual_expression, + String::ShowWideCStringQuoted(expected), + String::ShowWideCStringQuoted(actual), + false); +} + +// Helper function for *_STRNE on wide strings. +AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const wchar_t* s1, + const wchar_t* s2) { + if (!String::WideCStringEquals(s1, s2)) { + return AssertionSuccess(); + } + + return AssertionFailure() << "Expected: (" << s1_expression << ") != (" + << s2_expression << "), actual: " + << String::ShowWideCStringQuoted(s1) + << " vs " << String::ShowWideCStringQuoted(s2); +} + +// Compares two C strings, ignoring case. Returns true iff they have +// the same content. +// +// Unlike strcasecmp(), this function can handle NULL argument(s). A +// NULL C string is considered different to any non-NULL C string, +// including the empty string. +bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) { + if (lhs == NULL) + return rhs == NULL; + if (rhs == NULL) + return false; + return posix::StrCaseCmp(lhs, rhs) == 0; +} + + // Compares two wide C strings, ignoring case. Returns true iff they + // have the same content. + // + // Unlike wcscasecmp(), this function can handle NULL argument(s). + // A NULL C string is considered different to any non-NULL wide C string, + // including the empty string. + // NB: The implementations on different platforms slightly differ. 
+ // On windows, this method uses _wcsicmp which compares according to LC_CTYPE + // environment variable. On GNU platform this method uses wcscasecmp + // which compares according to LC_CTYPE category of the current locale. + // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the + // current locale. +bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs, + const wchar_t* rhs) { + if (lhs == NULL) return rhs == NULL; + + if (rhs == NULL) return false; + +#if GTEST_OS_WINDOWS + return _wcsicmp(lhs, rhs) == 0; +#elif GTEST_OS_LINUX && !GTEST_OS_LINUX_ANDROID + return wcscasecmp(lhs, rhs) == 0; +#else + // Android, Mac OS X and Cygwin don't define wcscasecmp. + // Other unknown OSes may not define it either. + wint_t left, right; + do { + left = towlower(*lhs++); + right = towlower(*rhs++); + } while (left && left == right); + return left == right; +#endif // OS selector +} + +// Compares this with another String. +// Returns < 0 if this is less than rhs, 0 if this is equal to rhs, or > 0 +// if this is greater than rhs. +int String::Compare(const String & rhs) const { + const char* const lhs_c_str = c_str(); + const char* const rhs_c_str = rhs.c_str(); + + if (lhs_c_str == NULL) { + return rhs_c_str == NULL ? 0 : -1; // NULL < anything except NULL + } else if (rhs_c_str == NULL) { + return 1; + } + + const size_t shorter_str_len = + length() <= rhs.length() ? length() : rhs.length(); + for (size_t i = 0; i != shorter_str_len; i++) { + if (lhs_c_str[i] < rhs_c_str[i]) { + return -1; + } else if (lhs_c_str[i] > rhs_c_str[i]) { + return 1; + } + } + return (length() < rhs.length()) ? -1 : + (length() > rhs.length()) ? 1 : 0; +} + +// Returns true iff this String ends with the given suffix. *Any* +// String is considered to end with a NULL or empty suffix. +bool String::EndsWith(const char* suffix) const { + if (suffix == NULL || CStringEquals(suffix, "")) return true; + + if (c_str() == NULL) return false; + + const size_t this_len = strlen(c_str()); + const size_t suffix_len = strlen(suffix); + return (this_len >= suffix_len) && + CStringEquals(c_str() + this_len - suffix_len, suffix); +} + +// Returns true iff this String ends with the given suffix, ignoring case. +// Any String is considered to end with a NULL or empty suffix. +bool String::EndsWithCaseInsensitive(const char* suffix) const { + if (suffix == NULL || CStringEquals(suffix, "")) return true; + + if (c_str() == NULL) return false; + + const size_t this_len = strlen(c_str()); + const size_t suffix_len = strlen(suffix); + return (this_len >= suffix_len) && + CaseInsensitiveCStringEquals(c_str() + this_len - suffix_len, suffix); +} + +// Formats a list of arguments to a String, using the same format +// spec string as for printf. +// +// We do not use the StringPrintf class as it is not universally +// available. +// +// The result is limited to 4096 characters (including the tailing 0). +// If 4096 characters are not enough to format the input, or if +// there's an error, "" is +// returned. +String String::Format(const char * format, ...) { + va_list args; + va_start(args, format); + + char buffer[4096]; + const int kBufferSize = sizeof(buffer)/sizeof(buffer[0]); + + // MSVC 8 deprecates vsnprintf(), so we want to suppress warning + // 4996 (deprecated function) there. +#ifdef _MSC_VER // We are using MSVC. +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4996) // Temporarily disables warning 4996. 
+ + const int size = vsnprintf(buffer, kBufferSize, format, args); + +# pragma warning(pop) // Restores the warning state. +#else // We are not using MSVC. + const int size = vsnprintf(buffer, kBufferSize, format, args); +#endif // _MSC_VER + va_end(args); + + // vsnprintf()'s behavior is not portable. When the buffer is not + // big enough, it returns a negative value in MSVC, and returns the + // needed buffer size on Linux. When there is an output error, it + // always returns a negative value. For simplicity, we lump the two + // error cases together. + if (size < 0 || size >= kBufferSize) { + return String(""); + } else { + return String(buffer, size); + } +} + +// Converts the buffer in a stringstream to a String, converting NUL +// bytes to "\\0" along the way. +String StringStreamToString(::std::stringstream* ss) { + const ::std::string& str = ss->str(); + const char* const start = str.c_str(); + const char* const end = start + str.length(); + + // We need to use a helper stringstream to do this transformation + // because String doesn't support push_back(). + ::std::stringstream helper; + for (const char* ch = start; ch != end; ++ch) { + if (*ch == '\0') { + helper << "\\0"; // Replaces NUL with "\\0"; + } else { + helper.put(*ch); + } + } + + return String(helper.str().c_str()); +} + +// Appends the user-supplied message to the Google-Test-generated message. +String AppendUserMessage(const String& gtest_msg, + const Message& user_msg) { + // Appends the user message if it's non-empty. + const String user_msg_string = user_msg.GetString(); + if (user_msg_string.empty()) { + return gtest_msg; + } + + Message msg; + msg << gtest_msg << "\n" << user_msg_string; + + return msg.GetString(); +} + +} // namespace internal + +// class TestResult + +// Creates an empty TestResult. +TestResult::TestResult() + : death_test_count_(0), + elapsed_time_(0) { +} + +// D'tor. +TestResult::~TestResult() { +} + +// Returns the i-th test part result among all the results. i can +// range from 0 to total_part_count() - 1. If i is not in that range, +// aborts the program. +const TestPartResult& TestResult::GetTestPartResult(int i) const { + if (i < 0 || i >= total_part_count()) + internal::posix::Abort(); + return test_part_results_.at(i); +} + +// Returns the i-th test property. i can range from 0 to +// test_property_count() - 1. If i is not in that range, aborts the +// program. +const TestProperty& TestResult::GetTestProperty(int i) const { + if (i < 0 || i >= test_property_count()) + internal::posix::Abort(); + return test_properties_.at(i); +} + +// Clears the test part results. +void TestResult::ClearTestPartResults() { + test_part_results_.clear(); +} + +// Adds a test part result to the list. +void TestResult::AddTestPartResult(const TestPartResult& test_part_result) { + test_part_results_.push_back(test_part_result); +} + +// Adds a test property to the list. If a property with the same key as the +// supplied property is already represented, the value of this test_property +// replaces the old value for that key. 
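+// For example, recording TestProperty("bug", "1234") and then
+// TestProperty("bug", "5678") leaves a single "bug" property whose value
+// is "5678".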
+void TestResult::RecordProperty(const TestProperty& test_property) { + if (!ValidateTestProperty(test_property)) { + return; + } + internal::MutexLock lock(&test_properites_mutex_); + const std::vector::iterator property_with_matching_key = + std::find_if(test_properties_.begin(), test_properties_.end(), + internal::TestPropertyKeyIs(test_property.key())); + if (property_with_matching_key == test_properties_.end()) { + test_properties_.push_back(test_property); + return; + } + property_with_matching_key->SetValue(test_property.value()); +} + +// Adds a failure if the key is a reserved attribute of Google Test +// testcase tags. Returns true if the property is valid. +bool TestResult::ValidateTestProperty(const TestProperty& test_property) { + internal::String key(test_property.key()); + if (key == "name" || key == "status" || key == "time" || key == "classname") { + ADD_FAILURE() + << "Reserved key used in RecordProperty(): " + << key + << " ('name', 'status', 'time', and 'classname' are reserved by " + << GTEST_NAME_ << ")"; + return false; + } + return true; +} + +// Clears the object. +void TestResult::Clear() { + test_part_results_.clear(); + test_properties_.clear(); + death_test_count_ = 0; + elapsed_time_ = 0; +} + +// Returns true iff the test failed. +bool TestResult::Failed() const { + for (int i = 0; i < total_part_count(); ++i) { + if (GetTestPartResult(i).failed()) + return true; + } + return false; +} + +// Returns true iff the test part fatally failed. +static bool TestPartFatallyFailed(const TestPartResult& result) { + return result.fatally_failed(); +} + +// Returns true iff the test fatally failed. +bool TestResult::HasFatalFailure() const { + return CountIf(test_part_results_, TestPartFatallyFailed) > 0; +} + +// Returns true iff the test part non-fatally failed. +static bool TestPartNonfatallyFailed(const TestPartResult& result) { + return result.nonfatally_failed(); +} + +// Returns true iff the test has a non-fatal failure. +bool TestResult::HasNonfatalFailure() const { + return CountIf(test_part_results_, TestPartNonfatallyFailed) > 0; +} + +// Gets the number of all test parts. This is the sum of the number +// of successful test parts and the number of failed test parts. +int TestResult::total_part_count() const { + return static_cast(test_part_results_.size()); +} + +// Returns the number of the test properties. +int TestResult::test_property_count() const { + return static_cast(test_properties_.size()); +} + +// class Test + +// Creates a Test object. + +// The c'tor saves the values of all Google Test flags. +Test::Test() + : gtest_flag_saver_(new internal::GTestFlagSaver) { +} + +// The d'tor restores the values of all Google Test flags. +Test::~Test() { + delete gtest_flag_saver_; +} + +// Sets up the test fixture. +// +// A sub-class may override this. +void Test::SetUp() { +} + +// Tears down the test fixture. +// +// A sub-class may override this. +void Test::TearDown() { +} + +// Allows user supplied key value pairs to be recorded for later output. +void Test::RecordProperty(const char* key, const char* value) { + UnitTest::GetInstance()->RecordPropertyForCurrentTest(key, value); +} + +// Allows user supplied key value pairs to be recorded for later output. 
+void Test::RecordProperty(const char* key, int value) { + Message value_message; + value_message << value; + RecordProperty(key, value_message.GetString().c_str()); +} + +namespace internal { + +void ReportFailureInUnknownLocation(TestPartResult::Type result_type, + const String& message) { + // This function is a friend of UnitTest and as such has access to + // AddTestPartResult. + UnitTest::GetInstance()->AddTestPartResult( + result_type, + NULL, // No info about the source file where the exception occurred. + -1, // We have no info on which line caused the exception. + message, + String()); // No stack trace, either. +} + +} // namespace internal + +// Google Test requires all tests in the same test case to use the same test +// fixture class. This function checks if the current test has the +// same fixture class as the first test in the current test case. If +// yes, it returns true; otherwise it generates a Google Test failure and +// returns false. +bool Test::HasSameFixtureClass() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + const TestCase* const test_case = impl->current_test_case(); + + // Info about the first test in the current test case. + const TestInfo* const first_test_info = test_case->test_info_list()[0]; + const internal::TypeId first_fixture_id = first_test_info->fixture_class_id_; + const char* const first_test_name = first_test_info->name(); + + // Info about the current test. + const TestInfo* const this_test_info = impl->current_test_info(); + const internal::TypeId this_fixture_id = this_test_info->fixture_class_id_; + const char* const this_test_name = this_test_info->name(); + + if (this_fixture_id != first_fixture_id) { + // Is the first test defined using TEST? + const bool first_is_TEST = first_fixture_id == internal::GetTestTypeId(); + // Is this test defined using TEST? + const bool this_is_TEST = this_fixture_id == internal::GetTestTypeId(); + + if (first_is_TEST || this_is_TEST) { + // The user mixed TEST and TEST_F in this test case - we'll tell + // him/her how to fix it. + + // Gets the name of the TEST and the name of the TEST_F. Note + // that first_is_TEST and this_is_TEST cannot both be true, as + // the fixture IDs are different for the two tests. + const char* const TEST_name = + first_is_TEST ? first_test_name : this_test_name; + const char* const TEST_F_name = + first_is_TEST ? this_test_name : first_test_name; + + ADD_FAILURE() + << "All tests in the same test case must use the same test fixture\n" + << "class, so mixing TEST_F and TEST in the same test case is\n" + << "illegal. In test case " << this_test_info->test_case_name() + << ",\n" + << "test " << TEST_F_name << " is defined using TEST_F but\n" + << "test " << TEST_name << " is defined using TEST. You probably\n" + << "want to change the TEST to TEST_F or move it to another test\n" + << "case."; + } else { + // The user defined two fixture classes with the same name in + // two namespaces - we'll tell him/her how to fix it. + ADD_FAILURE() + << "All tests in the same test case must use the same test fixture\n" + << "class. However, in test case " + << this_test_info->test_case_name() << ",\n" + << "you defined test " << first_test_name + << " and test " << this_test_name << "\n" + << "using two different test fixture classes. This can happen if\n" + << "the two classes are from different namespaces or translation\n" + << "units and have the same name. 
You should probably rename one\n" + << "of the classes to put the tests into different test cases."; + } + return false; + } + + return true; +} + +#if GTEST_HAS_SEH + +// Adds an "exception thrown" fatal failure to the current test. This +// function returns its result via an output parameter pointer because VC++ +// prohibits creation of objects with destructors on stack in functions +// using __try (see error C2712). +static internal::String* FormatSehExceptionMessage(DWORD exception_code, + const char* location) { + Message message; + message << "SEH exception with code 0x" << std::setbase(16) << + exception_code << std::setbase(10) << " thrown in " << location << "."; + + return new internal::String(message.GetString()); +} + +#endif // GTEST_HAS_SEH + +#if GTEST_HAS_EXCEPTIONS + +// Adds an "exception thrown" fatal failure to the current test. +static internal::String FormatCxxExceptionMessage(const char* description, + const char* location) { + Message message; + if (description != NULL) { + message << "C++ exception with description \"" << description << "\""; + } else { + message << "Unknown C++ exception"; + } + message << " thrown in " << location << "."; + + return message.GetString(); +} + +static internal::String PrintTestPartResultToString( + const TestPartResult& test_part_result); + +// A failed Google Test assertion will throw an exception of this type when +// GTEST_FLAG(throw_on_failure) is true (if exceptions are enabled). We +// derive it from std::runtime_error, which is for errors presumably +// detectable only at run time. Since std::runtime_error inherits from +// std::exception, many testing frameworks know how to extract and print the +// message inside it. +class GoogleTestFailureException : public ::std::runtime_error { + public: + explicit GoogleTestFailureException(const TestPartResult& failure) + : ::std::runtime_error(PrintTestPartResultToString(failure).c_str()) {} +}; +#endif // GTEST_HAS_EXCEPTIONS + +namespace internal { +// We put these helper functions in the internal namespace as IBM's xlC +// compiler rejects the code if they were declared static. + +// Runs the given method and handles SEH exceptions it throws, when +// SEH is supported; returns the 0-value for type Result in case of an +// SEH exception. (Microsoft compilers cannot handle SEH and C++ +// exceptions in the same function. Therefore, we provide a separate +// wrapper function for handling SEH exceptions.) +template +Result HandleSehExceptionsInMethodIfSupported( + T* object, Result (T::*method)(), const char* location) { +#if GTEST_HAS_SEH + __try { + return (object->*method)(); + } __except (internal::UnitTestOptions::GTestShouldProcessSEH( // NOLINT + GetExceptionCode())) { + // We create the exception message on the heap because VC++ prohibits + // creation of objects with destructors on stack in functions using __try + // (see error C2712). + internal::String* exception_message = FormatSehExceptionMessage( + GetExceptionCode(), location); + internal::ReportFailureInUnknownLocation(TestPartResult::kFatalFailure, + *exception_message); + delete exception_message; + return static_cast(0); + } +#else + (void)location; + return (object->*method)(); +#endif // GTEST_HAS_SEH +} + +// Runs the given method and catches and reports C++ and/or SEH-style +// exceptions, if they are supported; returns the 0-value for type +// Result in case of an SEH exception. 
+template +Result HandleExceptionsInMethodIfSupported( + T* object, Result (T::*method)(), const char* location) { + // NOTE: The user code can affect the way in which Google Test handles + // exceptions by setting GTEST_FLAG(catch_exceptions), but only before + // RUN_ALL_TESTS() starts. It is technically possible to check the flag + // after the exception is caught and either report or re-throw the + // exception based on the flag's value: + // + // try { + // // Perform the test method. + // } catch (...) { + // if (GTEST_FLAG(catch_exceptions)) + // // Report the exception as failure. + // else + // throw; // Re-throws the original exception. + // } + // + // However, the purpose of this flag is to allow the program to drop into + // the debugger when the exception is thrown. On most platforms, once the + // control enters the catch block, the exception origin information is + // lost and the debugger will stop the program at the point of the + // re-throw in this function -- instead of at the point of the original + // throw statement in the code under test. For this reason, we perform + // the check early, sacrificing the ability to affect Google Test's + // exception handling in the method where the exception is thrown. + if (internal::GetUnitTestImpl()->catch_exceptions()) { +#if GTEST_HAS_EXCEPTIONS + try { + return HandleSehExceptionsInMethodIfSupported(object, method, location); + } catch (const GoogleTestFailureException&) { // NOLINT + // This exception doesn't originate in code under test. It makes no + // sense to report it as a test failure. + throw; + } catch (const std::exception& e) { // NOLINT + internal::ReportFailureInUnknownLocation( + TestPartResult::kFatalFailure, + FormatCxxExceptionMessage(e.what(), location)); + } catch (...) { // NOLINT + internal::ReportFailureInUnknownLocation( + TestPartResult::kFatalFailure, + FormatCxxExceptionMessage(NULL, location)); + } + return static_cast(0); +#else + return HandleSehExceptionsInMethodIfSupported(object, method, location); +#endif // GTEST_HAS_EXCEPTIONS + } else { + return (object->*method)(); + } +} + +} // namespace internal + +// Runs the test and updates the test result. +void Test::Run() { + if (!HasSameFixtureClass()) return; + + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported(this, &Test::SetUp, "SetUp()"); + // We will run the test only if SetUp() was successful. + if (!HasFatalFailure()) { + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &Test::TestBody, "the test body"); + } + + // However, we want to clean up as much as possible. Hence we will + // always call TearDown(), even if SetUp() or the test body has + // failed. + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &Test::TearDown, "TearDown()"); +} + +// Returns true iff the current test has a fatal failure. +bool Test::HasFatalFailure() { + return internal::GetUnitTestImpl()->current_test_result()->HasFatalFailure(); +} + +// Returns true iff the current test has a non-fatal failure. +bool Test::HasNonfatalFailure() { + return internal::GetUnitTestImpl()->current_test_result()-> + HasNonfatalFailure(); +} + +// class TestInfo + +// Constructs a TestInfo object. It assumes ownership of the test factory +// object. 
+// TODO(vladl@google.com): Make a_test_case_name and a_name const string&'s +// to signify they cannot be NULLs. +TestInfo::TestInfo(const char* a_test_case_name, + const char* a_name, + const char* a_type_param, + const char* a_value_param, + internal::TypeId fixture_class_id, + internal::TestFactoryBase* factory) + : test_case_name_(a_test_case_name), + name_(a_name), + type_param_(a_type_param ? new std::string(a_type_param) : NULL), + value_param_(a_value_param ? new std::string(a_value_param) : NULL), + fixture_class_id_(fixture_class_id), + should_run_(false), + is_disabled_(false), + matches_filter_(false), + factory_(factory), + result_() {} + +// Destructs a TestInfo object. +TestInfo::~TestInfo() { delete factory_; } + +namespace internal { + +// Creates a new TestInfo object and registers it with Google Test; +// returns the created object. +// +// Arguments: +// +// test_case_name: name of the test case +// name: name of the test +// type_param: the name of the test's type parameter, or NULL if +// this is not a typed or a type-parameterized test. +// value_param: text representation of the test's value parameter, +// or NULL if this is not a value-parameterized test. +// fixture_class_id: ID of the test fixture class +// set_up_tc: pointer to the function that sets up the test case +// tear_down_tc: pointer to the function that tears down the test case +// factory: pointer to the factory that creates a test object. +// The newly created TestInfo instance will assume +// ownership of the factory object. +TestInfo* MakeAndRegisterTestInfo( + const char* test_case_name, const char* name, + const char* type_param, + const char* value_param, + TypeId fixture_class_id, + SetUpTestCaseFunc set_up_tc, + TearDownTestCaseFunc tear_down_tc, + TestFactoryBase* factory) { + TestInfo* const test_info = + new TestInfo(test_case_name, name, type_param, value_param, + fixture_class_id, factory); + GetUnitTestImpl()->AddTestInfo(set_up_tc, tear_down_tc, test_info); + return test_info; +} + +#if GTEST_HAS_PARAM_TEST +void ReportInvalidTestCaseType(const char* test_case_name, + const char* file, int line) { + Message errors; + errors + << "Attempted redefinition of test case " << test_case_name << ".\n" + << "All tests in the same test case must use the same test fixture\n" + << "class. However, in test case " << test_case_name << ", you tried\n" + << "to define a test using a fixture class different from the one\n" + << "used earlier. This can happen if the two fixture classes are\n" + << "from different namespaces and have the same name. You should\n" + << "probably rename one of the classes to put the tests into different\n" + << "test cases."; + + fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(), + errors.GetString().c_str()); +} +#endif // GTEST_HAS_PARAM_TEST + +} // namespace internal + +namespace { + +// A predicate that checks the test name of a TestInfo against a known +// value. +// +// This is used for implementation of the TestCase class only. We put +// it in the anonymous namespace to prevent polluting the outer +// namespace. +// +// TestNameIs is copyable. +class TestNameIs { + public: + // Constructor. + // + // TestNameIs has NO default constructor. + explicit TestNameIs(const char* name) + : name_(name) {} + + // Returns true iff the test name of test_info matches name_. 
+ bool operator()(const TestInfo * test_info) const { + return test_info && internal::String(test_info->name()).Compare(name_) == 0; + } + + private: + internal::String name_; +}; + +} // namespace + +namespace internal { + +// This method expands all parameterized tests registered with macros TEST_P +// and INSTANTIATE_TEST_CASE_P into regular tests and registers those. +// This will be done just once during the program runtime. +void UnitTestImpl::RegisterParameterizedTests() { +#if GTEST_HAS_PARAM_TEST + if (!parameterized_tests_registered_) { + parameterized_test_registry_.RegisterTests(); + parameterized_tests_registered_ = true; + } +#endif +} + +} // namespace internal + +// Creates the test object, runs it, records its result, and then +// deletes it. +void TestInfo::Run() { + if (!should_run_) return; + + // Tells UnitTest where to store test result. + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + impl->set_current_test_info(this); + + TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater(); + + // Notifies the unit test event listeners that a test is about to start. + repeater->OnTestStart(*this); + + const TimeInMillis start = internal::GetTimeInMillis(); + + impl->os_stack_trace_getter()->UponLeavingGTest(); + + // Creates the test object. + Test* const test = internal::HandleExceptionsInMethodIfSupported( + factory_, &internal::TestFactoryBase::CreateTest, + "the test fixture's constructor"); + + // Runs the test only if the test object was created and its + // constructor didn't generate a fatal failure. + if ((test != NULL) && !Test::HasFatalFailure()) { + // This doesn't throw as all user code that can throw are wrapped into + // exception handling code. + test->Run(); + } + + // Deletes the test object. + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + test, &Test::DeleteSelf_, "the test fixture's destructor"); + + result_.set_elapsed_time(internal::GetTimeInMillis() - start); + + // Notifies the unit test event listener that a test has just finished. + repeater->OnTestEnd(*this); + + // Tells UnitTest to stop associating assertion results to this + // test. + impl->set_current_test_info(NULL); +} + +// class TestCase + +// Gets the number of successful tests in this test case. +int TestCase::successful_test_count() const { + return CountIf(test_info_list_, TestPassed); +} + +// Gets the number of failed tests in this test case. +int TestCase::failed_test_count() const { + return CountIf(test_info_list_, TestFailed); +} + +int TestCase::disabled_test_count() const { + return CountIf(test_info_list_, TestDisabled); +} + +// Get the number of tests in this test case that should run. +int TestCase::test_to_run_count() const { + return CountIf(test_info_list_, ShouldRunTest); +} + +// Gets the number of all tests. +int TestCase::total_test_count() const { + return static_cast(test_info_list_.size()); +} + +// Creates a TestCase with the given name. +// +// Arguments: +// +// name: name of the test case +// a_type_param: the name of the test case's type parameter, or NULL if +// this is not a typed or a type-parameterized test case. +// set_up_tc: pointer to the function that sets up the test case +// tear_down_tc: pointer to the function that tears down the test case +TestCase::TestCase(const char* a_name, const char* a_type_param, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc) + : name_(a_name), + type_param_(a_type_param ? 
new std::string(a_type_param) : NULL), + set_up_tc_(set_up_tc), + tear_down_tc_(tear_down_tc), + should_run_(false), + elapsed_time_(0) { +} + +// Destructor of TestCase. +TestCase::~TestCase() { + // Deletes every Test in the collection. + ForEach(test_info_list_, internal::Delete); +} + +// Returns the i-th test among all the tests. i can range from 0 to +// total_test_count() - 1. If i is not in that range, returns NULL. +const TestInfo* TestCase::GetTestInfo(int i) const { + const int index = GetElementOr(test_indices_, i, -1); + return index < 0 ? NULL : test_info_list_[index]; +} + +// Returns the i-th test among all the tests. i can range from 0 to +// total_test_count() - 1. If i is not in that range, returns NULL. +TestInfo* TestCase::GetMutableTestInfo(int i) { + const int index = GetElementOr(test_indices_, i, -1); + return index < 0 ? NULL : test_info_list_[index]; +} + +// Adds a test to this test case. Will delete the test upon +// destruction of the TestCase object. +void TestCase::AddTestInfo(TestInfo * test_info) { + test_info_list_.push_back(test_info); + test_indices_.push_back(static_cast(test_indices_.size())); +} + +// Runs every test in this TestCase. +void TestCase::Run() { + if (!should_run_) return; + + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + impl->set_current_test_case(this); + + TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater(); + + repeater->OnTestCaseStart(*this); + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &TestCase::RunSetUpTestCase, "SetUpTestCase()"); + + const internal::TimeInMillis start = internal::GetTimeInMillis(); + for (int i = 0; i < total_test_count(); i++) { + GetMutableTestInfo(i)->Run(); + } + elapsed_time_ = internal::GetTimeInMillis() - start; + + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &TestCase::RunTearDownTestCase, "TearDownTestCase()"); + + repeater->OnTestCaseEnd(*this); + impl->set_current_test_case(NULL); +} + +// Clears the results of all tests in this test case. +void TestCase::ClearResult() { + ForEach(test_info_list_, TestInfo::ClearTestResult); +} + +// Shuffles the tests in this test case. +void TestCase::ShuffleTests(internal::Random* random) { + Shuffle(random, &test_indices_); +} + +// Restores the test order to before the first shuffle. +void TestCase::UnshuffleTests() { + for (size_t i = 0; i < test_indices_.size(); i++) { + test_indices_[i] = static_cast(i); + } +} + +// Formats a countable noun. Depending on its quantity, either the +// singular form or the plural form is used. e.g. +// +// FormatCountableNoun(1, "formula", "formuli") returns "1 formula". +// FormatCountableNoun(5, "book", "books") returns "5 books". +static internal::String FormatCountableNoun(int count, + const char * singular_form, + const char * plural_form) { + return internal::String::Format("%d %s", count, + count == 1 ? singular_form : plural_form); +} + +// Formats the count of tests. +static internal::String FormatTestCount(int test_count) { + return FormatCountableNoun(test_count, "test", "tests"); +} + +// Formats the count of test cases. +static internal::String FormatTestCaseCount(int test_case_count) { + return FormatCountableNoun(test_case_count, "test case", "test cases"); +} + +// Converts a TestPartResult::Type enum to human-friendly string +// representation. 
Both kNonFatalFailure and kFatalFailure are translated +// to "Failure", as the user usually doesn't care about the difference +// between the two when viewing the test result. +static const char * TestPartResultTypeToString(TestPartResult::Type type) { + switch (type) { + case TestPartResult::kSuccess: + return "Success"; + + case TestPartResult::kNonFatalFailure: + case TestPartResult::kFatalFailure: +#ifdef _MSC_VER + return "error: "; +#else + return "Failure\n"; +#endif + default: + return "Unknown result type"; + } +} + +// Prints a TestPartResult to a String. +static internal::String PrintTestPartResultToString( + const TestPartResult& test_part_result) { + return (Message() + << internal::FormatFileLocation(test_part_result.file_name(), + test_part_result.line_number()) + << " " << TestPartResultTypeToString(test_part_result.type()) + << test_part_result.message()).GetString(); +} + +// Prints a TestPartResult. +static void PrintTestPartResult(const TestPartResult& test_part_result) { + const internal::String& result = + PrintTestPartResultToString(test_part_result); + printf("%s\n", result.c_str()); + fflush(stdout); + // If the test program runs in Visual Studio or a debugger, the + // following statements add the test part result message to the Output + // window such that the user can double-click on it to jump to the + // corresponding source code location; otherwise they do nothing. +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + // We don't call OutputDebugString*() on Windows Mobile, as printing + // to stdout is done by OutputDebugString() there already - we don't + // want the same message printed twice. + ::OutputDebugStringA(result.c_str()); + ::OutputDebugStringA("\n"); +#endif +} + +// class PrettyUnitTestResultPrinter + +namespace internal { + +enum GTestColor { + COLOR_DEFAULT, + COLOR_RED, + COLOR_GREEN, + COLOR_YELLOW +}; + +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + +// Returns the character attribute for the given color. +WORD GetColorAttribute(GTestColor color) { + switch (color) { + case COLOR_RED: return FOREGROUND_RED; + case COLOR_GREEN: return FOREGROUND_GREEN; + case COLOR_YELLOW: return FOREGROUND_RED | FOREGROUND_GREEN; + default: return 0; + } +} + +#else + +// Returns the ANSI color code for the given color. COLOR_DEFAULT is +// an invalid input. +const char* GetAnsiColorCode(GTestColor color) { + switch (color) { + case COLOR_RED: return "1"; + case COLOR_GREEN: return "2"; + case COLOR_YELLOW: return "3"; + default: return NULL; + }; +} + +#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + +// Returns true iff Google Test should use colors in the output. +bool ShouldUseColor(bool stdout_is_tty) { + const char* const gtest_color = GTEST_FLAG(color).c_str(); + + if (String::CaseInsensitiveCStringEquals(gtest_color, "auto")) { +#if GTEST_OS_WINDOWS + // On Windows the TERM variable is usually not set, but the + // console there does support colors. + return stdout_is_tty; +#else + // On non-Windows platforms, we rely on the TERM variable. 
+ const char* const term = posix::GetEnv("TERM"); + const bool term_supports_color = + String::CStringEquals(term, "xterm") || + String::CStringEquals(term, "xterm-color") || + String::CStringEquals(term, "xterm-256color") || + String::CStringEquals(term, "screen") || + String::CStringEquals(term, "linux") || + String::CStringEquals(term, "cygwin"); + return stdout_is_tty && term_supports_color; +#endif // GTEST_OS_WINDOWS + } + + return String::CaseInsensitiveCStringEquals(gtest_color, "yes") || + String::CaseInsensitiveCStringEquals(gtest_color, "true") || + String::CaseInsensitiveCStringEquals(gtest_color, "t") || + String::CStringEquals(gtest_color, "1"); + // We take "yes", "true", "t", and "1" as meaning "yes". If the + // value is neither one of these nor "auto", we treat it as "no" to + // be conservative. +} + +// Helpers for printing colored strings to stdout. Note that on Windows, we +// cannot simply emit special characters and have the terminal change colors. +// This routine must actually emit the characters rather than return a string +// that would be colored when printed, as can be done on Linux. +void ColoredPrintf(GTestColor color, const char* fmt, ...) { + va_list args; + va_start(args, fmt); + +#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS + const bool use_color = false; +#else + static const bool in_color_mode = + ShouldUseColor(posix::IsATTY(posix::FileNo(stdout)) != 0); + const bool use_color = in_color_mode && (color != COLOR_DEFAULT); +#endif // GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS + // The '!= 0' comparison is necessary to satisfy MSVC 7.1. + + if (!use_color) { + vprintf(fmt, args); + va_end(args); + return; + } + +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE); + + // Gets the current text color. + CONSOLE_SCREEN_BUFFER_INFO buffer_info; + GetConsoleScreenBufferInfo(stdout_handle, &buffer_info); + const WORD old_color_attrs = buffer_info.wAttributes; + + // We need to flush the stream buffers into the console before each + // SetConsoleTextAttribute call lest it affect the text that is already + // printed but has not yet reached the console. + fflush(stdout); + SetConsoleTextAttribute(stdout_handle, + GetColorAttribute(color) | FOREGROUND_INTENSITY); + vprintf(fmt, args); + + fflush(stdout); + // Restores the text color. + SetConsoleTextAttribute(stdout_handle, old_color_attrs); +#else + printf("\033[0;3%sm", GetAnsiColorCode(color)); + vprintf(fmt, args); + printf("\033[m"); // Resets the terminal to default. +#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + va_end(args); +} + +void PrintFullTestCommentIfPresent(const TestInfo& test_info) { + const char* const type_param = test_info.type_param(); + const char* const value_param = test_info.value_param(); + + if (type_param != NULL || value_param != NULL) { + printf(", where "); + if (type_param != NULL) { + printf("TypeParam = %s", type_param); + if (value_param != NULL) + printf(" and "); + } + if (value_param != NULL) { + printf("GetParam() = %s", value_param); + } + } +} + +// This class implements the TestEventListener interface. +// +// Class PrettyUnitTestResultPrinter is copyable. +class PrettyUnitTestResultPrinter : public TestEventListener { + public: + PrettyUnitTestResultPrinter() {} + static void PrintTestName(const char * test_case, const char * test) { + printf("%s.%s", test_case, test); + } + + // The following methods override what's in the TestEventListener class. 
+ virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration); + virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test); + virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestCaseStart(const TestCase& test_case); + virtual void OnTestStart(const TestInfo& test_info); + virtual void OnTestPartResult(const TestPartResult& result); + virtual void OnTestEnd(const TestInfo& test_info); + virtual void OnTestCaseEnd(const TestCase& test_case); + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test); + virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration); + virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {} + + private: + static void PrintFailedTests(const UnitTest& unit_test); + + internal::String test_case_name_; +}; + + // Fired before each iteration of tests starts. +void PrettyUnitTestResultPrinter::OnTestIterationStart( + const UnitTest& unit_test, int iteration) { + if (GTEST_FLAG(repeat) != 1) + printf("\nRepeating all tests (iteration %d) . . .\n\n", iteration + 1); + + const char* const filter = GTEST_FLAG(filter).c_str(); + + // Prints the filter if it's not *. This reminds the user that some + // tests may be skipped. + if (!internal::String::CStringEquals(filter, kUniversalFilter)) { + ColoredPrintf(COLOR_YELLOW, + "Note: %s filter = %s\n", GTEST_NAME_, filter); + } + + if (internal::ShouldShard(kTestTotalShards, kTestShardIndex, false)) { + const Int32 shard_index = Int32FromEnvOrDie(kTestShardIndex, -1); + ColoredPrintf(COLOR_YELLOW, + "Note: This is test shard %d of %s.\n", + static_cast(shard_index) + 1, + internal::posix::GetEnv(kTestTotalShards)); + } + + if (GTEST_FLAG(shuffle)) { + ColoredPrintf(COLOR_YELLOW, + "Note: Randomizing tests' orders with a seed of %d .\n", + unit_test.random_seed()); + } + + ColoredPrintf(COLOR_GREEN, "[==========] "); + printf("Running %s from %s.\n", + FormatTestCount(unit_test.test_to_run_count()).c_str(), + FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str()); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnEnvironmentsSetUpStart( + const UnitTest& /*unit_test*/) { + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("Global test environment set-up.\n"); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestCase& test_case) { + test_case_name_ = test_case.name(); + const internal::String counts = + FormatCountableNoun(test_case.test_to_run_count(), "test", "tests"); + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("%s from %s", counts.c_str(), test_case_name_.c_str()); + if (test_case.type_param() == NULL) { + printf("\n"); + } else { + printf(", where TypeParam = %s\n", test_case.type_param()); + } + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) { + ColoredPrintf(COLOR_GREEN, "[ RUN ] "); + PrintTestName(test_case_name_.c_str(), test_info.name()); + printf("\n"); + fflush(stdout); +} + +// Called after an assertion failure. +void PrettyUnitTestResultPrinter::OnTestPartResult( + const TestPartResult& result) { + // If the test part succeeded, we don't need to do anything. + if (result.type() == TestPartResult::kSuccess) + return; + + // Print failure message from the assertion (e.g. expected this and got that). 
+ PrintTestPartResult(result); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) { + if (test_info.result()->Passed()) { + ColoredPrintf(COLOR_GREEN, "[ OK ] "); + } else { + ColoredPrintf(COLOR_RED, "[ FAILED ] "); + } + PrintTestName(test_case_name_.c_str(), test_info.name()); + if (test_info.result()->Failed()) + PrintFullTestCommentIfPresent(test_info); + + if (GTEST_FLAG(print_time)) { + printf(" (%s ms)\n", internal::StreamableToString( + test_info.result()->elapsed_time()).c_str()); + } else { + printf("\n"); + } + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestCase& test_case) { + if (!GTEST_FLAG(print_time)) return; + + test_case_name_ = test_case.name(); + const internal::String counts = + FormatCountableNoun(test_case.test_to_run_count(), "test", "tests"); + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("%s from %s (%s ms total)\n\n", + counts.c_str(), test_case_name_.c_str(), + internal::StreamableToString(test_case.elapsed_time()).c_str()); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnEnvironmentsTearDownStart( + const UnitTest& /*unit_test*/) { + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("Global test environment tear-down\n"); + fflush(stdout); +} + +// Internal helper for printing the list of failed tests. +void PrettyUnitTestResultPrinter::PrintFailedTests(const UnitTest& unit_test) { + const int failed_test_count = unit_test.failed_test_count(); + if (failed_test_count == 0) { + return; + } + + for (int i = 0; i < unit_test.total_test_case_count(); ++i) { + const TestCase& test_case = *unit_test.GetTestCase(i); + if (!test_case.should_run() || (test_case.failed_test_count() == 0)) { + continue; + } + for (int j = 0; j < test_case.total_test_count(); ++j) { + const TestInfo& test_info = *test_case.GetTestInfo(j); + if (!test_info.should_run() || test_info.result()->Passed()) { + continue; + } + ColoredPrintf(COLOR_RED, "[ FAILED ] "); + printf("%s.%s", test_case.name(), test_info.name()); + PrintFullTestCommentIfPresent(test_info); + printf("\n"); + } + } +} + +void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test, + int /*iteration*/) { + ColoredPrintf(COLOR_GREEN, "[==========] "); + printf("%s from %s ran.", + FormatTestCount(unit_test.test_to_run_count()).c_str(), + FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str()); + if (GTEST_FLAG(print_time)) { + printf(" (%s ms total)", + internal::StreamableToString(unit_test.elapsed_time()).c_str()); + } + printf("\n"); + ColoredPrintf(COLOR_GREEN, "[ PASSED ] "); + printf("%s.\n", FormatTestCount(unit_test.successful_test_count()).c_str()); + + int num_failures = unit_test.failed_test_count(); + if (!unit_test.Passed()) { + const int failed_test_count = unit_test.failed_test_count(); + ColoredPrintf(COLOR_RED, "[ FAILED ] "); + printf("%s, listed below:\n", FormatTestCount(failed_test_count).c_str()); + PrintFailedTests(unit_test); + printf("\n%2d FAILED %s\n", num_failures, + num_failures == 1 ? "TEST" : "TESTS"); + } + + int num_disabled = unit_test.disabled_test_count(); + if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) { + if (!num_failures) { + printf("\n"); // Add a spacer if no FAILURE banner is displayed. + } + ColoredPrintf(COLOR_YELLOW, + " YOU HAVE %d DISABLED %s\n\n", + num_disabled, + num_disabled == 1 ? "TEST" : "TESTS"); + } + // Ensure that Google Test output is printed before, e.g., heapchecker output. 
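+  // [Editorial aside: illustrative sketch, not part of the upstream gtest
+  // sources.] PrettyUnitTestResultPrinter above is just one TestEventListener;
+  // user code can observe the same events through the documented listener
+  // API. A minimal sketch is shown below (TersePrinter is an invented name);
+  // the fflush(stdout) referred to by the preceding comment follows right
+  // after this aside.
+#if 0
+#include <cstdio>
+#include "gtest/gtest.h"
+
+// Prints one line per test, mirroring the [ RUN ]/[ OK ] banners above.
+class TersePrinter : public ::testing::EmptyTestEventListener {
+ public:
+  virtual void OnTestStart(const ::testing::TestInfo& info) {
+    std::printf("starting %s.%s\n", info.test_case_name(), info.name());
+  }
+  virtual void OnTestEnd(const ::testing::TestInfo& info) {
+    std::printf("finished %s.%s: %s\n", info.test_case_name(), info.name(),
+                info.result()->Passed() ? "OK" : "FAILED");
+  }
+};
+
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  // Append() transfers ownership of the listener to Google Test.
+  ::testing::UnitTest::GetInstance()->listeners().Append(new TersePrinter);
+  return RUN_ALL_TESTS();
+}
+#endif  // editorial sketch
+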
+ fflush(stdout); +} + +// End PrettyUnitTestResultPrinter + +// class TestEventRepeater +// +// This class forwards events to other event listeners. +class TestEventRepeater : public TestEventListener { + public: + TestEventRepeater() : forwarding_enabled_(true) {} + virtual ~TestEventRepeater(); + void Append(TestEventListener *listener); + TestEventListener* Release(TestEventListener* listener); + + // Controls whether events will be forwarded to listeners_. Set to false + // in death test child processes. + bool forwarding_enabled() const { return forwarding_enabled_; } + void set_forwarding_enabled(bool enable) { forwarding_enabled_ = enable; } + + virtual void OnTestProgramStart(const UnitTest& unit_test); + virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration); + virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test); + virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test); + virtual void OnTestCaseStart(const TestCase& test_case); + virtual void OnTestStart(const TestInfo& test_info); + virtual void OnTestPartResult(const TestPartResult& result); + virtual void OnTestEnd(const TestInfo& test_info); + virtual void OnTestCaseEnd(const TestCase& test_case); + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test); + virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test); + virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration); + virtual void OnTestProgramEnd(const UnitTest& unit_test); + + private: + // Controls whether events will be forwarded to listeners_. Set to false + // in death test child processes. + bool forwarding_enabled_; + // The list of listeners that receive events. + std::vector listeners_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventRepeater); +}; + +TestEventRepeater::~TestEventRepeater() { + ForEach(listeners_, Delete); +} + +void TestEventRepeater::Append(TestEventListener *listener) { + listeners_.push_back(listener); +} + +// TODO(vladl@google.com): Factor the search functionality into Vector::Find. +TestEventListener* TestEventRepeater::Release(TestEventListener *listener) { + for (size_t i = 0; i < listeners_.size(); ++i) { + if (listeners_[i] == listener) { + listeners_.erase(listeners_.begin() + i); + return listener; + } + } + + return NULL; +} + +// Since most methods are very similar, use macros to reduce boilerplate. +// This defines a member that forwards the call to all listeners. +#define GTEST_REPEATER_METHOD_(Name, Type) \ +void TestEventRepeater::Name(const Type& parameter) { \ + if (forwarding_enabled_) { \ + for (size_t i = 0; i < listeners_.size(); i++) { \ + listeners_[i]->Name(parameter); \ + } \ + } \ +} +// This defines a member that forwards the call to all listeners in reverse +// order. 
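+// [Editorial aside: illustrative sketch, not part of the upstream gtest
+// sources.] The two repeater macros differ only in iteration order:
+// start-of-phase events are forwarded to listeners in registration order,
+// while end-of-phase events are forwarded in reverse, so the listener that
+// saw a set-up event first sees the matching tear-down event last. The
+// compiled-out sketch below illustrates that ordering with an invented Hook
+// type; the reverse-order macro announced in the preceding comment follows
+// immediately after this aside.
+#if 0
+#include <cstdio>
+#include <vector>
+
+struct Hook {
+  const char* name;
+  explicit Hook(const char* n) : name(n) {}
+  void OnStart() const { std::printf("start %s\n", name); }
+  void OnEnd() const   { std::printf("end   %s\n", name); }
+};
+
+int main() {
+  std::vector<Hook> hooks;
+  hooks.push_back(Hook("console printer"));
+  hooks.push_back(Hook("xml generator"));
+  // "Start" events: registration order.
+  for (size_t i = 0; i < hooks.size(); i++) hooks[i].OnStart();
+  // "End" events: reverse order, like GTEST_REVERSE_REPEATER_METHOD_.
+  for (int i = static_cast<int>(hooks.size()) - 1; i >= 0; i--) hooks[i].OnEnd();
+  return 0;
+}
+#endif  // editorial sketch
+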
+#define GTEST_REVERSE_REPEATER_METHOD_(Name, Type) \ +void TestEventRepeater::Name(const Type& parameter) { \ + if (forwarding_enabled_) { \ + for (int i = static_cast(listeners_.size()) - 1; i >= 0; i--) { \ + listeners_[i]->Name(parameter); \ + } \ + } \ +} + +GTEST_REPEATER_METHOD_(OnTestProgramStart, UnitTest) +GTEST_REPEATER_METHOD_(OnEnvironmentsSetUpStart, UnitTest) +GTEST_REPEATER_METHOD_(OnTestCaseStart, TestCase) +GTEST_REPEATER_METHOD_(OnTestStart, TestInfo) +GTEST_REPEATER_METHOD_(OnTestPartResult, TestPartResult) +GTEST_REPEATER_METHOD_(OnEnvironmentsTearDownStart, UnitTest) +GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsSetUpEnd, UnitTest) +GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsTearDownEnd, UnitTest) +GTEST_REVERSE_REPEATER_METHOD_(OnTestEnd, TestInfo) +GTEST_REVERSE_REPEATER_METHOD_(OnTestCaseEnd, TestCase) +GTEST_REVERSE_REPEATER_METHOD_(OnTestProgramEnd, UnitTest) + +#undef GTEST_REPEATER_METHOD_ +#undef GTEST_REVERSE_REPEATER_METHOD_ + +void TestEventRepeater::OnTestIterationStart(const UnitTest& unit_test, + int iteration) { + if (forwarding_enabled_) { + for (size_t i = 0; i < listeners_.size(); i++) { + listeners_[i]->OnTestIterationStart(unit_test, iteration); + } + } +} + +void TestEventRepeater::OnTestIterationEnd(const UnitTest& unit_test, + int iteration) { + if (forwarding_enabled_) { + for (int i = static_cast(listeners_.size()) - 1; i >= 0; i--) { + listeners_[i]->OnTestIterationEnd(unit_test, iteration); + } + } +} + +// End TestEventRepeater + +// This class generates an XML output file. +class XmlUnitTestResultPrinter : public EmptyTestEventListener { + public: + explicit XmlUnitTestResultPrinter(const char* output_file); + + virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration); + + private: + // Is c a whitespace character that is normalized to a space character + // when it appears in an XML attribute value? + static bool IsNormalizableWhitespace(char c) { + return c == 0x9 || c == 0xA || c == 0xD; + } + + // May c appear in a well-formed XML document? + static bool IsValidXmlCharacter(char c) { + return IsNormalizableWhitespace(c) || c >= 0x20; + } + + // Returns an XML-escaped copy of the input string str. If + // is_attribute is true, the text is meant to appear as an attribute + // value, and normalizable whitespace is preserved by replacing it + // with character references. + static String EscapeXml(const char* str, bool is_attribute); + + // Returns the given string with all characters invalid in XML removed. + static string RemoveInvalidXmlCharacters(const string& str); + + // Convenience wrapper around EscapeXml when str is an attribute value. + static String EscapeXmlAttribute(const char* str) { + return EscapeXml(str, true); + } + + // Convenience wrapper around EscapeXml when str is not an attribute value. + static String EscapeXmlText(const char* str) { return EscapeXml(str, false); } + + // Streams an XML CDATA section, escaping invalid CDATA sequences as needed. + static void OutputXmlCDataSection(::std::ostream* stream, const char* data); + + // Streams an XML representation of a TestInfo object. + static void OutputXmlTestInfo(::std::ostream* stream, + const char* test_case_name, + const TestInfo& test_info); + + // Prints an XML representation of a TestCase object + static void PrintXmlTestCase(FILE* out, const TestCase& test_case); + + // Prints an XML summary of unit_test to output stream out. 
+  static void PrintXmlUnitTest(FILE* out, const UnitTest& unit_test);
+
+  // Produces a string representing the test properties in a result as space
+  // delimited XML attributes based on the property key="value" pairs.
+  // When the String is not empty, it includes a space at the beginning,
+  // to delimit this attribute from prior attributes.
+  static String TestPropertiesAsXmlAttributes(const TestResult& result);
+
+  // The output file.
+  const String output_file_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter);
+};
+
+// Creates a new XmlUnitTestResultPrinter.
+XmlUnitTestResultPrinter::XmlUnitTestResultPrinter(const char* output_file)
+    : output_file_(output_file) {
+  if (output_file_.c_str() == NULL || output_file_.empty()) {
+    fprintf(stderr, "XML output file may not be null\n");
+    fflush(stderr);
+    exit(EXIT_FAILURE);
+  }
+}
+
+// Called after the unit test ends.
+void XmlUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+                                                  int /*iteration*/) {
+  FILE* xmlout = NULL;
+  FilePath output_file(output_file_);
+  FilePath output_dir(output_file.RemoveFileName());
+
+  if (output_dir.CreateDirectoriesRecursively()) {
+    xmlout = posix::FOpen(output_file_.c_str(), "w");
+  }
+  if (xmlout == NULL) {
+    // TODO(wan): report the reason of the failure.
+    //
+    // We don't do it for now as:
+    //
+    //   1. There is no urgent need for it.
+    //   2. It's a bit involved to make the errno variable thread-safe on
+    //      all three operating systems (Linux, Windows, and Mac OS).
+    //   3. To interpret the meaning of errno in a thread-safe way,
+    //      we need the strerror_r() function, which is not available on
+    //      Windows.
+    fprintf(stderr,
+            "Unable to open file \"%s\"\n",
+            output_file_.c_str());
+    fflush(stderr);
+    exit(EXIT_FAILURE);
+  }
+  PrintXmlUnitTest(xmlout, unit_test);
+  fclose(xmlout);
+}
+
+// Returns an XML-escaped copy of the input string str. If is_attribute
+// is true, the text is meant to appear as an attribute value, and
+// normalizable whitespace is preserved by replacing it with character
+// references.
+//
+// Invalid XML characters in str, if any, are stripped from the output.
+// It is expected that most, if not all, of the text processed by this
+// module will consist of ordinary English text.
+// If this module is ever modified to produce version 1.1 XML output,
+// most invalid characters can be retained using character references.
+// TODO(wan): It might be nice to have a minimally invasive, human-readable
+// escaping scheme for invalid characters, rather than dropping them.
+String XmlUnitTestResultPrinter::EscapeXml(const char* str, bool is_attribute) {
+  Message m;
+
+  if (str != NULL) {
+    for (const char* src = str; *src; ++src) {
+      switch (*src) {
+        case '<':
+          m << "&lt;";
+          break;
+        case '>':
+          m << "&gt;";
+          break;
+        case '&':
+          m << "&amp;";
+          break;
+        case '\'':
+          if (is_attribute)
+            m << "&apos;";
+          else
+            m << '\'';
+          break;
+        case '"':
+          if (is_attribute)
+            m << "&quot;";
+          else
+            m << '"';
+          break;
+        default:
+          if (IsValidXmlCharacter(*src)) {
+            if (is_attribute && IsNormalizableWhitespace(*src))
+              m << String::Format("&#x%02X;", unsigned(*src));
+            else
+              m << *src;
+          }
+          break;
+      }
+    }
+  }
+
+  return m.GetString();
+}
+
+// Returns the given string with all characters invalid in XML removed.
+// Currently invalid characters are dropped from the string. An
+// alternative is to replace them with certain characters such as . or ?.
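+// [Editorial aside: illustrative sketch, not part of the upstream gtest
+// sources.] EscapeXml() above substitutes the five XML-reserved characters
+// with their character references before the text is written into the
+// report. A standalone sketch of the same substitution on std::string is
+// shown below (EscapeForXmlAttribute is an invented helper, not a gtest
+// API); RemoveInvalidXmlCharacters() itself is defined right after this
+// aside.
+#if 0
+#include <cstdio>
+#include <string>
+
+static std::string EscapeForXmlAttribute(const std::string& in) {
+  std::string out;
+  for (std::string::size_type i = 0; i < in.size(); ++i) {
+    switch (in[i]) {
+      case '<':  out += "&lt;";   break;
+      case '>':  out += "&gt;";   break;
+      case '&':  out += "&amp;";  break;
+      case '\'': out += "&apos;"; break;
+      case '"':  out += "&quot;"; break;
+      default:   out += in[i];    break;
+    }
+  }
+  return out;
+}
+
+int main() {
+  // Prints: Expected: a &lt; b &amp;&amp; c == &quot;x&quot;
+  std::printf("%s\n",
+              EscapeForXmlAttribute("Expected: a < b && c == \"x\"").c_str());
+  return 0;
+}
+#endif  // editorial sketch
+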
+string XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters(const string& str) {
+  string output;
+  output.reserve(str.size());
+  for (string::const_iterator it = str.begin(); it != str.end(); ++it)
+    if (IsValidXmlCharacter(*it))
+      output.push_back(*it);
+
+  return output;
+}
+
+// The following routines generate an XML representation of a UnitTest
+// object.
+//
+// This is how Google Test concepts map to the DTD:
+//
+// <testsuites name="AllTests">        <-- corresponds to a UnitTest object
+//   <testsuite name="testcase-name">  <-- corresponds to a TestCase object
+//     <testcase name="test-name">     <-- corresponds to a TestInfo object
+//       <failure message="...">...</failure>
+//       <failure message="...">...</failure>
+//       <failure message="...">...</failure>
+//                                     <-- individual assertion failures
+//     </testcase>
+//   </testsuite>
+// </testsuites>
+
+// Formats the given time in milliseconds as seconds.
+std::string FormatTimeInMillisAsSeconds(TimeInMillis ms) {
+  ::std::stringstream ss;
+  ss << ms/1000.0;
+  return ss.str();
+}
+
+// Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
+void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream,
+                                                     const char* data) {
+  const char* segment = data;
+  *stream << "<![CDATA[";
+  for (;;) {
+    const char* const next_segment = strstr(segment, "]]>");
+    if (next_segment != NULL) {
+      stream->write(
+          segment, static_cast<std::streamsize>(next_segment - segment));
+      *stream << "]]>]]&gt;<![CDATA[";
+      segment = next_segment + strlen("]]>");
+    } else {
+      *stream << segment;
+      break;
+    }
+  }
+  *stream << "]]>";
+}
+
+// Prints an XML representation of a TestInfo object.
+// TODO(wan): There is also value in printing properties with the plain printer.
+void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream,
+                                                 const char* test_case_name,
+                                                 const TestInfo& test_info) {
+  const TestResult& result = *test_info.result();
+  *stream << "    <testcase name=\""
+          << EscapeXmlAttribute(test_info.name()).c_str() << "\"";
+
+  if (test_info.value_param() != NULL) {
+    *stream << " value_param=\""
+            << EscapeXmlAttribute(test_info.value_param()).c_str() << "\"";
+  }
+  if (test_info.type_param() != NULL) {
+    *stream << " type_param=\""
+            << EscapeXmlAttribute(test_info.type_param()).c_str() << "\"";
+  }
+
+  *stream << " status=\""
+          << (test_info.should_run() ? "run" : "notrun")
+          << "\" time=\""
+          << FormatTimeInMillisAsSeconds(result.elapsed_time())
+          << "\" classname=\"" << EscapeXmlAttribute(test_case_name).c_str()
+          << "\"" << TestPropertiesAsXmlAttributes(result).c_str();
+
+  int failures = 0;
+  for (int i = 0; i < result.total_part_count(); ++i) {
+    const TestPartResult& part = result.GetTestPartResult(i);
+    if (part.failed()) {
+      if (++failures == 1)
+        *stream << ">\n";
+      *stream << "      <failure message=\""
+              << EscapeXmlAttribute(part.summary()).c_str()
+              << "\" type=\"\">";
+      const string location = internal::FormatCompilerIndependentFileLocation(
+          part.file_name(), part.line_number());
+      const string message = location + "\n" + part.message();
+      OutputXmlCDataSection(stream,
+                            RemoveInvalidXmlCharacters(message).c_str());
+      *stream << "</failure>\n";
+    }
+  }
+
+  if (failures == 0)
+    *stream << " />\n";
+  else
+    *stream << "    </testcase>\n";
+}
+
+// Prints an XML representation of a TestCase object
+void XmlUnitTestResultPrinter::PrintXmlTestCase(FILE* out,
+                                                const TestCase& test_case) {
+  fprintf(out,
+          "  <testsuite name=\"%s\" tests=\"%d\" failures=\"%d\" "
+          "disabled=\"%d\" ",
+          EscapeXmlAttribute(test_case.name()).c_str(),
+          test_case.total_test_count(),
+          test_case.failed_test_count(),
+          test_case.disabled_test_count());
+  fprintf(out,
+          "errors=\"0\" time=\"%s\">\n",
+          FormatTimeInMillisAsSeconds(test_case.elapsed_time()).c_str());
+  for (int i = 0; i < test_case.total_test_count(); ++i) {
+    ::std::stringstream stream;
+    OutputXmlTestInfo(&stream, test_case.name(), *test_case.GetTestInfo(i));
+    fprintf(out, "%s", StringStreamToString(&stream).c_str());
+  }
+  fprintf(out, "  </testsuite>\n");
+}
+
+// Prints an XML summary of unit_test to output stream out.
+void XmlUnitTestResultPrinter::PrintXmlUnitTest(FILE* out,
+                                                const UnitTest& unit_test) {
+  fprintf(out, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
+  fprintf(out,
+          "<testsuites tests=\"%d\" failures=\"%d\" disabled=\"%d\" "
+          "errors=\"0\" time=\"%s\" ",
+          unit_test.total_test_count(),
+          unit_test.failed_test_count(),
+          unit_test.disabled_test_count(),
+          FormatTimeInMillisAsSeconds(unit_test.elapsed_time()).c_str());
+  if (GTEST_FLAG(shuffle)) {
+    fprintf(out, "random_seed=\"%d\" ", unit_test.random_seed());
+  }
+  fprintf(out, "name=\"AllTests\">\n");
+  for (int i = 0; i < unit_test.total_test_case_count(); ++i)
+    PrintXmlTestCase(out, *unit_test.GetTestCase(i));
+  fprintf(out, "</testsuites>\n");
+}
+
+// Produces a string representing the test properties in a result as space
+// delimited XML attributes based on the property key="value" pairs.
+String XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes(
+    const TestResult& result) {
+  Message attributes;
+  for (int i = 0; i < result.test_property_count(); ++i) {
+    const TestProperty& property = result.GetTestProperty(i);
+    attributes << " " << property.key() << "="
+        << "\"" << EscapeXmlAttribute(property.value()) << "\"";
+  }
+  return attributes.GetString();
+}
+
+// End XmlUnitTestResultPrinter
+
+#if GTEST_CAN_STREAM_RESULTS_
+
+// Streams test results to the given port on the given host machine.
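+// [Editorial aside, not part of the upstream gtest sources.] For reference,
+// the report produced by PrintXmlUnitTest() above has roughly the following
+// shape when a binary is run with --gtest_output=xml:report.xml; the names
+// and numbers below are invented for illustration:
+//
+//   <?xml version="1.0" encoding="UTF-8"?>
+//   <testsuites tests="2" failures="1" disabled="0" errors="0" time="0.035"
+//               name="AllTests">
+//     <testsuite name="FooTest" tests="2" failures="1" disabled="0"
+//                errors="0" time="0.03">
+//       <testcase name="DoesAbc" status="run" time="0.011" classname="FooTest" />
+//       <testcase name="DoesXyz" status="run" time="0.019" classname="FooTest">
+//         <failure message="..." type=""><![CDATA[foo_test.cc:42
+//   Expected: ...]]></failure>
+//       </testcase>
+//     </testsuite>
+//   </testsuites>
+//
+// The StreamingListener declared next serves a different purpose: it sends
+// one line per event to a remote host while the tests run, instead of
+// writing a file at the end.
+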
+class StreamingListener : public EmptyTestEventListener { + public: + // Escapes '=', '&', '%', and '\n' characters in str as "%xx". + static string UrlEncode(const char* str); + + StreamingListener(const string& host, const string& port) + : sockfd_(-1), host_name_(host), port_num_(port) { + MakeConnection(); + Send("gtest_streaming_protocol_version=1.0\n"); + } + + virtual ~StreamingListener() { + if (sockfd_ != -1) + CloseConnection(); + } + + void OnTestProgramStart(const UnitTest& /* unit_test */) { + Send("event=TestProgramStart\n"); + } + + void OnTestProgramEnd(const UnitTest& unit_test) { + // Note that Google Test current only report elapsed time for each + // test iteration, not for the entire test program. + Send(String::Format("event=TestProgramEnd&passed=%d\n", + unit_test.Passed())); + + // Notify the streaming server to stop. + CloseConnection(); + } + + void OnTestIterationStart(const UnitTest& /* unit_test */, int iteration) { + Send(String::Format("event=TestIterationStart&iteration=%d\n", + iteration)); + } + + void OnTestIterationEnd(const UnitTest& unit_test, int /* iteration */) { + Send(String::Format("event=TestIterationEnd&passed=%d&elapsed_time=%sms\n", + unit_test.Passed(), + StreamableToString(unit_test.elapsed_time()).c_str())); + } + + void OnTestCaseStart(const TestCase& test_case) { + Send(String::Format("event=TestCaseStart&name=%s\n", test_case.name())); + } + + void OnTestCaseEnd(const TestCase& test_case) { + Send(String::Format("event=TestCaseEnd&passed=%d&elapsed_time=%sms\n", + test_case.Passed(), + StreamableToString(test_case.elapsed_time()).c_str())); + } + + void OnTestStart(const TestInfo& test_info) { + Send(String::Format("event=TestStart&name=%s\n", test_info.name())); + } + + void OnTestEnd(const TestInfo& test_info) { + Send(String::Format( + "event=TestEnd&passed=%d&elapsed_time=%sms\n", + (test_info.result())->Passed(), + StreamableToString((test_info.result())->elapsed_time()).c_str())); + } + + void OnTestPartResult(const TestPartResult& test_part_result) { + const char* file_name = test_part_result.file_name(); + if (file_name == NULL) + file_name = ""; + Send(String::Format("event=TestPartResult&file=%s&line=%d&message=", + UrlEncode(file_name).c_str(), + test_part_result.line_number())); + Send(UrlEncode(test_part_result.message()) + "\n"); + } + + private: + // Creates a client socket and connects to the server. + void MakeConnection(); + + // Closes the socket. + void CloseConnection() { + GTEST_CHECK_(sockfd_ != -1) + << "CloseConnection() can be called only when there is a connection."; + + close(sockfd_); + sockfd_ = -1; + } + + // Sends a string to the socket. + void Send(const string& message) { + GTEST_CHECK_(sockfd_ != -1) + << "Send() can be called only when there is a connection."; + + const int len = static_cast(message.length()); + if (write(sockfd_, message.c_str(), len) != len) { + GTEST_LOG_(WARNING) + << "stream_result_to: failed to stream to " + << host_name_ << ":" << port_num_; + } + } + + int sockfd_; // socket file descriptor + const string host_name_; + const string port_num_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener); +}; // class StreamingListener + +// Checks if str contains '=', '&', '%' or '\n' characters. If yes, +// replaces them by "%xx" where xx is their hexadecimal value. For +// example, replaces "=" with "%3D". This algorithm is O(strlen(str)) +// in both time and space -- important as the input str may contain an +// arbitrarily long test failure message and stack trace. 
+string StreamingListener::UrlEncode(const char* str) { + string result; + result.reserve(strlen(str) + 1); + for (char ch = *str; ch != '\0'; ch = *++str) { + switch (ch) { + case '%': + case '=': + case '&': + case '\n': + result.append(String::Format("%%%02x", static_cast(ch))); + break; + default: + result.push_back(ch); + break; + } + } + return result; +} + +void StreamingListener::MakeConnection() { + GTEST_CHECK_(sockfd_ == -1) + << "MakeConnection() can't be called when there is already a connection."; + + addrinfo hints; + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_UNSPEC; // To allow both IPv4 and IPv6 addresses. + hints.ai_socktype = SOCK_STREAM; + addrinfo* servinfo = NULL; + + // Use the getaddrinfo() to get a linked list of IP addresses for + // the given host name. + const int error_num = getaddrinfo( + host_name_.c_str(), port_num_.c_str(), &hints, &servinfo); + if (error_num != 0) { + GTEST_LOG_(WARNING) << "stream_result_to: getaddrinfo() failed: " + << gai_strerror(error_num); + } + + // Loop through all the results and connect to the first we can. + for (addrinfo* cur_addr = servinfo; sockfd_ == -1 && cur_addr != NULL; + cur_addr = cur_addr->ai_next) { + sockfd_ = socket( + cur_addr->ai_family, cur_addr->ai_socktype, cur_addr->ai_protocol); + if (sockfd_ != -1) { + // Connect the client socket to the server socket. + if (connect(sockfd_, cur_addr->ai_addr, cur_addr->ai_addrlen) == -1) { + close(sockfd_); + sockfd_ = -1; + } + } + } + + freeaddrinfo(servinfo); // all done with this structure + + if (sockfd_ == -1) { + GTEST_LOG_(WARNING) << "stream_result_to: failed to connect to " + << host_name_ << ":" << port_num_; + } +} + +// End of class Streaming Listener +#endif // GTEST_CAN_STREAM_RESULTS__ + +// Class ScopedTrace + +// Pushes the given source file location and message onto a per-thread +// trace stack maintained by Google Test. +// L < UnitTest::mutex_ +ScopedTrace::ScopedTrace(const char* file, int line, const Message& message) { + TraceInfo trace; + trace.file = file; + trace.line = line; + trace.message = message.GetString(); + + UnitTest::GetInstance()->PushGTestTrace(trace); +} + +// Pops the info pushed by the c'tor. +// L < UnitTest::mutex_ +ScopedTrace::~ScopedTrace() { + UnitTest::GetInstance()->PopGTestTrace(); +} + + +// class OsStackTraceGetter + +// Returns the current OS stack trace as a String. Parameters: +// +// max_depth - the maximum number of stack frames to be included +// in the trace. +// skip_count - the number of top frames to be skipped; doesn't count +// against max_depth. +// +// L < mutex_ +// We use "L < mutex_" to denote that the function may acquire mutex_. +String OsStackTraceGetter::CurrentStackTrace(int, int) { + return String(""); +} + +// L < mutex_ +void OsStackTraceGetter::UponLeavingGTest() { +} + +const char* const +OsStackTraceGetter::kElidedFramesMarker = + "... " GTEST_NAME_ " internal frames ..."; + +} // namespace internal + +// class TestEventListeners + +TestEventListeners::TestEventListeners() + : repeater_(new internal::TestEventRepeater()), + default_result_printer_(NULL), + default_xml_generator_(NULL) { +} + +TestEventListeners::~TestEventListeners() { delete repeater_; } + +// Returns the standard listener responsible for the default console +// output. Can be removed from the listeners list to shut down default +// console output. Note that removing this object from the listener list +// with Release transfers its ownership to the user. 
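+// [Editorial aside: illustrative sketch, not part of the upstream gtest
+// sources.] StreamingListener above is the client side of
+// --gtest_stream_result_to=HOST:PORT: it url-encodes each event and writes
+// it to a TCP socket. A minimal POSIX-only receiver for those event lines
+// might look like the compiled-out sketch below (the port 9093 and buffer
+// size are arbitrary choices for the example); the TestEventListeners
+// implementation continues right after this aside.
+#if 0
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+int main() {
+  const int listen_fd = socket(AF_INET, SOCK_STREAM, 0);
+  if (listen_fd == -1) return 1;
+
+  sockaddr_in addr;
+  memset(&addr, 0, sizeof(addr));
+  addr.sin_family = AF_INET;
+  addr.sin_addr.s_addr = htonl(INADDR_ANY);
+  addr.sin_port = htons(9093);
+
+  if (bind(listen_fd, reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) == -1 ||
+      listen(listen_fd, 1) == -1)
+    return 1;
+
+  const int conn_fd = accept(listen_fd, NULL, NULL);
+  if (conn_fd == -1) return 1;
+
+  // Echo the "event=...&..." lines sent by StreamingListener to stdout.
+  char buf[1024];
+  ssize_t n;
+  while ((n = read(conn_fd, buf, sizeof(buf))) > 0)
+    fwrite(buf, 1, static_cast<size_t>(n), stdout);
+
+  close(conn_fd);
+  close(listen_fd);
+  return 0;
+}
+#endif  // editorial sketch
+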
+void TestEventListeners::Append(TestEventListener* listener) { + repeater_->Append(listener); +} + +// Removes the given event listener from the list and returns it. It then +// becomes the caller's responsibility to delete the listener. Returns +// NULL if the listener is not found in the list. +TestEventListener* TestEventListeners::Release(TestEventListener* listener) { + if (listener == default_result_printer_) + default_result_printer_ = NULL; + else if (listener == default_xml_generator_) + default_xml_generator_ = NULL; + return repeater_->Release(listener); +} + +// Returns repeater that broadcasts the TestEventListener events to all +// subscribers. +TestEventListener* TestEventListeners::repeater() { return repeater_; } + +// Sets the default_result_printer attribute to the provided listener. +// The listener is also added to the listener list and previous +// default_result_printer is removed from it and deleted. The listener can +// also be NULL in which case it will not be added to the list. Does +// nothing if the previous and the current listener objects are the same. +void TestEventListeners::SetDefaultResultPrinter(TestEventListener* listener) { + if (default_result_printer_ != listener) { + // It is an error to pass this method a listener that is already in the + // list. + delete Release(default_result_printer_); + default_result_printer_ = listener; + if (listener != NULL) + Append(listener); + } +} + +// Sets the default_xml_generator attribute to the provided listener. The +// listener is also added to the listener list and previous +// default_xml_generator is removed from it and deleted. The listener can +// also be NULL in which case it will not be added to the list. Does +// nothing if the previous and the current listener objects are the same. +void TestEventListeners::SetDefaultXmlGenerator(TestEventListener* listener) { + if (default_xml_generator_ != listener) { + // It is an error to pass this method a listener that is already in the + // list. + delete Release(default_xml_generator_); + default_xml_generator_ = listener; + if (listener != NULL) + Append(listener); + } +} + +// Controls whether events will be forwarded by the repeater to the +// listeners in the list. +bool TestEventListeners::EventForwardingEnabled() const { + return repeater_->forwarding_enabled(); +} + +void TestEventListeners::SuppressEventForwarding() { + repeater_->set_forwarding_enabled(false); +} + +// class UnitTest + +// Gets the singleton UnitTest object. The first time this method is +// called, a UnitTest object is constructed and returned. Consecutive +// calls will return the same object. +// +// We don't protect this under mutex_ as a user is not supposed to +// call this before main() starts, from which point on the return +// value will never change. +UnitTest * UnitTest::GetInstance() { + // When compiled with MSVC 7.1 in optimized mode, destroying the + // UnitTest object upon exiting the program messes up the exit code, + // causing successful tests to appear failed. We have to use a + // different implementation in this case to bypass the compiler bug. + // This implementation makes the compiler happy, at the cost of + // leaking the UnitTest object. + + // CodeGear C++Builder insists on a public destructor for the + // default implementation. Use this implementation to keep good OO + // design with private destructor. 
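+// [Editorial aside: illustrative sketch, not part of the upstream gtest
+// sources.] Release() above is what makes it possible to swap out the
+// default console printer: ownership of the released listener returns to the
+// caller, who must delete it. The usual pattern, using only the documented
+// calls defined in this file, is shown below; UnitTest::GetInstance()
+// continues right after this aside.
+#if 0
+#include "gtest/gtest.h"
+
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+
+  ::testing::TestEventListeners& listeners =
+      ::testing::UnitTest::GetInstance()->listeners();
+  // Removing (and deleting) the default printer silences gtest's own
+  // console output; any listeners appended afterwards still receive events.
+  delete listeners.Release(listeners.default_result_printer());
+
+  return RUN_ALL_TESTS();
+}
+#endif  // editorial sketch
+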
+ +#if (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__) + static UnitTest* const instance = new UnitTest; + return instance; +#else + static UnitTest instance; + return &instance; +#endif // (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__) +} + +// Gets the number of successful test cases. +int UnitTest::successful_test_case_count() const { + return impl()->successful_test_case_count(); +} + +// Gets the number of failed test cases. +int UnitTest::failed_test_case_count() const { + return impl()->failed_test_case_count(); +} + +// Gets the number of all test cases. +int UnitTest::total_test_case_count() const { + return impl()->total_test_case_count(); +} + +// Gets the number of all test cases that contain at least one test +// that should run. +int UnitTest::test_case_to_run_count() const { + return impl()->test_case_to_run_count(); +} + +// Gets the number of successful tests. +int UnitTest::successful_test_count() const { + return impl()->successful_test_count(); +} + +// Gets the number of failed tests. +int UnitTest::failed_test_count() const { return impl()->failed_test_count(); } + +// Gets the number of disabled tests. +int UnitTest::disabled_test_count() const { + return impl()->disabled_test_count(); +} + +// Gets the number of all tests. +int UnitTest::total_test_count() const { return impl()->total_test_count(); } + +// Gets the number of tests that should run. +int UnitTest::test_to_run_count() const { return impl()->test_to_run_count(); } + +// Gets the elapsed time, in milliseconds. +internal::TimeInMillis UnitTest::elapsed_time() const { + return impl()->elapsed_time(); +} + +// Returns true iff the unit test passed (i.e. all test cases passed). +bool UnitTest::Passed() const { return impl()->Passed(); } + +// Returns true iff the unit test failed (i.e. some test case failed +// or something outside of all tests failed). +bool UnitTest::Failed() const { return impl()->Failed(); } + +// Gets the i-th test case among all the test cases. i can range from 0 to +// total_test_case_count() - 1. If i is not in that range, returns NULL. +const TestCase* UnitTest::GetTestCase(int i) const { + return impl()->GetTestCase(i); +} + +// Gets the i-th test case among all the test cases. i can range from 0 to +// total_test_case_count() - 1. If i is not in that range, returns NULL. +TestCase* UnitTest::GetMutableTestCase(int i) { + return impl()->GetMutableTestCase(i); +} + +// Returns the list of event listeners that can be used to track events +// inside Google Test. +TestEventListeners& UnitTest::listeners() { + return *impl()->listeners(); +} + +// Registers and returns a global test environment. When a test +// program is run, all global test environments will be set-up in the +// order they were registered. After all tests in the program have +// finished, all global test environments will be torn-down in the +// *reverse* order they were registered. +// +// The UnitTest object takes ownership of the given environment. +// +// We don't protect this under mutex_, as we only support calling it +// from the main thread. +Environment* UnitTest::AddEnvironment(Environment* env) { + if (env == NULL) { + return NULL; + } + + impl_->environments().push_back(env); + return env; +} + +// Adds a TestPartResult to the current TestResult object. All Google Test +// assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) eventually call +// this to report their results. The user code should use the +// assertion macros instead of calling this directly. 
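+// [Editorial aside: illustrative sketch, not part of the upstream gtest
+// sources.] AddEnvironment() above is normally reached through
+// ::testing::AddGlobalTestEnvironment(); a minimal usage sketch follows
+// (DatabaseEnvironment is an invented name). AddTestPartResult() resumes
+// right after this aside.
+#if 0
+#include "gtest/gtest.h"
+
+class DatabaseEnvironment : public ::testing::Environment {
+ public:
+  virtual void SetUp() { /* e.g. open a connection shared by all tests */ }
+  virtual void TearDown() { /* e.g. close it again */ }
+};
+
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  // Ownership passes to gtest.  SetUp() runs before the first test of each
+  // iteration; TearDown() runs after the last one, and environments are torn
+  // down in reverse registration order.
+  ::testing::AddGlobalTestEnvironment(new DatabaseEnvironment);
+  return RUN_ALL_TESTS();
+}
+#endif  // editorial sketch
+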
+// L < mutex_ +void UnitTest::AddTestPartResult(TestPartResult::Type result_type, + const char* file_name, + int line_number, + const internal::String& message, + const internal::String& os_stack_trace) { + Message msg; + msg << message; + + internal::MutexLock lock(&mutex_); + if (impl_->gtest_trace_stack().size() > 0) { + msg << "\n" << GTEST_NAME_ << " trace:"; + + for (int i = static_cast(impl_->gtest_trace_stack().size()); + i > 0; --i) { + const internal::TraceInfo& trace = impl_->gtest_trace_stack()[i - 1]; + msg << "\n" << internal::FormatFileLocation(trace.file, trace.line) + << " " << trace.message; + } + } + + if (os_stack_trace.c_str() != NULL && !os_stack_trace.empty()) { + msg << internal::kStackTraceMarker << os_stack_trace; + } + + const TestPartResult result = + TestPartResult(result_type, file_name, line_number, + msg.GetString().c_str()); + impl_->GetTestPartResultReporterForCurrentThread()-> + ReportTestPartResult(result); + + if (result_type != TestPartResult::kSuccess) { + // gtest_break_on_failure takes precedence over + // gtest_throw_on_failure. This allows a user to set the latter + // in the code (perhaps in order to use Google Test assertions + // with another testing framework) and specify the former on the + // command line for debugging. + if (GTEST_FLAG(break_on_failure)) { +#if GTEST_OS_WINDOWS + // Using DebugBreak on Windows allows gtest to still break into a debugger + // when a failure happens and both the --gtest_break_on_failure and + // the --gtest_catch_exceptions flags are specified. + DebugBreak(); +#else + // Dereference NULL through a volatile pointer to prevent the compiler + // from removing. We use this rather than abort() or __builtin_trap() for + // portability: Symbian doesn't implement abort() well, and some debuggers + // don't correctly trap abort(). + *static_cast(NULL) = 1; +#endif // GTEST_OS_WINDOWS + } else if (GTEST_FLAG(throw_on_failure)) { +#if GTEST_HAS_EXCEPTIONS + throw GoogleTestFailureException(result); +#else + // We cannot call abort() as it generates a pop-up in debug mode + // that cannot be suppressed in VC 7.1 or below. + exit(1); +#endif + } + } +} + +// Creates and adds a property to the current TestResult. If a property matching +// the supplied value already exists, updates its value instead. +void UnitTest::RecordPropertyForCurrentTest(const char* key, + const char* value) { + const TestProperty test_property(key, value); + impl_->current_test_result()->RecordProperty(test_property); +} + +// Runs all tests in this UnitTest object and prints the result. +// Returns 0 if successful, or 1 otherwise. +// +// We don't protect this under mutex_, as we only support calling it +// from the main thread. +int UnitTest::Run() { + // Captures the value of GTEST_FLAG(catch_exceptions). This value will be + // used for the duration of the program. + impl()->set_catch_exceptions(GTEST_FLAG(catch_exceptions)); + +#if GTEST_HAS_SEH + const bool in_death_test_child_process = + internal::GTEST_FLAG(internal_run_death_test).length() > 0; + + // Either the user wants Google Test to catch exceptions thrown by the + // tests or this is executing in the context of death test child + // process. In either case the user does not want to see pop-up dialogs + // about crashes - they are expected. + if (impl()->catch_exceptions() || in_death_test_child_process) { + +# if !GTEST_OS_WINDOWS_MOBILE + // SetErrorMode doesn't exist on CE. 
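+    // [Editorial aside: illustrative sketch, not part of the upstream gtest
+    // sources.] RecordPropertyForCurrentTest() above backs the
+    // RecordProperty() call that test code can make; the recorded key/value
+    // pairs end up as extra attributes on the test's <testcase> element in
+    // the XML report. A sketch (the test name and value are invented) is
+    // shown below; the SetErrorMode() call referred to by the preceding
+    // comment resumes immediately after this aside.
+#if 0
+#include "gtest/gtest.h"
+
+TEST(CacheTest, StaysWarm) {
+  const int hits = 42;  // invented measurement
+  // Attaches hits="42" to this test's entry in the XML report.
+  RecordProperty("hits", hits);
+  EXPECT_GT(hits, 0);
+}
+#endif  // editorial sketch
+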
+ SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT | + SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX); +# endif // !GTEST_OS_WINDOWS_MOBILE + +# if (defined(_MSC_VER) || GTEST_OS_WINDOWS_MINGW) && !GTEST_OS_WINDOWS_MOBILE + // Death test children can be terminated with _abort(). On Windows, + // _abort() can show a dialog with a warning message. This forces the + // abort message to go to stderr instead. + _set_error_mode(_OUT_TO_STDERR); +# endif + +# if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE + // In the debug version, Visual Studio pops up a separate dialog + // offering a choice to debug the aborted program. We need to suppress + // this dialog or it will pop up for every EXPECT/ASSERT_DEATH statement + // executed. Google Test will notify the user of any unexpected + // failure via stderr. + // + // VC++ doesn't define _set_abort_behavior() prior to the version 8.0. + // Users of prior VC versions shall suffer the agony and pain of + // clicking through the countless debug dialogs. + // TODO(vladl@google.com): find a way to suppress the abort dialog() in the + // debug mode when compiled with VC 7.1 or lower. + if (!GTEST_FLAG(break_on_failure)) + _set_abort_behavior( + 0x0, // Clear the following flags: + _WRITE_ABORT_MSG | _CALL_REPORTFAULT); // pop-up window, core dump. +# endif + + } +#endif // GTEST_HAS_SEH + + return internal::HandleExceptionsInMethodIfSupported( + impl(), + &internal::UnitTestImpl::RunAllTests, + "auxiliary test code (environments or event listeners)") ? 0 : 1; +} + +// Returns the working directory when the first TEST() or TEST_F() was +// executed. +const char* UnitTest::original_working_dir() const { + return impl_->original_working_dir_.c_str(); +} + +// Returns the TestCase object for the test that's currently running, +// or NULL if no test is running. +// L < mutex_ +const TestCase* UnitTest::current_test_case() const { + internal::MutexLock lock(&mutex_); + return impl_->current_test_case(); +} + +// Returns the TestInfo object for the test that's currently running, +// or NULL if no test is running. +// L < mutex_ +const TestInfo* UnitTest::current_test_info() const { + internal::MutexLock lock(&mutex_); + return impl_->current_test_info(); +} + +// Returns the random seed used at the start of the current test run. +int UnitTest::random_seed() const { return impl_->random_seed(); } + +#if GTEST_HAS_PARAM_TEST +// Returns ParameterizedTestCaseRegistry object used to keep track of +// value-parameterized tests and instantiate and register them. +// L < mutex_ +internal::ParameterizedTestCaseRegistry& + UnitTest::parameterized_test_registry() { + return impl_->parameterized_test_registry(); +} +#endif // GTEST_HAS_PARAM_TEST + +// Creates an empty UnitTest. +UnitTest::UnitTest() { + impl_ = new internal::UnitTestImpl(this); +} + +// Destructor of UnitTest. +UnitTest::~UnitTest() { + delete impl_; +} + +// Pushes a trace defined by SCOPED_TRACE() on to the per-thread +// Google Test trace stack. +// L < mutex_ +void UnitTest::PushGTestTrace(const internal::TraceInfo& trace) { + internal::MutexLock lock(&mutex_); + impl_->gtest_trace_stack().push_back(trace); +} + +// Pops a trace from the per-thread Google Test trace stack. +// L < mutex_ +void UnitTest::PopGTestTrace() { + internal::MutexLock lock(&mutex_); + impl_->gtest_trace_stack().pop_back(); +} + +namespace internal { + +UnitTestImpl::UnitTestImpl(UnitTest* parent) + : parent_(parent), +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. 
+# pragma warning(disable:4355) // Temporarily disables warning 4355 + // (using this in initializer). + default_global_test_part_result_reporter_(this), + default_per_thread_test_part_result_reporter_(this), +# pragma warning(pop) // Restores the warning state again. +#else + default_global_test_part_result_reporter_(this), + default_per_thread_test_part_result_reporter_(this), +#endif // _MSC_VER + global_test_part_result_repoter_( + &default_global_test_part_result_reporter_), + per_thread_test_part_result_reporter_( + &default_per_thread_test_part_result_reporter_), +#if GTEST_HAS_PARAM_TEST + parameterized_test_registry_(), + parameterized_tests_registered_(false), +#endif // GTEST_HAS_PARAM_TEST + last_death_test_case_(-1), + current_test_case_(NULL), + current_test_info_(NULL), + ad_hoc_test_result_(), + os_stack_trace_getter_(NULL), + post_flag_parse_init_performed_(false), + random_seed_(0), // Will be overridden by the flag before first use. + random_(0), // Will be reseeded before first use. + elapsed_time_(0), +#if GTEST_HAS_DEATH_TEST + internal_run_death_test_flag_(NULL), + death_test_factory_(new DefaultDeathTestFactory), +#endif + // Will be overridden by the flag before first use. + catch_exceptions_(false) { + listeners()->SetDefaultResultPrinter(new PrettyUnitTestResultPrinter); +} + +UnitTestImpl::~UnitTestImpl() { + // Deletes every TestCase. + ForEach(test_cases_, internal::Delete); + + // Deletes every Environment. + ForEach(environments_, internal::Delete); + + delete os_stack_trace_getter_; +} + +#if GTEST_HAS_DEATH_TEST +// Disables event forwarding if the control is currently in a death test +// subprocess. Must not be called before InitGoogleTest. +void UnitTestImpl::SuppressTestEventsIfInSubprocess() { + if (internal_run_death_test_flag_.get() != NULL) + listeners()->SuppressEventForwarding(); +} +#endif // GTEST_HAS_DEATH_TEST + +// Initializes event listeners performing XML output as specified by +// UnitTestOptions. Must not be called before InitGoogleTest. +void UnitTestImpl::ConfigureXmlOutput() { + const String& output_format = UnitTestOptions::GetOutputFormat(); + if (output_format == "xml") { + listeners()->SetDefaultXmlGenerator(new XmlUnitTestResultPrinter( + UnitTestOptions::GetAbsolutePathToOutputFile().c_str())); + } else if (output_format != "") { + printf("WARNING: unrecognized output format \"%s\" ignored.\n", + output_format.c_str()); + fflush(stdout); + } +} + +#if GTEST_CAN_STREAM_RESULTS_ +// Initializes event listeners for streaming test results in String form. +// Must not be called before InitGoogleTest. +void UnitTestImpl::ConfigureStreamingOutput() { + const string& target = GTEST_FLAG(stream_result_to); + if (!target.empty()) { + const size_t pos = target.find(':'); + if (pos != string::npos) { + listeners()->Append(new StreamingListener(target.substr(0, pos), + target.substr(pos+1))); + } else { + printf("WARNING: unrecognized streaming target \"%s\" ignored.\n", + target.c_str()); + fflush(stdout); + } + } +} +#endif // GTEST_CAN_STREAM_RESULTS_ + +// Performs initialization dependent upon flag values obtained in +// ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to +// ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest +// this function is also called from RunAllTests. Since this function can be +// called more than once, it has to be idempotent. +void UnitTestImpl::PostFlagParsingInit() { + // Ensures that this function does not execute more than once. 
+ if (!post_flag_parse_init_performed_) { + post_flag_parse_init_performed_ = true; + +#if GTEST_HAS_DEATH_TEST + InitDeathTestSubprocessControlInfo(); + SuppressTestEventsIfInSubprocess(); +#endif // GTEST_HAS_DEATH_TEST + + // Registers parameterized tests. This makes parameterized tests + // available to the UnitTest reflection API without running + // RUN_ALL_TESTS. + RegisterParameterizedTests(); + + // Configures listeners for XML output. This makes it possible for users + // to shut down the default XML output before invoking RUN_ALL_TESTS. + ConfigureXmlOutput(); + +#if GTEST_CAN_STREAM_RESULTS_ + // Configures listeners for streaming test results to the specified server. + ConfigureStreamingOutput(); +#endif // GTEST_CAN_STREAM_RESULTS_ + } +} + +// A predicate that checks the name of a TestCase against a known +// value. +// +// This is used for implementation of the UnitTest class only. We put +// it in the anonymous namespace to prevent polluting the outer +// namespace. +// +// TestCaseNameIs is copyable. +class TestCaseNameIs { + public: + // Constructor. + explicit TestCaseNameIs(const String& name) + : name_(name) {} + + // Returns true iff the name of test_case matches name_. + bool operator()(const TestCase* test_case) const { + return test_case != NULL && strcmp(test_case->name(), name_.c_str()) == 0; + } + + private: + String name_; +}; + +// Finds and returns a TestCase with the given name. If one doesn't +// exist, creates one and returns it. It's the CALLER'S +// RESPONSIBILITY to ensure that this function is only called WHEN THE +// TESTS ARE NOT SHUFFLED. +// +// Arguments: +// +// test_case_name: name of the test case +// type_param: the name of the test case's type parameter, or NULL if +// this is not a typed or a type-parameterized test case. +// set_up_tc: pointer to the function that sets up the test case +// tear_down_tc: pointer to the function that tears down the test case +TestCase* UnitTestImpl::GetTestCase(const char* test_case_name, + const char* type_param, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc) { + // Can we find a TestCase with the given name? + const std::vector::const_iterator test_case = + std::find_if(test_cases_.begin(), test_cases_.end(), + TestCaseNameIs(test_case_name)); + + if (test_case != test_cases_.end()) + return *test_case; + + // No. Let's create one. + TestCase* const new_test_case = + new TestCase(test_case_name, type_param, set_up_tc, tear_down_tc); + + // Is this a death test case? + if (internal::UnitTestOptions::MatchesFilter(String(test_case_name), + kDeathTestCaseFilter)) { + // Yes. Inserts the test case after the last death test case + // defined so far. This only works when the test cases haven't + // been shuffled. Otherwise we may end up running a death test + // after a non-death test. + ++last_death_test_case_; + test_cases_.insert(test_cases_.begin() + last_death_test_case_, + new_test_case); + } else { + // No. Appends to the end of the list. + test_cases_.push_back(new_test_case); + } + + test_case_indices_.push_back(static_cast(test_case_indices_.size())); + return new_test_case; +} + +// Helpers for setting up / tearing down the given environment. They +// are for use in the ForEach() function. +static void SetUpEnvironment(Environment* env) { env->SetUp(); } +static void TearDownEnvironment(Environment* env) { env->TearDown(); } + +// Runs all tests in this UnitTest object, prints the result, and +// returns true if all tests are successful. 
If any exception is +// thrown during a test, the test is considered to be failed, but the +// rest of the tests will still be run. +// +// When parameterized tests are enabled, it expands and registers +// parameterized tests first in RegisterParameterizedTests(). +// All other functions called from RunAllTests() may safely assume that +// parameterized tests are ready to be counted and run. +bool UnitTestImpl::RunAllTests() { + // Makes sure InitGoogleTest() was called. + if (!GTestIsInitialized()) { + printf("%s", + "\nThis test program did NOT call ::testing::InitGoogleTest " + "before calling RUN_ALL_TESTS(). Please fix it.\n"); + return false; + } + + // Do not run any test if the --help flag was specified. + if (g_help_flag) + return true; + + // Repeats the call to the post-flag parsing initialization in case the + // user didn't call InitGoogleTest. + PostFlagParsingInit(); + + // Even if sharding is not on, test runners may want to use the + // GTEST_SHARD_STATUS_FILE to query whether the test supports the sharding + // protocol. + internal::WriteToShardStatusFileIfNeeded(); + + // True iff we are in a subprocess for running a thread-safe-style + // death test. + bool in_subprocess_for_death_test = false; + +#if GTEST_HAS_DEATH_TEST + in_subprocess_for_death_test = (internal_run_death_test_flag_.get() != NULL); +#endif // GTEST_HAS_DEATH_TEST + + const bool should_shard = ShouldShard(kTestTotalShards, kTestShardIndex, + in_subprocess_for_death_test); + + // Compares the full test names with the filter to decide which + // tests to run. + const bool has_tests_to_run = FilterTests(should_shard + ? HONOR_SHARDING_PROTOCOL + : IGNORE_SHARDING_PROTOCOL) > 0; + + // Lists the tests and exits if the --gtest_list_tests flag was specified. + if (GTEST_FLAG(list_tests)) { + // This must be called *after* FilterTests() has been called. + ListTestsMatchingFilter(); + return true; + } + + random_seed_ = GTEST_FLAG(shuffle) ? + GetRandomSeedFromFlag(GTEST_FLAG(random_seed)) : 0; + + // True iff at least one test has failed. + bool failed = false; + + TestEventListener* repeater = listeners()->repeater(); + + repeater->OnTestProgramStart(*parent_); + + // How many times to repeat the tests? We don't want to repeat them + // when we are inside the subprocess of a death test. + const int repeat = in_subprocess_for_death_test ? 1 : GTEST_FLAG(repeat); + // Repeats forever if the repeat count is negative. + const bool forever = repeat < 0; + for (int i = 0; forever || i != repeat; i++) { + // We want to preserve failures generated by ad-hoc test + // assertions executed before RUN_ALL_TESTS(). + ClearNonAdHocTestResult(); + + const TimeInMillis start = GetTimeInMillis(); + + // Shuffles test cases and tests if requested. + if (has_tests_to_run && GTEST_FLAG(shuffle)) { + random()->Reseed(random_seed_); + // This should be done before calling OnTestIterationStart(), + // such that a test event listener can see the actual test order + // in the event. + ShuffleTests(); + } + + // Tells the unit test event listeners that the tests are about to start. + repeater->OnTestIterationStart(*parent_, i); + + // Runs each test case if there is at least one test to run. + if (has_tests_to_run) { + // Sets up all environments beforehand. + repeater->OnEnvironmentsSetUpStart(*parent_); + ForEach(environments_, SetUpEnvironment); + repeater->OnEnvironmentsSetUpEnd(*parent_); + + // Runs the tests only if there was no fatal failure during global + // set-up. 
+ if (!Test::HasFatalFailure()) { + for (int test_index = 0; test_index < total_test_case_count(); + test_index++) { + GetMutableTestCase(test_index)->Run(); + } + } + + // Tears down all environments in reverse order afterwards. + repeater->OnEnvironmentsTearDownStart(*parent_); + std::for_each(environments_.rbegin(), environments_.rend(), + TearDownEnvironment); + repeater->OnEnvironmentsTearDownEnd(*parent_); + } + + elapsed_time_ = GetTimeInMillis() - start; + + // Tells the unit test event listener that the tests have just finished. + repeater->OnTestIterationEnd(*parent_, i); + + // Gets the result and clears it. + if (!Passed()) { + failed = true; + } + + // Restores the original test order after the iteration. This + // allows the user to quickly repro a failure that happens in the + // N-th iteration without repeating the first (N - 1) iterations. + // This is not enclosed in "if (GTEST_FLAG(shuffle)) { ... }", in + // case the user somehow changes the value of the flag somewhere + // (it's always safe to unshuffle the tests). + UnshuffleTests(); + + if (GTEST_FLAG(shuffle)) { + // Picks a new random seed for each iteration. + random_seed_ = GetNextRandomSeed(random_seed_); + } + } + + repeater->OnTestProgramEnd(*parent_); + + return !failed; +} + +// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file +// if the variable is present. If a file already exists at this location, this +// function will write over it. If the variable is present, but the file cannot +// be created, prints an error and exits. +void WriteToShardStatusFileIfNeeded() { + const char* const test_shard_file = posix::GetEnv(kTestShardStatusFile); + if (test_shard_file != NULL) { + FILE* const file = posix::FOpen(test_shard_file, "w"); + if (file == NULL) { + ColoredPrintf(COLOR_RED, + "Could not write to the test shard status file \"%s\" " + "specified by the %s environment variable.\n", + test_shard_file, kTestShardStatusFile); + fflush(stdout); + exit(EXIT_FAILURE); + } + fclose(file); + } +} + +// Checks whether sharding is enabled by examining the relevant +// environment variable values. If the variables are present, +// but inconsistent (i.e., shard_index >= total_shards), prints +// an error and exits. If in_subprocess_for_death_test, sharding is +// disabled because it must only be applied to the original test +// process. Otherwise, we could filter out death tests we intended to execute. 
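+// [Editorial aside: illustrative sketch, not part of the upstream gtest
+// sources.] The sharding protocol described above is driven entirely by two
+// environment variables that a test runner sets before launching each copy
+// of the binary. The compiled-out sketch below shows how a process can
+// inspect them (the variable names are the ones gtest documents);
+// ShouldShard() itself is defined right after this aside.
+#if 0
+#include <cstdio>
+#include <cstdlib>
+
+int main() {
+  const char* const total = std::getenv("GTEST_TOTAL_SHARDS");
+  const char* const index = std::getenv("GTEST_SHARD_INDEX");
+  if (total == NULL && index == NULL) {
+    std::printf("sharding disabled; this process runs every selected test\n");
+  } else if (total != NULL && index != NULL) {
+    // A runner launches one copy of the binary per shard, giving each copy a
+    // distinct GTEST_SHARD_INDEX in [0, GTEST_TOTAL_SHARDS).
+    std::printf("this process is shard %s of %s\n", index, total);
+  } else {
+    std::printf("inconsistent configuration: set both variables or neither\n");
+  }
+  return 0;
+}
+#endif  // editorial sketch
+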
+bool ShouldShard(const char* total_shards_env, + const char* shard_index_env, + bool in_subprocess_for_death_test) { + if (in_subprocess_for_death_test) { + return false; + } + + const Int32 total_shards = Int32FromEnvOrDie(total_shards_env, -1); + const Int32 shard_index = Int32FromEnvOrDie(shard_index_env, -1); + + if (total_shards == -1 && shard_index == -1) { + return false; + } else if (total_shards == -1 && shard_index != -1) { + const Message msg = Message() + << "Invalid environment variables: you have " + << kTestShardIndex << " = " << shard_index + << ", but have left " << kTestTotalShards << " unset.\n"; + ColoredPrintf(COLOR_RED, msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } else if (total_shards != -1 && shard_index == -1) { + const Message msg = Message() + << "Invalid environment variables: you have " + << kTestTotalShards << " = " << total_shards + << ", but have left " << kTestShardIndex << " unset.\n"; + ColoredPrintf(COLOR_RED, msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } else if (shard_index < 0 || shard_index >= total_shards) { + const Message msg = Message() + << "Invalid environment variables: we require 0 <= " + << kTestShardIndex << " < " << kTestTotalShards + << ", but you have " << kTestShardIndex << "=" << shard_index + << ", " << kTestTotalShards << "=" << total_shards << ".\n"; + ColoredPrintf(COLOR_RED, msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } + + return total_shards > 1; +} + +// Parses the environment variable var as an Int32. If it is unset, +// returns default_val. If it is not an Int32, prints an error +// and aborts. +Int32 Int32FromEnvOrDie(const char* var, Int32 default_val) { + const char* str_val = posix::GetEnv(var); + if (str_val == NULL) { + return default_val; + } + + Int32 result; + if (!ParseInt32(Message() << "The value of environment variable " << var, + str_val, &result)) { + exit(EXIT_FAILURE); + } + return result; +} + +// Given the total number of shards, the shard index, and the test id, +// returns true iff the test should be run on this shard. The test id is +// some arbitrary but unique non-negative integer assigned to each test +// method. Assumes that 0 <= shard_index < total_shards. +bool ShouldRunTestOnShard(int total_shards, int shard_index, int test_id) { + return (test_id % total_shards) == shard_index; +} + +// Compares the name of each test with the user-specified filter to +// decide whether the test should be run, then records the result in +// each TestCase and TestInfo object. +// If shard_tests == true, further filters tests based on sharding +// variables in the environment - see +// http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide. +// Returns the number of tests that should run. +int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) { + const Int32 total_shards = shard_tests == HONOR_SHARDING_PROTOCOL ? + Int32FromEnvOrDie(kTestTotalShards, -1) : -1; + const Int32 shard_index = shard_tests == HONOR_SHARDING_PROTOCOL ? + Int32FromEnvOrDie(kTestShardIndex, -1) : -1; + + // num_runnable_tests are the number of tests that will + // run across all shards (i.e., match filter and are not disabled). + // num_selected_tests are the number of tests to be run on + // this shard. 
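+  // [Editorial aside: illustrative sketch, not part of the upstream gtest
+  // sources.] ShouldRunTestOnShard() above deals runnable tests out
+  // round-robin: tests are numbered 0, 1, 2, ... in discovery order and test
+  // i lands on shard i % total_shards. A tiny illustration follows;
+  // FilterTests() resumes right after this aside.
+#if 0
+#include <cstdio>
+
+int main() {
+  const int kTotalShards = 3;  // e.g. GTEST_TOTAL_SHARDS=3
+  for (int test_id = 0; test_id < 8; ++test_id) {
+    std::printf("test %d -> shard %d\n", test_id, test_id % kTotalShards);
+  }
+  return 0;
+}
+#endif  // editorial sketch
+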
+ int num_runnable_tests = 0; + int num_selected_tests = 0; + for (size_t i = 0; i < test_cases_.size(); i++) { + TestCase* const test_case = test_cases_[i]; + const String &test_case_name = test_case->name(); + test_case->set_should_run(false); + + for (size_t j = 0; j < test_case->test_info_list().size(); j++) { + TestInfo* const test_info = test_case->test_info_list()[j]; + const String test_name(test_info->name()); + // A test is disabled if test case name or test name matches + // kDisableTestFilter. + const bool is_disabled = + internal::UnitTestOptions::MatchesFilter(test_case_name, + kDisableTestFilter) || + internal::UnitTestOptions::MatchesFilter(test_name, + kDisableTestFilter); + test_info->is_disabled_ = is_disabled; + + const bool matches_filter = + internal::UnitTestOptions::FilterMatchesTest(test_case_name, + test_name); + test_info->matches_filter_ = matches_filter; + + const bool is_runnable = + (GTEST_FLAG(also_run_disabled_tests) || !is_disabled) && + matches_filter; + + const bool is_selected = is_runnable && + (shard_tests == IGNORE_SHARDING_PROTOCOL || + ShouldRunTestOnShard(total_shards, shard_index, + num_runnable_tests)); + + num_runnable_tests += is_runnable; + num_selected_tests += is_selected; + + test_info->should_run_ = is_selected; + test_case->set_should_run(test_case->should_run() || is_selected); + } + } + return num_selected_tests; +} + +// Prints the names of the tests matching the user-specified filter flag. +void UnitTestImpl::ListTestsMatchingFilter() { + for (size_t i = 0; i < test_cases_.size(); i++) { + const TestCase* const test_case = test_cases_[i]; + bool printed_test_case_name = false; + + for (size_t j = 0; j < test_case->test_info_list().size(); j++) { + const TestInfo* const test_info = + test_case->test_info_list()[j]; + if (test_info->matches_filter_) { + if (!printed_test_case_name) { + printed_test_case_name = true; + printf("%s.\n", test_case->name()); + } + printf(" %s\n", test_info->name()); + } + } + } + fflush(stdout); +} + +// Sets the OS stack trace getter. +// +// Does nothing if the input and the current OS stack trace getter are +// the same; otherwise, deletes the old getter and makes the input the +// current getter. +void UnitTestImpl::set_os_stack_trace_getter( + OsStackTraceGetterInterface* getter) { + if (os_stack_trace_getter_ != getter) { + delete os_stack_trace_getter_; + os_stack_trace_getter_ = getter; + } +} + +// Returns the current OS stack trace getter if it is not NULL; +// otherwise, creates an OsStackTraceGetter, makes it the current +// getter, and returns it. +OsStackTraceGetterInterface* UnitTestImpl::os_stack_trace_getter() { + if (os_stack_trace_getter_ == NULL) { + os_stack_trace_getter_ = new OsStackTraceGetter; + } + + return os_stack_trace_getter_; +} + +// Returns the TestResult for the test that's currently running, or +// the TestResult for the ad hoc test if no test is running. +TestResult* UnitTestImpl::current_test_result() { + return current_test_info_ ? + &(current_test_info_->result_) : &ad_hoc_test_result_; +} + +// Shuffles all test cases, and the tests within each test case, +// making sure that death tests are still run first. +void UnitTestImpl::ShuffleTests() { + // Shuffles the death test cases. + ShuffleRange(random(), 0, last_death_test_case_ + 1, &test_case_indices_); + + // Shuffles the non-death test cases. + ShuffleRange(random(), last_death_test_case_ + 1, + static_cast(test_cases_.size()), &test_case_indices_); + + // Shuffles the tests inside each test case. 
+ for (size_t i = 0; i < test_cases_.size(); i++) { + test_cases_[i]->ShuffleTests(random()); + } +} + +// Restores the test cases and tests to their order before the first shuffle. +void UnitTestImpl::UnshuffleTests() { + for (size_t i = 0; i < test_cases_.size(); i++) { + // Unshuffles the tests in each test case. + test_cases_[i]->UnshuffleTests(); + // Resets the index of each test case. + test_case_indices_[i] = static_cast(i); + } +} + +// Returns the current OS stack trace as a String. +// +// The maximum number of stack frames to be included is specified by +// the gtest_stack_trace_depth flag. The skip_count parameter +// specifies the number of top frames to be skipped, which doesn't +// count against the number of frames to be included. +// +// For example, if Foo() calls Bar(), which in turn calls +// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in +// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't. +String GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/, + int skip_count) { + // We pass skip_count + 1 to skip this wrapper function in addition + // to what the user really wants to skip. + return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1); +} + +// Used by the GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_ macro to +// suppress unreachable code warnings. +namespace { +class ClassUniqueToAlwaysTrue {}; +} + +bool IsTrue(bool condition) { return condition; } + +bool AlwaysTrue() { +#if GTEST_HAS_EXCEPTIONS + // This condition is always false so AlwaysTrue() never actually throws, + // but it makes the compiler think that it may throw. + if (IsTrue(false)) + throw ClassUniqueToAlwaysTrue(); +#endif // GTEST_HAS_EXCEPTIONS + return true; +} + +// If *pstr starts with the given prefix, modifies *pstr to be right +// past the prefix and returns true; otherwise leaves *pstr unchanged +// and returns false. None of pstr, *pstr, and prefix can be NULL. +bool SkipPrefix(const char* prefix, const char** pstr) { + const size_t prefix_len = strlen(prefix); + if (strncmp(*pstr, prefix, prefix_len) == 0) { + *pstr += prefix_len; + return true; + } + return false; +} + +// Parses a string as a command line flag. The string should have +// the format "--flag=value". When def_optional is true, the "=value" +// part can be omitted. +// +// Returns the value of the flag, or NULL if the parsing failed. +const char* ParseFlagValue(const char* str, + const char* flag, + bool def_optional) { + // str and flag must not be NULL. + if (str == NULL || flag == NULL) return NULL; + + // The flag must start with "--" followed by GTEST_FLAG_PREFIX_. + const String flag_str = String::Format("--%s%s", GTEST_FLAG_PREFIX_, flag); + const size_t flag_len = flag_str.length(); + if (strncmp(str, flag_str.c_str(), flag_len) != 0) return NULL; + + // Skips the flag name. + const char* flag_end = str + flag_len; + + // When def_optional is true, it's OK to not have a "=value" part. + if (def_optional && (flag_end[0] == '\0')) { + return flag_end; + } + + // If def_optional is true and there are more characters after the + // flag name, or if def_optional is false, there must be a '=' after + // the flag name. + if (flag_end[0] != '=') return NULL; + + // Returns the string after "=". + return flag_end + 1; +} + +// Parses a string for a bool flag, in the form of either +// "--flag=value" or "--flag". +// +// In the former case, the value is taken as true as long as it does +// not start with '0', 'f', or 'F'. 
+// +// In the latter case, the value is taken as true. +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +bool ParseBoolFlag(const char* str, const char* flag, bool* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, true); + + // Aborts if the parsing failed. + if (value_str == NULL) return false; + + // Converts the string value to a bool. + *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F'); + return true; +} + +// Parses a string for an Int32 flag, in the form of +// "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +bool ParseInt32Flag(const char* str, const char* flag, Int32* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == NULL) return false; + + // Sets *value to the value of the flag. + return ParseInt32(Message() << "The value of flag --" << flag, + value_str, value); +} + +// Parses a string for a string flag, in the form of +// "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +bool ParseStringFlag(const char* str, const char* flag, String* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == NULL) return false; + + // Sets *value to the value of the flag. + *value = value_str; + return true; +} + +// Determines whether a string has a prefix that Google Test uses for its +// flags, i.e., starts with GTEST_FLAG_PREFIX_ or GTEST_FLAG_PREFIX_DASH_. +// If Google Test detects that a command line flag has its prefix but is not +// recognized, it will print its help message. Flags starting with +// GTEST_INTERNAL_PREFIX_ followed by "internal_" are considered Google Test +// internal flags and do not trigger the help message. +static bool HasGoogleTestFlagPrefix(const char* str) { + return (SkipPrefix("--", &str) || + SkipPrefix("-", &str) || + SkipPrefix("/", &str)) && + !SkipPrefix(GTEST_FLAG_PREFIX_ "internal_", &str) && + (SkipPrefix(GTEST_FLAG_PREFIX_, &str) || + SkipPrefix(GTEST_FLAG_PREFIX_DASH_, &str)); +} + +// Prints a string containing code-encoded text. The following escape +// sequences can be used in the string to control the text color: +// +// @@ prints a single '@' character. +// @R changes the color to red. +// @G changes the color to green. +// @Y changes the color to yellow. +// @D changes to the default terminal text color. +// +// TODO(wan@google.com): Write tests for this once we add stdout +// capturing to Google Test. +static void PrintColorEncoded(const char* str) { + GTestColor color = COLOR_DEFAULT; // The current color. + + // Conceptually, we split the string into segments divided by escape + // sequences. Then we print one segment at a time. At the end of + // each iteration, the str pointer advances to the beginning of the + // next segment. 
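+  // Illustrative example (added commentary, not in the original source):
+  // a call such as
+  //
+  //   PrintColorEncoded("@RFAILED@D  @G--" GTEST_FLAG_PREFIX_ "help@D\n");
+  //
+  // prints "FAILED" in red, the flag name in green, and everything else in
+  // the default terminal color, while "@@" prints a literal '@' character.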
+ for (;;) { + const char* p = strchr(str, '@'); + if (p == NULL) { + ColoredPrintf(color, "%s", str); + return; + } + + ColoredPrintf(color, "%s", String(str, p - str).c_str()); + + const char ch = p[1]; + str = p + 2; + if (ch == '@') { + ColoredPrintf(color, "@"); + } else if (ch == 'D') { + color = COLOR_DEFAULT; + } else if (ch == 'R') { + color = COLOR_RED; + } else if (ch == 'G') { + color = COLOR_GREEN; + } else if (ch == 'Y') { + color = COLOR_YELLOW; + } else { + --str; + } + } +} + +static const char kColorEncodedHelpMessage[] = +"This program contains tests written using " GTEST_NAME_ ". You can use the\n" +"following command line flags to control its behavior:\n" +"\n" +"Test Selection:\n" +" @G--" GTEST_FLAG_PREFIX_ "list_tests@D\n" +" List the names of all tests instead of running them. The name of\n" +" TEST(Foo, Bar) is \"Foo.Bar\".\n" +" @G--" GTEST_FLAG_PREFIX_ "filter=@YPOSTIVE_PATTERNS" + "[@G-@YNEGATIVE_PATTERNS]@D\n" +" Run only the tests whose name matches one of the positive patterns but\n" +" none of the negative patterns. '?' matches any single character; '*'\n" +" matches any substring; ':' separates two patterns.\n" +" @G--" GTEST_FLAG_PREFIX_ "also_run_disabled_tests@D\n" +" Run all disabled tests too.\n" +"\n" +"Test Execution:\n" +" @G--" GTEST_FLAG_PREFIX_ "repeat=@Y[COUNT]@D\n" +" Run the tests repeatedly; use a negative count to repeat forever.\n" +" @G--" GTEST_FLAG_PREFIX_ "shuffle@D\n" +" Randomize tests' orders on every iteration.\n" +" @G--" GTEST_FLAG_PREFIX_ "random_seed=@Y[NUMBER]@D\n" +" Random number seed to use for shuffling test orders (between 1 and\n" +" 99999, or 0 to use a seed based on the current time).\n" +"\n" +"Test Output:\n" +" @G--" GTEST_FLAG_PREFIX_ "color=@Y(@Gyes@Y|@Gno@Y|@Gauto@Y)@D\n" +" Enable/disable colored output. The default is @Gauto@D.\n" +" -@G-" GTEST_FLAG_PREFIX_ "print_time=0@D\n" +" Don't print the elapsed time of each test.\n" +" @G--" GTEST_FLAG_PREFIX_ "output=xml@Y[@G:@YDIRECTORY_PATH@G" + GTEST_PATH_SEP_ "@Y|@G:@YFILE_PATH]@D\n" +" Generate an XML report in the given directory or with the given file\n" +" name. @YFILE_PATH@D defaults to @Gtest_details.xml@D.\n" +#if GTEST_CAN_STREAM_RESULTS_ +" @G--" GTEST_FLAG_PREFIX_ "stream_result_to=@YHOST@G:@YPORT@D\n" +" Stream test results to the given server.\n" +#endif // GTEST_CAN_STREAM_RESULTS_ +"\n" +"Assertion Behavior:\n" +#if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS +" @G--" GTEST_FLAG_PREFIX_ "death_test_style=@Y(@Gfast@Y|@Gthreadsafe@Y)@D\n" +" Set the default death test style.\n" +#endif // GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS +" @G--" GTEST_FLAG_PREFIX_ "break_on_failure@D\n" +" Turn assertion failures into debugger break-points.\n" +" @G--" GTEST_FLAG_PREFIX_ "throw_on_failure@D\n" +" Turn assertion failures into C++ exceptions.\n" +" @G--" GTEST_FLAG_PREFIX_ "catch_exceptions=0@D\n" +" Do not report exceptions as test failures. Instead, allow them\n" +" to crash the program or throw a pop-up (on Windows).\n" +"\n" +"Except for @G--" GTEST_FLAG_PREFIX_ "list_tests@D, you can alternatively set " + "the corresponding\n" +"environment variable of a flag (all letters in upper-case). For example, to\n" +"disable colored text output, you can either specify @G--" GTEST_FLAG_PREFIX_ + "color=no@D or set\n" +"the @G" GTEST_FLAG_PREFIX_UPPER_ "COLOR@D environment variable to @Gno@D.\n" +"\n" +"For more information, please read the " GTEST_NAME_ " documentation at\n" +"@G" GTEST_PROJECT_URL_ "@D. 
If you find a bug in " GTEST_NAME_ "\n"
+"(not one in your own code or tests), please report it to\n"
+"@G<" GTEST_DEV_EMAIL_ ">@D.\n";
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.  The type parameter CharType can be
+// instantiated to either char or wchar_t.
+template <typename CharType>
+void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {
+  for (int i = 1; i < *argc; i++) {
+    const String arg_string = StreamableToString(argv[i]);
+    const char* const arg = arg_string.c_str();
+
+    using internal::ParseBoolFlag;
+    using internal::ParseInt32Flag;
+    using internal::ParseStringFlag;
+
+    // Do we see a Google Test flag?
+    if (ParseBoolFlag(arg, kAlsoRunDisabledTestsFlag,
+                      &GTEST_FLAG(also_run_disabled_tests)) ||
+        ParseBoolFlag(arg, kBreakOnFailureFlag,
+                      &GTEST_FLAG(break_on_failure)) ||
+        ParseBoolFlag(arg, kCatchExceptionsFlag,
+                      &GTEST_FLAG(catch_exceptions)) ||
+        ParseStringFlag(arg, kColorFlag, &GTEST_FLAG(color)) ||
+        ParseStringFlag(arg, kDeathTestStyleFlag,
+                        &GTEST_FLAG(death_test_style)) ||
+        ParseBoolFlag(arg, kDeathTestUseFork,
+                      &GTEST_FLAG(death_test_use_fork)) ||
+        ParseStringFlag(arg, kFilterFlag, &GTEST_FLAG(filter)) ||
+        ParseStringFlag(arg, kInternalRunDeathTestFlag,
+                        &GTEST_FLAG(internal_run_death_test)) ||
+        ParseBoolFlag(arg, kListTestsFlag, &GTEST_FLAG(list_tests)) ||
+        ParseStringFlag(arg, kOutputFlag, &GTEST_FLAG(output)) ||
+        ParseBoolFlag(arg, kPrintTimeFlag, &GTEST_FLAG(print_time)) ||
+        ParseInt32Flag(arg, kRandomSeedFlag, &GTEST_FLAG(random_seed)) ||
+        ParseInt32Flag(arg, kRepeatFlag, &GTEST_FLAG(repeat)) ||
+        ParseBoolFlag(arg, kShuffleFlag, &GTEST_FLAG(shuffle)) ||
+        ParseInt32Flag(arg, kStackTraceDepthFlag,
+                       &GTEST_FLAG(stack_trace_depth)) ||
+        ParseStringFlag(arg, kStreamResultToFlag,
+                        &GTEST_FLAG(stream_result_to)) ||
+        ParseBoolFlag(arg, kThrowOnFailureFlag,
+                      &GTEST_FLAG(throw_on_failure))
+        ) {
+      // Yes.  Shift the remainder of the argv list left by one.  Note
+      // that argv has (*argc + 1) elements, the last one always being
+      // NULL.  The following loop moves the trailing NULL element as
+      // well.
+      for (int j = i; j != *argc; j++) {
+        argv[j] = argv[j + 1];
+      }
+
+      // Decrements the argument count.
+      (*argc)--;
+
+      // We also need to decrement the iterator as we just removed
+      // an element.
+      i--;
+    } else if (arg_string == "--help" || arg_string == "-h" ||
+               arg_string == "-?" || arg_string == "/?" ||
+               HasGoogleTestFlagPrefix(arg)) {
+      // Both help flag and unrecognized Google Test flags (excluding
+      // internal ones) trigger help display.
+      g_help_flag = true;
+    }
+  }
+
+  if (g_help_flag) {
+    // We print the help here instead of in RUN_ALL_TESTS(), as the
+    // latter may not be called at all if the user is using Google
+    // Test with another testing framework.
+    PrintColorEncoded(kColorEncodedHelpMessage);
+  }
+}
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.
+void ParseGoogleTestFlagsOnly(int* argc, char** argv) {
+  ParseGoogleTestFlagsOnlyImpl(argc, argv);
+}
+void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv) {
+  ParseGoogleTestFlagsOnlyImpl(argc, argv);
+}
+
+// The internal implementation of InitGoogleTest().
+//
+// The type parameter CharType can be instantiated to either char or
+// wchar_t.
+template <typename CharType>
+void InitGoogleTestImpl(int* argc, CharType** argv) {
+  g_init_gtest_count++;
+
+  // We don't want to run the initialization code twice.
+  if (g_init_gtest_count != 1) return;
+
+  if (*argc <= 0) return;
+
+  internal::g_executable_path = internal::StreamableToString(argv[0]);
+
+#if GTEST_HAS_DEATH_TEST
+
+  g_argvs.clear();
+  for (int i = 0; i != *argc; i++) {
+    g_argvs.push_back(StreamableToString(argv[i]));
+  }
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+  ParseGoogleTestFlagsOnly(argc, argv);
+  GetUnitTestImpl()->PostFlagParsingInit();
+}
+
+}  // namespace internal
+
+// Initializes Google Test.  This must be called before calling
+// RUN_ALL_TESTS().  In particular, it parses a command line for the
+// flags that Google Test recognizes.  Whenever a Google Test flag is
+// seen, it is removed from argv, and *argc is decremented.
+//
+// No value is returned.  Instead, the Google Test flag variables are
+// updated.
+//
+// Calling the function for the second time has no user-visible effect.
+void InitGoogleTest(int* argc, char** argv) {
+  internal::InitGoogleTestImpl(argc, argv);
+}
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+void InitGoogleTest(int* argc, wchar_t** argv) {
+  internal::InitGoogleTestImpl(argc, argv);
+}
+
+}  // namespace testing
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan), vladl@google.com (Vlad Losev)
+//
+// This file implements death tests.
+
+
+#if GTEST_HAS_DEATH_TEST
+
+# if GTEST_OS_MAC
+#  include <crt_externs.h>
+# endif  // GTEST_OS_MAC
+
+# include <errno.h>
+# include <fcntl.h>
+# include <limits.h>
+# include <stdarg.h>
+
+# if GTEST_OS_WINDOWS
+#  include <windows.h>
+# else
+#  include <sys/mman.h>
+#  include <sys/wait.h>
+# endif  // GTEST_OS_WINDOWS
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation.  It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error.  This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+
+// Constants.
+
+// The default death test style.
+static const char kDefaultDeathTestStyle[] = "fast"; + +GTEST_DEFINE_string_( + death_test_style, + internal::StringFromGTestEnv("death_test_style", kDefaultDeathTestStyle), + "Indicates how to run a death test in a forked child process: " + "\"threadsafe\" (child process re-executes the test binary " + "from the beginning, running only the specific death test) or " + "\"fast\" (child process runs the death test immediately " + "after forking)."); + +GTEST_DEFINE_bool_( + death_test_use_fork, + internal::BoolFromGTestEnv("death_test_use_fork", false), + "Instructs to use fork()/_exit() instead of clone() in death tests. " + "Ignored and always uses fork() on POSIX systems where clone() is not " + "implemented. Useful when running under valgrind or similar tools if " + "those do not support clone(). Valgrind 3.3.1 will just fail if " + "it sees an unsupported combination of clone() flags. " + "It is not recommended to use this flag w/o valgrind though it will " + "work in 99% of the cases. Once valgrind is fixed, this flag will " + "most likely be removed."); + +namespace internal { +GTEST_DEFINE_string_( + internal_run_death_test, "", + "Indicates the file, line number, temporal index of " + "the single death test to run, and a file descriptor to " + "which a success code may be sent, all separated by " + "colons. This flag is specified if and only if the current " + "process is a sub-process launched for running a thread-safe " + "death test. FOR INTERNAL USE ONLY."); +} // namespace internal + +#if GTEST_HAS_DEATH_TEST + +// ExitedWithCode constructor. +ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) { +} + +// ExitedWithCode function-call operator. +bool ExitedWithCode::operator()(int exit_status) const { +# if GTEST_OS_WINDOWS + + return exit_status == exit_code_; + +# else + + return WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == exit_code_; + +# endif // GTEST_OS_WINDOWS +} + +# if !GTEST_OS_WINDOWS +// KilledBySignal constructor. +KilledBySignal::KilledBySignal(int signum) : signum_(signum) { +} + +// KilledBySignal function-call operator. +bool KilledBySignal::operator()(int exit_status) const { + return WIFSIGNALED(exit_status) && WTERMSIG(exit_status) == signum_; +} +# endif // !GTEST_OS_WINDOWS + +namespace internal { + +// Utilities needed for death tests. + +// Generates a textual description of a given exit code, in the format +// specified by wait(2). +static String ExitSummary(int exit_code) { + Message m; + +# if GTEST_OS_WINDOWS + + m << "Exited with exit status " << exit_code; + +# else + + if (WIFEXITED(exit_code)) { + m << "Exited with exit status " << WEXITSTATUS(exit_code); + } else if (WIFSIGNALED(exit_code)) { + m << "Terminated by signal " << WTERMSIG(exit_code); + } +# ifdef WCOREDUMP + if (WCOREDUMP(exit_code)) { + m << " (core dumped)"; + } +# endif +# endif // GTEST_OS_WINDOWS + + return m.GetString(); +} + +// Returns true if exit_status describes a process that was terminated +// by a signal, or exited normally with a nonzero exit code. +bool ExitedUnsuccessfully(int exit_status) { + return !ExitedWithCode(0)(exit_status); +} + +# if !GTEST_OS_WINDOWS +// Generates a textual failure message when a death test finds more than +// one thread running, or cannot determine the number of threads, prior +// to executing the given statement. It is the responsibility of the +// caller not to pass a thread_count of 1. 
+static String DeathTestThreadWarning(size_t thread_count) { + Message msg; + msg << "Death tests use fork(), which is unsafe particularly" + << " in a threaded context. For this test, " << GTEST_NAME_ << " "; + if (thread_count == 0) + msg << "couldn't detect the number of threads."; + else + msg << "detected " << thread_count << " threads."; + return msg.GetString(); +} +# endif // !GTEST_OS_WINDOWS + +// Flag characters for reporting a death test that did not die. +static const char kDeathTestLived = 'L'; +static const char kDeathTestReturned = 'R'; +static const char kDeathTestThrew = 'T'; +static const char kDeathTestInternalError = 'I'; + +// An enumeration describing all of the possible ways that a death test can +// conclude. DIED means that the process died while executing the test +// code; LIVED means that process lived beyond the end of the test code; +// RETURNED means that the test statement attempted to execute a return +// statement, which is not allowed; THREW means that the test statement +// returned control by throwing an exception. IN_PROGRESS means the test +// has not yet concluded. +// TODO(vladl@google.com): Unify names and possibly values for +// AbortReason, DeathTestOutcome, and flag characters above. +enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW }; + +// Routine for aborting the program which is safe to call from an +// exec-style death test child process, in which case the error +// message is propagated back to the parent process. Otherwise, the +// message is simply printed to stderr. In either case, the program +// then exits with status 1. +void DeathTestAbort(const String& message) { + // On a POSIX system, this function may be called from a threadsafe-style + // death test child process, which operates on a very small stack. Use + // the heap for any additional non-minuscule memory requirements. + const InternalRunDeathTestFlag* const flag = + GetUnitTestImpl()->internal_run_death_test_flag(); + if (flag != NULL) { + FILE* parent = posix::FDOpen(flag->write_fd(), "w"); + fputc(kDeathTestInternalError, parent); + fprintf(parent, "%s", message.c_str()); + fflush(parent); + _exit(1); + } else { + fprintf(stderr, "%s", message.c_str()); + fflush(stderr); + posix::Abort(); + } +} + +// A replacement for CHECK that calls DeathTestAbort if the assertion +// fails. +# define GTEST_DEATH_TEST_CHECK_(expression) \ + do { \ + if (!::testing::internal::IsTrue(expression)) { \ + DeathTestAbort(::testing::internal::String::Format( \ + "CHECK failed: File %s, line %d: %s", \ + __FILE__, __LINE__, #expression)); \ + } \ + } while (::testing::internal::AlwaysFalse()) + +// This macro is similar to GTEST_DEATH_TEST_CHECK_, but it is meant for +// evaluating any system call that fulfills two conditions: it must return +// -1 on failure, and set errno to EINTR when it is interrupted and +// should be tried again. The macro expands to a loop that repeatedly +// evaluates the expression as long as it evaluates to -1 and sets +// errno to EINTR. If the expression evaluates to -1 but errno is +// something other than EINTR, DeathTestAbort is called. 
+# define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \ + do { \ + int gtest_retval; \ + do { \ + gtest_retval = (expression); \ + } while (gtest_retval == -1 && errno == EINTR); \ + if (gtest_retval == -1) { \ + DeathTestAbort(::testing::internal::String::Format( \ + "CHECK failed: File %s, line %d: %s != -1", \ + __FILE__, __LINE__, #expression)); \ + } \ + } while (::testing::internal::AlwaysFalse()) + +// Returns the message describing the last system error in errno. +String GetLastErrnoDescription() { + return String(errno == 0 ? "" : posix::StrError(errno)); +} + +// This is called from a death test parent process to read a failure +// message from the death test child process and log it with the FATAL +// severity. On Windows, the message is read from a pipe handle. On other +// platforms, it is read from a file descriptor. +static void FailFromInternalError(int fd) { + Message error; + char buffer[256]; + int num_read; + + do { + while ((num_read = posix::Read(fd, buffer, 255)) > 0) { + buffer[num_read] = '\0'; + error << buffer; + } + } while (num_read == -1 && errno == EINTR); + + if (num_read == 0) { + GTEST_LOG_(FATAL) << error.GetString(); + } else { + const int last_error = errno; + GTEST_LOG_(FATAL) << "Error while reading death test internal: " + << GetLastErrnoDescription() << " [" << last_error << "]"; + } +} + +// Death test constructor. Increments the running death test count +// for the current test. +DeathTest::DeathTest() { + TestInfo* const info = GetUnitTestImpl()->current_test_info(); + if (info == NULL) { + DeathTestAbort("Cannot run a death test outside of a TEST or " + "TEST_F construct"); + } +} + +// Creates and returns a death test by dispatching to the current +// death test factory. +bool DeathTest::Create(const char* statement, const RE* regex, + const char* file, int line, DeathTest** test) { + return GetUnitTestImpl()->death_test_factory()->Create( + statement, regex, file, line, test); +} + +const char* DeathTest::LastMessage() { + return last_death_test_message_.c_str(); +} + +void DeathTest::set_last_death_test_message(const String& message) { + last_death_test_message_ = message; +} + +String DeathTest::last_death_test_message_; + +// Provides cross platform implementation for some death functionality. +class DeathTestImpl : public DeathTest { + protected: + DeathTestImpl(const char* a_statement, const RE* a_regex) + : statement_(a_statement), + regex_(a_regex), + spawned_(false), + status_(-1), + outcome_(IN_PROGRESS), + read_fd_(-1), + write_fd_(-1) {} + + // read_fd_ is expected to be closed and cleared by a derived class. + ~DeathTestImpl() { GTEST_DEATH_TEST_CHECK_(read_fd_ == -1); } + + void Abort(AbortReason reason); + virtual bool Passed(bool status_ok); + + const char* statement() const { return statement_; } + const RE* regex() const { return regex_; } + bool spawned() const { return spawned_; } + void set_spawned(bool is_spawned) { spawned_ = is_spawned; } + int status() const { return status_; } + void set_status(int a_status) { status_ = a_status; } + DeathTestOutcome outcome() const { return outcome_; } + void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outcome; } + int read_fd() const { return read_fd_; } + void set_read_fd(int fd) { read_fd_ = fd; } + int write_fd() const { return write_fd_; } + void set_write_fd(int fd) { write_fd_ = fd; } + + // Called in the parent process only. Reads the result code of the death + // test child process via a pipe, interprets it to set the outcome_ + // member, and closes read_fd_. 
Outputs diagnostics and terminates in + // case of unexpected codes. + void ReadAndInterpretStatusByte(); + + private: + // The textual content of the code this object is testing. This class + // doesn't own this string and should not attempt to delete it. + const char* const statement_; + // The regular expression which test output must match. DeathTestImpl + // doesn't own this object and should not attempt to delete it. + const RE* const regex_; + // True if the death test child process has been successfully spawned. + bool spawned_; + // The exit status of the child process. + int status_; + // How the death test concluded. + DeathTestOutcome outcome_; + // Descriptor to the read end of the pipe to the child process. It is + // always -1 in the child process. The child keeps its write end of the + // pipe in write_fd_. + int read_fd_; + // Descriptor to the child's write end of the pipe to the parent process. + // It is always -1 in the parent process. The parent keeps its end of the + // pipe in read_fd_. + int write_fd_; +}; + +// Called in the parent process only. Reads the result code of the death +// test child process via a pipe, interprets it to set the outcome_ +// member, and closes read_fd_. Outputs diagnostics and terminates in +// case of unexpected codes. +void DeathTestImpl::ReadAndInterpretStatusByte() { + char flag; + int bytes_read; + + // The read() here blocks until data is available (signifying the + // failure of the death test) or until the pipe is closed (signifying + // its success), so it's okay to call this in the parent before + // the child process has exited. + do { + bytes_read = posix::Read(read_fd(), &flag, 1); + } while (bytes_read == -1 && errno == EINTR); + + if (bytes_read == 0) { + set_outcome(DIED); + } else if (bytes_read == 1) { + switch (flag) { + case kDeathTestReturned: + set_outcome(RETURNED); + break; + case kDeathTestThrew: + set_outcome(THREW); + break; + case kDeathTestLived: + set_outcome(LIVED); + break; + case kDeathTestInternalError: + FailFromInternalError(read_fd()); // Does not return. + break; + default: + GTEST_LOG_(FATAL) << "Death test child process reported " + << "unexpected status byte (" + << static_cast(flag) << ")"; + } + } else { + GTEST_LOG_(FATAL) << "Read from death test child process failed: " + << GetLastErrnoDescription(); + } + GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(read_fd())); + set_read_fd(-1); +} + +// Signals that the death test code which should have exited, didn't. +// Should be called only in a death test child process. +// Writes a status byte to the child's status file descriptor, then +// calls _exit(1). +void DeathTestImpl::Abort(AbortReason reason) { + // The parent process considers the death test to be a failure if + // it finds any data in our pipe. So, here we write a single flag byte + // to the pipe, then exit. + const char status_ch = + reason == TEST_DID_NOT_DIE ? kDeathTestLived : + reason == TEST_THREW_EXCEPTION ? kDeathTestThrew : kDeathTestReturned; + + GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Write(write_fd(), &status_ch, 1)); + // We are leaking the descriptor here because on some platforms (i.e., + // when built as Windows DLL), destructors of global objects will still + // run after calling _exit(). On such systems, write_fd_ will be + // indirectly closed from the destructor of UnitTestImpl, causing double + // close if it is also closed here. On debug configurations, double close + // may assert. 
As there are no in-process buffers to flush here, we are + // relying on the OS to close the descriptor after the process terminates + // when the destructors are not run. + _exit(1); // Exits w/o any normal exit hooks (we were supposed to crash) +} + +// Returns an indented copy of stderr output for a death test. +// This makes distinguishing death test output lines from regular log lines +// much easier. +static ::std::string FormatDeathTestOutput(const ::std::string& output) { + ::std::string ret; + for (size_t at = 0; ; ) { + const size_t line_end = output.find('\n', at); + ret += "[ DEATH ] "; + if (line_end == ::std::string::npos) { + ret += output.substr(at); + break; + } + ret += output.substr(at, line_end + 1 - at); + at = line_end + 1; + } + return ret; +} + +// Assesses the success or failure of a death test, using both private +// members which have previously been set, and one argument: +// +// Private data members: +// outcome: An enumeration describing how the death test +// concluded: DIED, LIVED, THREW, or RETURNED. The death test +// fails in the latter three cases. +// status: The exit status of the child process. On *nix, it is in the +// in the format specified by wait(2). On Windows, this is the +// value supplied to the ExitProcess() API or a numeric code +// of the exception that terminated the program. +// regex: A regular expression object to be applied to +// the test's captured standard error output; the death test +// fails if it does not match. +// +// Argument: +// status_ok: true if exit_status is acceptable in the context of +// this particular death test, which fails if it is false +// +// Returns true iff all of the above conditions are met. Otherwise, the +// first failing condition, in the order given above, is the one that is +// reported. Also sets the last death test message string. +bool DeathTestImpl::Passed(bool status_ok) { + if (!spawned()) + return false; + + const String error_message = GetCapturedStderr(); + + bool success = false; + Message buffer; + + buffer << "Death test: " << statement() << "\n"; + switch (outcome()) { + case LIVED: + buffer << " Result: failed to die.\n" + << " Error msg:\n" << FormatDeathTestOutput(error_message); + break; + case THREW: + buffer << " Result: threw an exception.\n" + << " Error msg:\n" << FormatDeathTestOutput(error_message); + break; + case RETURNED: + buffer << " Result: illegal return in test statement.\n" + << " Error msg:\n" << FormatDeathTestOutput(error_message); + break; + case DIED: + if (status_ok) { + const bool matched = RE::PartialMatch(error_message.c_str(), *regex()); + if (matched) { + success = true; + } else { + buffer << " Result: died but not with expected error.\n" + << " Expected: " << regex()->pattern() << "\n" + << "Actual msg:\n" << FormatDeathTestOutput(error_message); + } + } else { + buffer << " Result: died but not with expected exit code:\n" + << " " << ExitSummary(status()) << "\n" + << "Actual msg:\n" << FormatDeathTestOutput(error_message); + } + break; + case IN_PROGRESS: + default: + GTEST_LOG_(FATAL) + << "DeathTest::Passed somehow called before conclusion of test"; + } + + DeathTest::set_last_death_test_message(buffer.GetString()); + return success; +} + +# if GTEST_OS_WINDOWS +// WindowsDeathTest implements death tests on Windows. 
Due to the +// specifics of starting new processes on Windows, death tests there are +// always threadsafe, and Google Test considers the +// --gtest_death_test_style=fast setting to be equivalent to +// --gtest_death_test_style=threadsafe there. +// +// A few implementation notes: Like the Linux version, the Windows +// implementation uses pipes for child-to-parent communication. But due to +// the specifics of pipes on Windows, some extra steps are required: +// +// 1. The parent creates a communication pipe and stores handles to both +// ends of it. +// 2. The parent starts the child and provides it with the information +// necessary to acquire the handle to the write end of the pipe. +// 3. The child acquires the write end of the pipe and signals the parent +// using a Windows event. +// 4. Now the parent can release the write end of the pipe on its side. If +// this is done before step 3, the object's reference count goes down to +// 0 and it is destroyed, preventing the child from acquiring it. The +// parent now has to release it, or read operations on the read end of +// the pipe will not return when the child terminates. +// 5. The parent reads child's output through the pipe (outcome code and +// any possible error messages) from the pipe, and its stderr and then +// determines whether to fail the test. +// +// Note: to distinguish Win32 API calls from the local method and function +// calls, the former are explicitly resolved in the global namespace. +// +class WindowsDeathTest : public DeathTestImpl { + public: + WindowsDeathTest(const char* a_statement, + const RE* a_regex, + const char* file, + int line) + : DeathTestImpl(a_statement, a_regex), file_(file), line_(line) {} + + // All of these virtual functions are inherited from DeathTest. + virtual int Wait(); + virtual TestRole AssumeRole(); + + private: + // The name of the file in which the death test is located. + const char* const file_; + // The line number on which the death test is located. + const int line_; + // Handle to the write end of the pipe to the child process. + AutoHandle write_handle_; + // Child process handle. + AutoHandle child_handle_; + // Event the child process uses to signal the parent that it has + // acquired the handle to the write end of the pipe. After seeing this + // event the parent can release its own handles to make sure its + // ReadFile() calls return when the child terminates. + AutoHandle event_handle_; +}; + +// Waits for the child in a death test to exit, returning its exit +// status, or 0 if no child process exists. As a side effect, sets the +// outcome data member. +int WindowsDeathTest::Wait() { + if (!spawned()) + return 0; + + // Wait until the child either signals that it has acquired the write end + // of the pipe or it dies. + const HANDLE wait_handles[2] = { child_handle_.Get(), event_handle_.Get() }; + switch (::WaitForMultipleObjects(2, + wait_handles, + FALSE, // Waits for any of the handles. + INFINITE)) { + case WAIT_OBJECT_0: + case WAIT_OBJECT_0 + 1: + break; + default: + GTEST_DEATH_TEST_CHECK_(false); // Should not get here. + } + + // The child has acquired the write end of the pipe or exited. + // We release the handle on our side and continue. + write_handle_.Reset(); + event_handle_.Reset(); + + ReadAndInterpretStatusByte(); + + // Waits for the child process to exit if it haven't already. This + // returns immediately if the child has already exited, regardless of + // whether previous calls to WaitForMultipleObjects synchronized on this + // handle or not. 
+  GTEST_DEATH_TEST_CHECK_(
+      WAIT_OBJECT_0 == ::WaitForSingleObject(child_handle_.Get(),
+                                             INFINITE));
+  DWORD status_code;
+  GTEST_DEATH_TEST_CHECK_(
+      ::GetExitCodeProcess(child_handle_.Get(), &status_code) != FALSE);
+  child_handle_.Reset();
+  set_status(static_cast<int>(status_code));
+  return status();
+}
+
+// The AssumeRole process for a Windows death test.  It creates a child
+// process with the same executable as the current process to run the
+// death test.  The child process is given the --gtest_filter and
+// --gtest_internal_run_death_test flags such that it knows to run the
+// current death test only.
+DeathTest::TestRole WindowsDeathTest::AssumeRole() {
+  const UnitTestImpl* const impl = GetUnitTestImpl();
+  const InternalRunDeathTestFlag* const flag =
+      impl->internal_run_death_test_flag();
+  const TestInfo* const info = impl->current_test_info();
+  const int death_test_index = info->result()->death_test_count();
+
+  if (flag != NULL) {
+    // ParseInternalRunDeathTestFlag() has performed all the necessary
+    // processing.
+    set_write_fd(flag->write_fd());
+    return EXECUTE_TEST;
+  }
+
+  // WindowsDeathTest uses an anonymous pipe to communicate results of
+  // a death test.
+  SECURITY_ATTRIBUTES handles_are_inheritable = {
+    sizeof(SECURITY_ATTRIBUTES), NULL, TRUE };
+  HANDLE read_handle, write_handle;
+  GTEST_DEATH_TEST_CHECK_(
+      ::CreatePipe(&read_handle, &write_handle, &handles_are_inheritable,
+                   0)  // Default buffer size.
+      != FALSE);
+  set_read_fd(::_open_osfhandle(reinterpret_cast<intptr_t>(read_handle),
+                                O_RDONLY));
+  write_handle_.Reset(write_handle);
+  event_handle_.Reset(::CreateEvent(
+      &handles_are_inheritable,
+      TRUE,    // The event will automatically reset to non-signaled state.
+      FALSE,   // The initial state is non-signalled.
+      NULL));  // The event is unnamed.
+  GTEST_DEATH_TEST_CHECK_(event_handle_.Get() != NULL);
+  const String filter_flag = String::Format("--%s%s=%s.%s",
+                                            GTEST_FLAG_PREFIX_, kFilterFlag,
+                                            info->test_case_name(),
+                                            info->name());
+  const String internal_flag = String::Format(
+      "--%s%s=%s|%d|%d|%u|%Iu|%Iu",
+      GTEST_FLAG_PREFIX_,
+      kInternalRunDeathTestFlag,
+      file_, line_,
+      death_test_index,
+      static_cast<unsigned int>(::GetCurrentProcessId()),
+      // size_t has the same width as pointers on both 32-bit and 64-bit
+      // Windows platforms.
+      // See http://msdn.microsoft.com/en-us/library/tcxf1dw6.aspx.
+      reinterpret_cast<size_t>(write_handle),
+      reinterpret_cast<size_t>(event_handle_.Get()));
+
+  char executable_path[_MAX_PATH + 1];  // NOLINT
+  GTEST_DEATH_TEST_CHECK_(
+      _MAX_PATH + 1 != ::GetModuleFileNameA(NULL,
+                                            executable_path,
+                                            _MAX_PATH));
+
+  String command_line = String::Format("%s %s \"%s\"",
+                                       ::GetCommandLineA(),
+                                       filter_flag.c_str(),
+                                       internal_flag.c_str());
+
+  DeathTest::set_last_death_test_message("");
+
+  CaptureStderr();
+  // Flush the log buffers since the log streams are shared with the child.
+  FlushInfoLog();
+
+  // The child process will share the standard handles with the parent.
+  STARTUPINFOA startup_info;
+  memset(&startup_info, 0, sizeof(STARTUPINFO));
+  startup_info.dwFlags = STARTF_USESTDHANDLES;
+  startup_info.hStdInput = ::GetStdHandle(STD_INPUT_HANDLE);
+  startup_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE);
+  startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE);
+
+  PROCESS_INFORMATION process_info;
+  GTEST_DEATH_TEST_CHECK_(::CreateProcessA(
+      executable_path,
+      const_cast<char*>(command_line.c_str()),
+      NULL,   // Returned process handle is not inheritable.
+      NULL,   // Returned thread handle is not inheritable.
+ TRUE, // Child inherits all inheritable handles (for write_handle_). + 0x0, // Default creation flags. + NULL, // Inherit the parent's environment. + UnitTest::GetInstance()->original_working_dir(), + &startup_info, + &process_info) != FALSE); + child_handle_.Reset(process_info.hProcess); + ::CloseHandle(process_info.hThread); + set_spawned(true); + return OVERSEE_TEST; +} +# else // We are not on Windows. + +// ForkingDeathTest provides implementations for most of the abstract +// methods of the DeathTest interface. Only the AssumeRole method is +// left undefined. +class ForkingDeathTest : public DeathTestImpl { + public: + ForkingDeathTest(const char* statement, const RE* regex); + + // All of these virtual functions are inherited from DeathTest. + virtual int Wait(); + + protected: + void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; } + + private: + // PID of child process during death test; 0 in the child process itself. + pid_t child_pid_; +}; + +// Constructs a ForkingDeathTest. +ForkingDeathTest::ForkingDeathTest(const char* a_statement, const RE* a_regex) + : DeathTestImpl(a_statement, a_regex), + child_pid_(-1) {} + +// Waits for the child in a death test to exit, returning its exit +// status, or 0 if no child process exists. As a side effect, sets the +// outcome data member. +int ForkingDeathTest::Wait() { + if (!spawned()) + return 0; + + ReadAndInterpretStatusByte(); + + int status_value; + GTEST_DEATH_TEST_CHECK_SYSCALL_(waitpid(child_pid_, &status_value, 0)); + set_status(status_value); + return status_value; +} + +// A concrete death test class that forks, then immediately runs the test +// in the child process. +class NoExecDeathTest : public ForkingDeathTest { + public: + NoExecDeathTest(const char* a_statement, const RE* a_regex) : + ForkingDeathTest(a_statement, a_regex) { } + virtual TestRole AssumeRole(); +}; + +// The AssumeRole process for a fork-and-run death test. It implements a +// straightforward fork, with a simple pipe to transmit the status byte. +DeathTest::TestRole NoExecDeathTest::AssumeRole() { + const size_t thread_count = GetThreadCount(); + if (thread_count != 1) { + GTEST_LOG_(WARNING) << DeathTestThreadWarning(thread_count); + } + + int pipe_fd[2]; + GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1); + + DeathTest::set_last_death_test_message(""); + CaptureStderr(); + // When we fork the process below, the log file buffers are copied, but the + // file descriptors are shared. We flush all log files here so that closing + // the file descriptors in the child process doesn't throw off the + // synchronization between descriptors and buffers in the parent process. + // This is as close to the fork as possible to avoid a race condition in case + // there are multiple threads running before the death test, and another + // thread writes to the log file. + FlushInfoLog(); + + const pid_t child_pid = fork(); + GTEST_DEATH_TEST_CHECK_(child_pid != -1); + set_child_pid(child_pid); + if (child_pid == 0) { + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[0])); + set_write_fd(pipe_fd[1]); + // Redirects all logging to stderr in the child process to prevent + // concurrent writes to the log files. We capture stderr in the parent + // process and append the child process' output to a log. + LogToStderr(); + // Event forwarding to the listeners of event listener API mush be shut + // down in death test subprocesses. 
+    GetUnitTestImpl()->listeners()->SuppressEventForwarding();
+    return EXECUTE_TEST;
+  } else {
+    GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
+    set_read_fd(pipe_fd[0]);
+    set_spawned(true);
+    return OVERSEE_TEST;
+  }
+}
+
+// A concrete death test class that forks and re-executes the main
+// program from the beginning, with command-line flags set that cause
+// only this specific death test to be run.
+class ExecDeathTest : public ForkingDeathTest {
+ public:
+  ExecDeathTest(const char* a_statement, const RE* a_regex,
+                const char* file, int line) :
+      ForkingDeathTest(a_statement, a_regex), file_(file), line_(line) { }
+  virtual TestRole AssumeRole();
+ private:
+  // The name of the file in which the death test is located.
+  const char* const file_;
+  // The line number on which the death test is located.
+  const int line_;
+};
+
+// Utility class for accumulating command-line arguments.
+class Arguments {
+ public:
+  Arguments() {
+    args_.push_back(NULL);
+  }
+
+  ~Arguments() {
+    for (std::vector<char*>::iterator i = args_.begin(); i != args_.end();
+         ++i) {
+      free(*i);
+    }
+  }
+  void AddArgument(const char* argument) {
+    args_.insert(args_.end() - 1, posix::StrDup(argument));
+  }
+
+  template <typename Str>
+  void AddArguments(const ::std::vector<Str>& arguments) {
+    for (typename ::std::vector<Str>::const_iterator i = arguments.begin();
+         i != arguments.end();
+         ++i) {
+      args_.insert(args_.end() - 1, posix::StrDup(i->c_str()));
+    }
+  }
+  char* const* Argv() {
+    return &args_[0];
+  }
+ private:
+  std::vector<char*> args_;
+};
+
+// A struct that encompasses the arguments to the child process of a
+// threadsafe-style death test process.
+struct ExecDeathTestArgs {
+  char* const* argv;  // Command-line arguments for the child's call to exec
+  int close_fd;       // File descriptor to close; the read end of a pipe
+};
+
+# if GTEST_OS_MAC
+inline char** GetEnviron() {
+  // When Google Test is built as a framework on MacOS X, the environ variable
+  // is unavailable. Apple's documentation (man environ) recommends using
+  // _NSGetEnviron() instead.
+  return *_NSGetEnviron();
+}
+# else
+// Some POSIX platforms expect you to declare environ. extern "C" makes
+// it reside in the global namespace.
+extern "C" char** environ;
+inline char** GetEnviron() { return environ; }
+# endif  // GTEST_OS_MAC
+
+// The main function for a threadsafe-style death test child process.
+// This function is called in a clone()-ed process and thus must avoid
+// any potentially unsafe operations like malloc or libc functions.
+static int ExecDeathTestChildMain(void* child_arg) {
+  ExecDeathTestArgs* const args = static_cast<ExecDeathTestArgs*>(child_arg);
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(close(args->close_fd));
+
+  // We need to execute the test program in the same environment where
+  // it was originally invoked.  Therefore we change to the original
+  // working directory first.
+  const char* const original_dir =
+      UnitTest::GetInstance()->original_working_dir();
+  // We can safely call chdir() as it's a direct system call.
+  if (chdir(original_dir) != 0) {
+    DeathTestAbort(String::Format("chdir(\"%s\") failed: %s",
+                                  original_dir,
+                                  GetLastErrnoDescription().c_str()));
+    return EXIT_FAILURE;
+  }
+
+  // We can safely call execve() as it's a direct system call.  We
+  // cannot use execvp() as it's a libc function and thus potentially
+  // unsafe.  Since execve() doesn't search the PATH, the user must
+  // invoke the test program via a valid path that contains at least
+  // one path separator.
+ execve(args->argv[0], args->argv, GetEnviron()); + DeathTestAbort(String::Format("execve(%s, ...) in %s failed: %s", + args->argv[0], + original_dir, + GetLastErrnoDescription().c_str())); + return EXIT_FAILURE; +} + +// Two utility routines that together determine the direction the stack +// grows. +// This could be accomplished more elegantly by a single recursive +// function, but we want to guard against the unlikely possibility of +// a smart compiler optimizing the recursion away. +// +// GTEST_NO_INLINE_ is required to prevent GCC 4.6 from inlining +// StackLowerThanAddress into StackGrowsDown, which then doesn't give +// correct answer. +bool StackLowerThanAddress(const void* ptr) GTEST_NO_INLINE_; +bool StackLowerThanAddress(const void* ptr) { + int dummy; + return &dummy < ptr; +} + +bool StackGrowsDown() { + int dummy; + return StackLowerThanAddress(&dummy); +} + +// A threadsafe implementation of fork(2) for threadsafe-style death tests +// that uses clone(2). It dies with an error message if anything goes +// wrong. +static pid_t ExecDeathTestFork(char* const* argv, int close_fd) { + ExecDeathTestArgs args = { argv, close_fd }; + pid_t child_pid = -1; + +# if GTEST_HAS_CLONE + const bool use_fork = GTEST_FLAG(death_test_use_fork); + + if (!use_fork) { + static const bool stack_grows_down = StackGrowsDown(); + const size_t stack_size = getpagesize(); + // MMAP_ANONYMOUS is not defined on Mac, so we use MAP_ANON instead. + void* const stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE, + MAP_ANON | MAP_PRIVATE, -1, 0); + GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED); + void* const stack_top = + static_cast(stack) + (stack_grows_down ? stack_size : 0); + + child_pid = clone(&ExecDeathTestChildMain, stack_top, SIGCHLD, &args); + + GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1); + } +# else + const bool use_fork = true; +# endif // GTEST_HAS_CLONE + + if (use_fork && (child_pid = fork()) == 0) { + ExecDeathTestChildMain(&args); + _exit(0); + } + + GTEST_DEATH_TEST_CHECK_(child_pid != -1); + return child_pid; +} + +// The AssumeRole process for a fork-and-exec death test. It re-executes the +// main program from the beginning, setting the --gtest_filter +// and --gtest_internal_run_death_test flags to cause only the current +// death test to be re-run. 
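+//
+// For illustration only (hypothetical values, not emitted verbatim by this
+// code): for a death test defined at foo_test.cc:42 with death test index 1
+// that reports its status byte over file descriptor 5, the re-executed child
+// would receive a command line along the lines of
+//
+//   ./foo_test --gtest_filter=FooTest.Bar \
+//              --gtest_internal_run_death_test=foo_test.cc|42|1|5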
+DeathTest::TestRole ExecDeathTest::AssumeRole() { + const UnitTestImpl* const impl = GetUnitTestImpl(); + const InternalRunDeathTestFlag* const flag = + impl->internal_run_death_test_flag(); + const TestInfo* const info = impl->current_test_info(); + const int death_test_index = info->result()->death_test_count(); + + if (flag != NULL) { + set_write_fd(flag->write_fd()); + return EXECUTE_TEST; + } + + int pipe_fd[2]; + GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1); + // Clear the close-on-exec flag on the write end of the pipe, lest + // it be closed when the child process does an exec: + GTEST_DEATH_TEST_CHECK_(fcntl(pipe_fd[1], F_SETFD, 0) != -1); + + const String filter_flag = + String::Format("--%s%s=%s.%s", + GTEST_FLAG_PREFIX_, kFilterFlag, + info->test_case_name(), info->name()); + const String internal_flag = + String::Format("--%s%s=%s|%d|%d|%d", + GTEST_FLAG_PREFIX_, kInternalRunDeathTestFlag, + file_, line_, death_test_index, pipe_fd[1]); + Arguments args; + args.AddArguments(GetArgvs()); + args.AddArgument(filter_flag.c_str()); + args.AddArgument(internal_flag.c_str()); + + DeathTest::set_last_death_test_message(""); + + CaptureStderr(); + // See the comment in NoExecDeathTest::AssumeRole for why the next line + // is necessary. + FlushInfoLog(); + + const pid_t child_pid = ExecDeathTestFork(args.Argv(), pipe_fd[0]); + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1])); + set_child_pid(child_pid); + set_read_fd(pipe_fd[0]); + set_spawned(true); + return OVERSEE_TEST; +} + +# endif // !GTEST_OS_WINDOWS + +// Creates a concrete DeathTest-derived class that depends on the +// --gtest_death_test_style flag, and sets the pointer pointed to +// by the "test" argument to its address. If the test should be +// skipped, sets that pointer to NULL. Returns true, unless the +// flag is set to an invalid value. +bool DefaultDeathTestFactory::Create(const char* statement, const RE* regex, + const char* file, int line, + DeathTest** test) { + UnitTestImpl* const impl = GetUnitTestImpl(); + const InternalRunDeathTestFlag* const flag = + impl->internal_run_death_test_flag(); + const int death_test_index = impl->current_test_info() + ->increment_death_test_count(); + + if (flag != NULL) { + if (death_test_index > flag->index()) { + DeathTest::set_last_death_test_message(String::Format( + "Death test count (%d) somehow exceeded expected maximum (%d)", + death_test_index, flag->index())); + return false; + } + + if (!(flag->file() == file && flag->line() == line && + flag->index() == death_test_index)) { + *test = NULL; + return true; + } + } + +# if GTEST_OS_WINDOWS + + if (GTEST_FLAG(death_test_style) == "threadsafe" || + GTEST_FLAG(death_test_style) == "fast") { + *test = new WindowsDeathTest(statement, regex, file, line); + } + +# else + + if (GTEST_FLAG(death_test_style) == "threadsafe") { + *test = new ExecDeathTest(statement, regex, file, line); + } else if (GTEST_FLAG(death_test_style) == "fast") { + *test = new NoExecDeathTest(statement, regex); + } + +# endif // GTEST_OS_WINDOWS + + else { // NOLINT - this is more readable than unbalanced brackets inside #if. + DeathTest::set_last_death_test_message(String::Format( + "Unknown death test style \"%s\" encountered", + GTEST_FLAG(death_test_style).c_str())); + return false; + } + + return true; +} + +// Splits a given string on a given delimiter, populating a given +// vector with the fields. GTEST_HAS_DEATH_TEST implies that we have +// ::std::string, so we can use it here. 
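+// For example (added note, not part of the original comment), splitting
+// "a|b|c" on '|' yields {"a", "b", "c"}, and splitting "a||" yields
+// {"a", "", ""} -- empty fields are preserved.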
+static void SplitString(const ::std::string& str, char delimiter,
+                        ::std::vector< ::std::string>* dest) {
+  ::std::vector< ::std::string> parsed;
+  ::std::string::size_type pos = 0;
+  while (::testing::internal::AlwaysTrue()) {
+    const ::std::string::size_type colon = str.find(delimiter, pos);
+    if (colon == ::std::string::npos) {
+      parsed.push_back(str.substr(pos));
+      break;
+    } else {
+      parsed.push_back(str.substr(pos, colon - pos));
+      pos = colon + 1;
+    }
+  }
+  dest->swap(parsed);
+}
+
+# if GTEST_OS_WINDOWS
+// Recreates the pipe and event handles from the provided parameters,
+// signals the event, and returns a file descriptor wrapped around the pipe
+// handle. This function is called in the child process only.
+int GetStatusFileDescriptor(unsigned int parent_process_id,
+                            size_t write_handle_as_size_t,
+                            size_t event_handle_as_size_t) {
+  AutoHandle parent_process_handle(::OpenProcess(PROCESS_DUP_HANDLE,
+                                                 FALSE,  // Non-inheritable.
+                                                 parent_process_id));
+  if (parent_process_handle.Get() == INVALID_HANDLE_VALUE) {
+    DeathTestAbort(String::Format("Unable to open parent process %u",
+                                  parent_process_id));
+  }
+
+  // TODO(vladl@google.com): Replace the following check with a
+  // compile-time assertion when available.
+  GTEST_CHECK_(sizeof(HANDLE) <= sizeof(size_t));
+
+  const HANDLE write_handle =
+      reinterpret_cast<HANDLE>(write_handle_as_size_t);
+  HANDLE dup_write_handle;
+
+  // The newly initialized handle is accessible only in the parent
+  // process. To obtain one accessible within the child, we need to use
+  // DuplicateHandle.
+  if (!::DuplicateHandle(parent_process_handle.Get(), write_handle,
+                         ::GetCurrentProcess(), &dup_write_handle,
+                         0x0,    // Requested privileges ignored since
+                                 // DUPLICATE_SAME_ACCESS is used.
+                         FALSE,  // Request non-inheritable handler.
+                         DUPLICATE_SAME_ACCESS)) {
+    DeathTestAbort(String::Format(
+        "Unable to duplicate the pipe handle %Iu from the parent process %u",
+        write_handle_as_size_t, parent_process_id));
+  }
+
+  const HANDLE event_handle = reinterpret_cast<HANDLE>(event_handle_as_size_t);
+  HANDLE dup_event_handle;
+
+  if (!::DuplicateHandle(parent_process_handle.Get(), event_handle,
+                         ::GetCurrentProcess(), &dup_event_handle,
+                         0x0,
+                         FALSE,
+                         DUPLICATE_SAME_ACCESS)) {
+    DeathTestAbort(String::Format(
+        "Unable to duplicate the event handle %Iu from the parent process %u",
+        event_handle_as_size_t, parent_process_id));
+  }
+
+  const int write_fd =
+      ::_open_osfhandle(reinterpret_cast<intptr_t>(dup_write_handle), O_APPEND);
+  if (write_fd == -1) {
+    DeathTestAbort(String::Format(
+        "Unable to convert pipe handle %Iu to a file descriptor",
+        write_handle_as_size_t));
+  }
+
+  // Signals the parent that the write end of the pipe has been acquired
+  // so the parent can release its own write end.
+  ::SetEvent(dup_event_handle);
+
+  return write_fd;
+}
+# endif  // GTEST_OS_WINDOWS
+
+// Returns a newly created InternalRunDeathTestFlag object with fields
+// initialized from the GTEST_FLAG(internal_run_death_test) flag if
+// the flag is specified; otherwise returns NULL.
+InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() {
+  if (GTEST_FLAG(internal_run_death_test) == "") return NULL;
+
+  // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we
+  // can use it here.
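+  // Added illustration (not in the original source): on POSIX systems the
+  // flag carries four '|'-separated fields, e.g. (hypothetical values)
+  //
+  //   --gtest_internal_run_death_test=foo_test.cc|42|1|7
+  //
+  // i.e. file, line, death test index, and the write end of the status pipe,
+  // while on Windows six fields are expected: file, line, index, the parent
+  // process id, and the inherited pipe and event handles.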
+ int line = -1; + int index = -1; + ::std::vector< ::std::string> fields; + SplitString(GTEST_FLAG(internal_run_death_test).c_str(), '|', &fields); + int write_fd = -1; + +# if GTEST_OS_WINDOWS + + unsigned int parent_process_id = 0; + size_t write_handle_as_size_t = 0; + size_t event_handle_as_size_t = 0; + + if (fields.size() != 6 + || !ParseNaturalNumber(fields[1], &line) + || !ParseNaturalNumber(fields[2], &index) + || !ParseNaturalNumber(fields[3], &parent_process_id) + || !ParseNaturalNumber(fields[4], &write_handle_as_size_t) + || !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) { + DeathTestAbort(String::Format( + "Bad --gtest_internal_run_death_test flag: %s", + GTEST_FLAG(internal_run_death_test).c_str())); + } + write_fd = GetStatusFileDescriptor(parent_process_id, + write_handle_as_size_t, + event_handle_as_size_t); +# else + + if (fields.size() != 4 + || !ParseNaturalNumber(fields[1], &line) + || !ParseNaturalNumber(fields[2], &index) + || !ParseNaturalNumber(fields[3], &write_fd)) { + DeathTestAbort(String::Format( + "Bad --gtest_internal_run_death_test flag: %s", + GTEST_FLAG(internal_run_death_test).c_str())); + } + +# endif // GTEST_OS_WINDOWS + + return new InternalRunDeathTestFlag(fields[0], line, index, write_fd); +} + +} // namespace internal + +#endif // GTEST_HAS_DEATH_TEST + +} // namespace testing +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: keith.ray@gmail.com (Keith Ray) + + +#include + +#if GTEST_OS_WINDOWS_MOBILE +# include +#elif GTEST_OS_WINDOWS +# include +# include +#elif GTEST_OS_SYMBIAN || GTEST_OS_NACL +// Symbian OpenC and NaCl have PATH_MAX in sys/syslimits.h +# include +#else +# include +# include // Some Linux distributions define PATH_MAX here. 
+#endif // GTEST_OS_WINDOWS_MOBILE + +#if GTEST_OS_WINDOWS +# define GTEST_PATH_MAX_ _MAX_PATH +#elif defined(PATH_MAX) +# define GTEST_PATH_MAX_ PATH_MAX +#elif defined(_XOPEN_PATH_MAX) +# define GTEST_PATH_MAX_ _XOPEN_PATH_MAX +#else +# define GTEST_PATH_MAX_ _POSIX_PATH_MAX +#endif // GTEST_OS_WINDOWS + + +namespace testing { +namespace internal { + +#if GTEST_OS_WINDOWS +// On Windows, '\\' is the standard path separator, but many tools and the +// Windows API also accept '/' as an alternate path separator. Unless otherwise +// noted, a file path can contain either kind of path separators, or a mixture +// of them. +const char kPathSeparator = '\\'; +const char kAlternatePathSeparator = '/'; +const char kPathSeparatorString[] = "\\"; +const char kAlternatePathSeparatorString[] = "/"; +# if GTEST_OS_WINDOWS_MOBILE +// Windows CE doesn't have a current directory. You should not use +// the current directory in tests on Windows CE, but this at least +// provides a reasonable fallback. +const char kCurrentDirectoryString[] = "\\"; +// Windows CE doesn't define INVALID_FILE_ATTRIBUTES +const DWORD kInvalidFileAttributes = 0xffffffff; +# else +const char kCurrentDirectoryString[] = ".\\"; +# endif // GTEST_OS_WINDOWS_MOBILE +#else +const char kPathSeparator = '/'; +const char kPathSeparatorString[] = "/"; +const char kCurrentDirectoryString[] = "./"; +#endif // GTEST_OS_WINDOWS + +// Returns whether the given character is a valid path separator. +static bool IsPathSeparator(char c) { +#if GTEST_HAS_ALT_PATH_SEP_ + return (c == kPathSeparator) || (c == kAlternatePathSeparator); +#else + return c == kPathSeparator; +#endif +} + +// Returns the current working directory, or "" if unsuccessful. +FilePath FilePath::GetCurrentDir() { +#if GTEST_OS_WINDOWS_MOBILE + // Windows CE doesn't have a current directory, so we just return + // something reasonable. + return FilePath(kCurrentDirectoryString); +#elif GTEST_OS_WINDOWS + char cwd[GTEST_PATH_MAX_ + 1] = { '\0' }; + return FilePath(_getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd); +#else + char cwd[GTEST_PATH_MAX_ + 1] = { '\0' }; + return FilePath(getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd); +#endif // GTEST_OS_WINDOWS_MOBILE +} + +// Returns a copy of the FilePath with the case-insensitive extension removed. +// Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns +// FilePath("dir/file"). If a case-insensitive extension is not +// found, returns a copy of the original FilePath. +FilePath FilePath::RemoveExtension(const char* extension) const { + String dot_extension(String::Format(".%s", extension)); + if (pathname_.EndsWithCaseInsensitive(dot_extension.c_str())) { + return FilePath(String(pathname_.c_str(), pathname_.length() - 4)); + } + return *this; +} + +// Returns a pointer to the last occurence of a valid path separator in +// the FilePath. On Windows, for example, both '/' and '\' are valid path +// separators. Returns NULL if no path separator was found. +const char* FilePath::FindLastPathSeparator() const { + const char* const last_sep = strrchr(c_str(), kPathSeparator); +#if GTEST_HAS_ALT_PATH_SEP_ + const char* const last_alt_sep = strrchr(c_str(), kAlternatePathSeparator); + // Comparing two pointers of which only one is NULL is undefined. + if (last_alt_sep != NULL && + (last_sep == NULL || last_alt_sep > last_sep)) { + return last_alt_sep; + } +#endif + return last_sep; +} + +// Returns a copy of the FilePath with the directory part removed. 
+// Example: FilePath("path/to/file").RemoveDirectoryName() returns +// FilePath("file"). If there is no directory part ("just_a_file"), it returns +// the FilePath unmodified. If there is no file part ("just_a_dir/") it +// returns an empty FilePath (""). +// On Windows platform, '\' is the path separator, otherwise it is '/'. +FilePath FilePath::RemoveDirectoryName() const { + const char* const last_sep = FindLastPathSeparator(); + return last_sep ? FilePath(String(last_sep + 1)) : *this; +} + +// RemoveFileName returns the directory path with the filename removed. +// Example: FilePath("path/to/file").RemoveFileName() returns "path/to/". +// If the FilePath is "a_file" or "/a_file", RemoveFileName returns +// FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does +// not have a file, like "just/a/dir/", it returns the FilePath unmodified. +// On Windows platform, '\' is the path separator, otherwise it is '/'. +FilePath FilePath::RemoveFileName() const { + const char* const last_sep = FindLastPathSeparator(); + String dir; + if (last_sep) { + dir = String(c_str(), last_sep + 1 - c_str()); + } else { + dir = kCurrentDirectoryString; + } + return FilePath(dir); +} + +// Helper functions for naming files in a directory for xml output. + +// Given directory = "dir", base_name = "test", number = 0, +// extension = "xml", returns "dir/test.xml". If number is greater +// than zero (e.g., 12), returns "dir/test_12.xml". +// On Windows platform, uses \ as the separator rather than /. +FilePath FilePath::MakeFileName(const FilePath& directory, + const FilePath& base_name, + int number, + const char* extension) { + String file; + if (number == 0) { + file = String::Format("%s.%s", base_name.c_str(), extension); + } else { + file = String::Format("%s_%d.%s", base_name.c_str(), number, extension); + } + return ConcatPaths(directory, FilePath(file)); +} + +// Given directory = "dir", relative_path = "test.xml", returns "dir/test.xml". +// On Windows, uses \ as the separator rather than /. +FilePath FilePath::ConcatPaths(const FilePath& directory, + const FilePath& relative_path) { + if (directory.IsEmpty()) + return relative_path; + const FilePath dir(directory.RemoveTrailingPathSeparator()); + return FilePath(String::Format("%s%c%s", dir.c_str(), kPathSeparator, + relative_path.c_str())); +} + +// Returns true if pathname describes something findable in the file-system, +// either a file, directory, or whatever. +bool FilePath::FileOrDirectoryExists() const { +#if GTEST_OS_WINDOWS_MOBILE + LPCWSTR unicode = String::AnsiToUtf16(pathname_.c_str()); + const DWORD attributes = GetFileAttributes(unicode); + delete [] unicode; + return attributes != kInvalidFileAttributes; +#else + posix::StatStruct file_stat; + return posix::Stat(pathname_.c_str(), &file_stat) == 0; +#endif // GTEST_OS_WINDOWS_MOBILE +} + +// Returns true if pathname describes a directory in the file-system +// that exists. +bool FilePath::DirectoryExists() const { + bool result = false; +#if GTEST_OS_WINDOWS + // Don't strip off trailing separator if path is a root directory on + // Windows (like "C:\\"). + const FilePath& path(IsRootDirectory() ? 
*this : + RemoveTrailingPathSeparator()); +#else + const FilePath& path(*this); +#endif + +#if GTEST_OS_WINDOWS_MOBILE + LPCWSTR unicode = String::AnsiToUtf16(path.c_str()); + const DWORD attributes = GetFileAttributes(unicode); + delete [] unicode; + if ((attributes != kInvalidFileAttributes) && + (attributes & FILE_ATTRIBUTE_DIRECTORY)) { + result = true; + } +#else + posix::StatStruct file_stat; + result = posix::Stat(path.c_str(), &file_stat) == 0 && + posix::IsDir(file_stat); +#endif // GTEST_OS_WINDOWS_MOBILE + + return result; +} + +// Returns true if pathname describes a root directory. (Windows has one +// root directory per disk drive.) +bool FilePath::IsRootDirectory() const { +#if GTEST_OS_WINDOWS + // TODO(wan@google.com): on Windows a network share like + // \\server\share can be a root directory, although it cannot be the + // current directory. Handle this properly. + return pathname_.length() == 3 && IsAbsolutePath(); +#else + return pathname_.length() == 1 && IsPathSeparator(pathname_.c_str()[0]); +#endif +} + +// Returns true if pathname describes an absolute path. +bool FilePath::IsAbsolutePath() const { + const char* const name = pathname_.c_str(); +#if GTEST_OS_WINDOWS + return pathname_.length() >= 3 && + ((name[0] >= 'a' && name[0] <= 'z') || + (name[0] >= 'A' && name[0] <= 'Z')) && + name[1] == ':' && + IsPathSeparator(name[2]); +#else + return IsPathSeparator(name[0]); +#endif +} + +// Returns a pathname for a file that does not currently exist. The pathname +// will be directory/base_name.extension or +// directory/base_name_.extension if directory/base_name.extension +// already exists. The number will be incremented until a pathname is found +// that does not already exist. +// Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'. +// There could be a race condition if two or more processes are calling this +// function at the same time -- they could both pick the same filename. +FilePath FilePath::GenerateUniqueFileName(const FilePath& directory, + const FilePath& base_name, + const char* extension) { + FilePath full_pathname; + int number = 0; + do { + full_pathname.Set(MakeFileName(directory, base_name, number++, extension)); + } while (full_pathname.FileOrDirectoryExists()); + return full_pathname; +} + +// Returns true if FilePath ends with a path separator, which indicates that +// it is intended to represent a directory. Returns false otherwise. +// This does NOT check that a directory (or file) actually exists. +bool FilePath::IsDirectory() const { + return !pathname_.empty() && + IsPathSeparator(pathname_.c_str()[pathname_.length() - 1]); +} + +// Create directories so that path exists. Returns true if successful or if +// the directories already exist; returns false if unable to create directories +// for any reason. +bool FilePath::CreateDirectoriesRecursively() const { + if (!this->IsDirectory()) { + return false; + } + + if (pathname_.length() == 0 || this->DirectoryExists()) { + return true; + } + + const FilePath parent(this->RemoveTrailingPathSeparator().RemoveFileName()); + return parent.CreateDirectoriesRecursively() && this->CreateFolder(); +} + +// Create the directory so that path exists. Returns true if successful or +// if the directory already exists; returns false if unable to create the +// directory for any reason, including if the parent directory does not +// exist. Not named "CreateDirectory" because that's a macro on Windows. 
+bool FilePath::CreateFolder() const { +#if GTEST_OS_WINDOWS_MOBILE + FilePath removed_sep(this->RemoveTrailingPathSeparator()); + LPCWSTR unicode = String::AnsiToUtf16(removed_sep.c_str()); + int result = CreateDirectory(unicode, NULL) ? 0 : -1; + delete [] unicode; +#elif GTEST_OS_WINDOWS + int result = _mkdir(pathname_.c_str()); +#else + int result = mkdir(pathname_.c_str(), 0777); +#endif // GTEST_OS_WINDOWS_MOBILE + + if (result == -1) { + return this->DirectoryExists(); // An error is OK if the directory exists. + } + return true; // No error. +} + +// If input name has a trailing separator character, remove it and return the +// name, otherwise return the name string unmodified. +// On Windows platform, uses \ as the separator, other platforms use /. +FilePath FilePath::RemoveTrailingPathSeparator() const { + return IsDirectory() + ? FilePath(String(pathname_.c_str(), pathname_.length() - 1)) + : *this; +} + +// Removes any redundant separators that might be in the pathname. +// For example, "bar///foo" becomes "bar/foo". Does not eliminate other +// redundancies that might be in a pathname involving "." or "..". +// TODO(wan@google.com): handle Windows network shares (e.g. \\server\share). +void FilePath::Normalize() { + if (pathname_.c_str() == NULL) { + pathname_ = ""; + return; + } + const char* src = pathname_.c_str(); + char* const dest = new char[pathname_.length() + 1]; + char* dest_ptr = dest; + memset(dest_ptr, 0, pathname_.length() + 1); + + while (*src != '\0') { + *dest_ptr = *src; + if (!IsPathSeparator(*src)) { + src++; + } else { +#if GTEST_HAS_ALT_PATH_SEP_ + if (*dest_ptr == kAlternatePathSeparator) { + *dest_ptr = kPathSeparator; + } +#endif + while (IsPathSeparator(*src)) + src++; + } + dest_ptr++; + } + *dest_ptr = '\0'; + pathname_ = dest; + delete[] dest; +} + +} // namespace internal +} // namespace testing +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+
+#include <limits.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#if GTEST_OS_WINDOWS_MOBILE
+# include <windows.h>  // For TerminateProcess()
+#elif GTEST_OS_WINDOWS
+# include <io.h>
+# include <sys/stat.h>
+#else
+# include <unistd.h>
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+#if GTEST_OS_MAC
+# include <mach/mach_init.h>
+# include <mach/task.h>
+# include <mach/vm_map.h>
+#endif  // GTEST_OS_MAC
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation.  It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error.  This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+namespace internal {
+
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC and C++Builder do not provide a definition of STDERR_FILENO.
+const int kStdOutFileno = 1;
+const int kStdErrFileno = 2;
+#else
+const int kStdOutFileno = STDOUT_FILENO;
+const int kStdErrFileno = STDERR_FILENO;
+#endif  // _MSC_VER
+
+#if GTEST_OS_MAC
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+size_t GetThreadCount() {
+  const task_t task = mach_task_self();
+  mach_msg_type_number_t thread_count;
+  thread_act_array_t thread_list;
+  const kern_return_t status = task_threads(task, &thread_list, &thread_count);
+  if (status == KERN_SUCCESS) {
+    // task_threads allocates resources in thread_list and we need to free them
+    // to avoid leaks.
+    vm_deallocate(task,
+                  reinterpret_cast<vm_address_t>(thread_list),
+                  sizeof(thread_t) * thread_count);
+    return static_cast<size_t>(thread_count);
+  } else {
+    return 0;
+  }
+}
+
+#else
+
+size_t GetThreadCount() {
+  // There's no portable way to detect the number of threads, so we just
+  // return 0 to indicate that we cannot detect it.
+  return 0;
+}
+
+#endif  // GTEST_OS_MAC
+
+#if GTEST_USES_POSIX_RE
+
+// Implements RE.  Currently only needed for death tests.
+
+RE::~RE() {
+  if (is_valid_) {
+    // regfree'ing an invalid regex might crash because the content
+    // of the regex is undefined. Since the regex's are essentially
+    // the same, one cannot be valid (or invalid) without the other
+    // being so too.
+    regfree(&partial_regex_);
+    regfree(&full_regex_);
+  }
+  free(const_cast<char*>(pattern_));
+}
+
+// Returns true iff regular expression re matches the entire str.
+bool RE::FullMatch(const char* str, const RE& re) {
+  if (!re.is_valid_) return false;
+
+  regmatch_t match;
+  return regexec(&re.full_regex_, str, 1, &match, 0) == 0;
+}
+
+// Returns true iff regular expression re matches a substring of str
+// (including str itself).
+bool RE::PartialMatch(const char* str, const RE& re) {
+  if (!re.is_valid_) return false;
+
+  regmatch_t match;
+  return regexec(&re.partial_regex_, str, 1, &match, 0) == 0;
+}
+
+// Initializes an RE from its string representation.
+void RE::Init(const char* regex) {
+  pattern_ = posix::StrDup(regex);
+
+  // Reserves enough bytes to hold the regular expression used for a
+  // full match.
+  const size_t full_regex_len = strlen(regex) + 10;
+  char* const full_pattern = new char[full_regex_len];
+
+  snprintf(full_pattern, full_regex_len, "^(%s)$", regex);
+  is_valid_ = regcomp(&full_regex_, full_pattern, REG_EXTENDED) == 0;
+  // We want to call regcomp(&partial_regex_, ...) even if the
+  // previous expression returns false.  Otherwise partial_regex_ may
+  // not be properly initialized and may cause trouble when it's
+  // freed.
+  //
+  // Some implementation of POSIX regex (e.g.
on at least some + // versions of Cygwin) doesn't accept the empty string as a valid + // regex. We change it to an equivalent form "()" to be safe. + if (is_valid_) { + const char* const partial_regex = (*regex == '\0') ? "()" : regex; + is_valid_ = regcomp(&partial_regex_, partial_regex, REG_EXTENDED) == 0; + } + EXPECT_TRUE(is_valid_) + << "Regular expression \"" << regex + << "\" is not a valid POSIX Extended regular expression."; + + delete[] full_pattern; +} + +#elif GTEST_USES_SIMPLE_RE + +// Returns true iff ch appears anywhere in str (excluding the +// terminating '\0' character). +bool IsInSet(char ch, const char* str) { + return ch != '\0' && strchr(str, ch) != NULL; +} + +// Returns true iff ch belongs to the given classification. Unlike +// similar functions in , these aren't affected by the +// current locale. +bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; } +bool IsAsciiPunct(char ch) { + return IsInSet(ch, "^-!\"#$%&'()*+,./:;<=>?@[\\]_`{|}~"); +} +bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); } +bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); } +bool IsAsciiWordChar(char ch) { + return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') || + ('0' <= ch && ch <= '9') || ch == '_'; +} + +// Returns true iff "\\c" is a supported escape sequence. +bool IsValidEscape(char c) { + return (IsAsciiPunct(c) || IsInSet(c, "dDfnrsStvwW")); +} + +// Returns true iff the given atom (specified by escaped and pattern) +// matches ch. The result is undefined if the atom is invalid. +bool AtomMatchesChar(bool escaped, char pattern_char, char ch) { + if (escaped) { // "\\p" where p is pattern_char. + switch (pattern_char) { + case 'd': return IsAsciiDigit(ch); + case 'D': return !IsAsciiDigit(ch); + case 'f': return ch == '\f'; + case 'n': return ch == '\n'; + case 'r': return ch == '\r'; + case 's': return IsAsciiWhiteSpace(ch); + case 'S': return !IsAsciiWhiteSpace(ch); + case 't': return ch == '\t'; + case 'v': return ch == '\v'; + case 'w': return IsAsciiWordChar(ch); + case 'W': return !IsAsciiWordChar(ch); + } + return IsAsciiPunct(pattern_char) && pattern_char == ch; + } + + return (pattern_char == '.' && ch != '\n') || pattern_char == ch; +} + +// Helper function used by ValidateRegex() to format error messages. +String FormatRegexSyntaxError(const char* regex, int index) { + return (Message() << "Syntax error at index " << index + << " in simple regular expression \"" << regex << "\": ").GetString(); +} + +// Generates non-fatal failures and returns false if regex is invalid; +// otherwise returns true. +bool ValidateRegex(const char* regex) { + if (regex == NULL) { + // TODO(wan@google.com): fix the source file location in the + // assertion failures to match where the regex is used in user + // code. + ADD_FAILURE() << "NULL is not a valid simple regular expression."; + return false; + } + + bool is_valid = true; + + // True iff ?, *, or + can follow the previous atom. + bool prev_repeatable = false; + for (int i = 0; regex[i]; i++) { + if (regex[i] == '\\') { // An escape sequence + i++; + if (regex[i] == '\0') { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1) + << "'\\' cannot appear at the end."; + return false; + } + + if (!IsValidEscape(regex[i])) { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1) + << "invalid escape sequence \"\\" << regex[i] << "\"."; + is_valid = false; + } + prev_repeatable = true; + } else { // Not an escape sequence. 
+ const char ch = regex[i]; + + if (ch == '^' && i > 0) { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i) + << "'^' can only appear at the beginning."; + is_valid = false; + } else if (ch == '$' && regex[i + 1] != '\0') { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i) + << "'$' can only appear at the end."; + is_valid = false; + } else if (IsInSet(ch, "()[]{}|")) { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i) + << "'" << ch << "' is unsupported."; + is_valid = false; + } else if (IsRepeat(ch) && !prev_repeatable) { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i) + << "'" << ch << "' can only follow a repeatable token."; + is_valid = false; + } + + prev_repeatable = !IsInSet(ch, "^$?*+"); + } + } + + return is_valid; +} + +// Matches a repeated regex atom followed by a valid simple regular +// expression. The regex atom is defined as c if escaped is false, +// or \c otherwise. repeat is the repetition meta character (?, *, +// or +). The behavior is undefined if str contains too many +// characters to be indexable by size_t, in which case the test will +// probably time out anyway. We are fine with this limitation as +// std::string has it too. +bool MatchRepetitionAndRegexAtHead( + bool escaped, char c, char repeat, const char* regex, + const char* str) { + const size_t min_count = (repeat == '+') ? 1 : 0; + const size_t max_count = (repeat == '?') ? 1 : + static_cast(-1) - 1; + // We cannot call numeric_limits::max() as it conflicts with the + // max() macro on Windows. + + for (size_t i = 0; i <= max_count; ++i) { + // We know that the atom matches each of the first i characters in str. + if (i >= min_count && MatchRegexAtHead(regex, str + i)) { + // We have enough matches at the head, and the tail matches too. + // Since we only care about *whether* the pattern matches str + // (as opposed to *how* it matches), there is no need to find a + // greedy match. + return true; + } + if (str[i] == '\0' || !AtomMatchesChar(escaped, c, str[i])) + return false; + } + return false; +} + +// Returns true iff regex matches a prefix of str. regex must be a +// valid simple regular expression and not start with "^", or the +// result is undefined. +bool MatchRegexAtHead(const char* regex, const char* str) { + if (*regex == '\0') // An empty regex matches a prefix of anything. + return true; + + // "$" only matches the end of a string. Note that regex being + // valid guarantees that there's nothing after "$" in it. + if (*regex == '$') + return *str == '\0'; + + // Is the first thing in regex an escape sequence? + const bool escaped = *regex == '\\'; + if (escaped) + ++regex; + if (IsRepeat(regex[1])) { + // MatchRepetitionAndRegexAtHead() calls MatchRegexAtHead(), so + // here's an indirect recursion. It terminates as the regex gets + // shorter in each recursion. + return MatchRepetitionAndRegexAtHead( + escaped, regex[0], regex[1], regex + 2, str); + } else { + // regex isn't empty, isn't "$", and doesn't start with a + // repetition. We match the first atom of regex with the first + // character of str and recurse. + return (*str != '\0') && AtomMatchesChar(escaped, *regex, *str) && + MatchRegexAtHead(regex + 1, str + 1); + } +} + +// Returns true iff regex matches any substring of str. regex must be +// a valid simple regular expression, or the result is undefined. +// +// The algorithm is recursive, but the recursion depth doesn't exceed +// the regex length, so we won't need to worry about running out of +// stack space normally. 
In rare cases the time complexity can be +// exponential with respect to the regex length + the string length, +// but usually it's must faster (often close to linear). +bool MatchRegexAnywhere(const char* regex, const char* str) { + if (regex == NULL || str == NULL) + return false; + + if (*regex == '^') + return MatchRegexAtHead(regex + 1, str); + + // A successful match can be anywhere in str. + do { + if (MatchRegexAtHead(regex, str)) + return true; + } while (*str++ != '\0'); + return false; +} + +// Implements the RE class. + +RE::~RE() { + free(const_cast(pattern_)); + free(const_cast(full_pattern_)); +} + +// Returns true iff regular expression re matches the entire str. +bool RE::FullMatch(const char* str, const RE& re) { + return re.is_valid_ && MatchRegexAnywhere(re.full_pattern_, str); +} + +// Returns true iff regular expression re matches a substring of str +// (including str itself). +bool RE::PartialMatch(const char* str, const RE& re) { + return re.is_valid_ && MatchRegexAnywhere(re.pattern_, str); +} + +// Initializes an RE from its string representation. +void RE::Init(const char* regex) { + pattern_ = full_pattern_ = NULL; + if (regex != NULL) { + pattern_ = posix::StrDup(regex); + } + + is_valid_ = ValidateRegex(regex); + if (!is_valid_) { + // No need to calculate the full pattern when the regex is invalid. + return; + } + + const size_t len = strlen(regex); + // Reserves enough bytes to hold the regular expression used for a + // full match: we need space to prepend a '^', append a '$', and + // terminate the string with '\0'. + char* buffer = static_cast(malloc(len + 3)); + full_pattern_ = buffer; + + if (*regex != '^') + *buffer++ = '^'; // Makes sure full_pattern_ starts with '^'. + + // We don't use snprintf or strncpy, as they trigger a warning when + // compiled with VC++ 8.0. + memcpy(buffer, regex, len); + buffer += len; + + if (len == 0 || regex[len - 1] != '$') + *buffer++ = '$'; // Makes sure full_pattern_ ends with '$'. + + *buffer = '\0'; +} + +#endif // GTEST_USES_POSIX_RE + +const char kUnknownFile[] = "unknown file"; + +// Formats a source file path and a line number as they would appear +// in an error message from the compiler used to compile this code. +GTEST_API_ ::std::string FormatFileLocation(const char* file, int line) { + const char* const file_name = file == NULL ? kUnknownFile : file; + + if (line < 0) { + return String::Format("%s:", file_name).c_str(); + } +#ifdef _MSC_VER + return String::Format("%s(%d):", file_name, line).c_str(); +#else + return String::Format("%s:%d:", file_name, line).c_str(); +#endif // _MSC_VER +} + +// Formats a file location for compiler-independent XML output. +// Although this function is not platform dependent, we put it next to +// FormatFileLocation in order to contrast the two functions. +// Note that FormatCompilerIndependentFileLocation() does NOT append colon +// to the file location it produces, unlike FormatFileLocation(). +GTEST_API_ ::std::string FormatCompilerIndependentFileLocation( + const char* file, int line) { + const char* const file_name = file == NULL ? kUnknownFile : file; + + if (line < 0) + return file_name; + else + return String::Format("%s:%d", file_name, line).c_str(); +} + + +GTestLog::GTestLog(GTestLogSeverity severity, const char* file, int line) + : severity_(severity) { + const char* const marker = + severity == GTEST_INFO ? "[ INFO ]" : + severity == GTEST_WARNING ? "[WARNING]" : + severity == GTEST_ERROR ? 
"[ ERROR ]" : "[ FATAL ]"; + GetStream() << ::std::endl << marker << " " + << FormatFileLocation(file, line).c_str() << ": "; +} + +// Flushes the buffers and, if severity is GTEST_FATAL, aborts the program. +GTestLog::~GTestLog() { + GetStream() << ::std::endl; + if (severity_ == GTEST_FATAL) { + fflush(stderr); + posix::Abort(); + } +} +// Disable Microsoft deprecation warnings for POSIX functions called from +// this class (creat, dup, dup2, and close) +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable: 4996) +#endif // _MSC_VER + +#if GTEST_HAS_STREAM_REDIRECTION + +// Object that captures an output stream (stdout/stderr). +class CapturedStream { + public: + // The ctor redirects the stream to a temporary file. + CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) { + +# if GTEST_OS_WINDOWS + char temp_dir_path[MAX_PATH + 1] = { '\0' }; // NOLINT + char temp_file_path[MAX_PATH + 1] = { '\0' }; // NOLINT + + ::GetTempPathA(sizeof(temp_dir_path), temp_dir_path); + const UINT success = ::GetTempFileNameA(temp_dir_path, + "gtest_redir", + 0, // Generate unique file name. + temp_file_path); + GTEST_CHECK_(success != 0) + << "Unable to create a temporary file in " << temp_dir_path; + const int captured_fd = creat(temp_file_path, _S_IREAD | _S_IWRITE); + GTEST_CHECK_(captured_fd != -1) << "Unable to open temporary file " + << temp_file_path; + filename_ = temp_file_path; +# else + // There's no guarantee that a test has write access to the + // current directory, so we create the temporary file in the /tmp + // directory instead. + char name_template[] = "/tmp/captured_stream.XXXXXX"; + const int captured_fd = mkstemp(name_template); + filename_ = name_template; +# endif // GTEST_OS_WINDOWS + fflush(NULL); + dup2(captured_fd, fd_); + close(captured_fd); + } + + ~CapturedStream() { + remove(filename_.c_str()); + } + + String GetCapturedString() { + if (uncaptured_fd_ != -1) { + // Restores the original stream. + fflush(NULL); + dup2(uncaptured_fd_, fd_); + close(uncaptured_fd_); + uncaptured_fd_ = -1; + } + + FILE* const file = posix::FOpen(filename_.c_str(), "r"); + const String content = ReadEntireFile(file); + posix::FClose(file); + return content; + } + + private: + // Reads the entire content of a file as a String. + static String ReadEntireFile(FILE* file); + + // Returns the size (in bytes) of a file. + static size_t GetFileSize(FILE* file); + + const int fd_; // A stream to capture. + int uncaptured_fd_; + // Name of the temporary file holding the stderr output. + ::std::string filename_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(CapturedStream); +}; + +// Returns the size (in bytes) of a file. +size_t CapturedStream::GetFileSize(FILE* file) { + fseek(file, 0, SEEK_END); + return static_cast(ftell(file)); +} + +// Reads the entire content of a file as a string. +String CapturedStream::ReadEntireFile(FILE* file) { + const size_t file_size = GetFileSize(file); + char* const buffer = new char[file_size]; + + size_t bytes_last_read = 0; // # of bytes read in the last fread() + size_t bytes_read = 0; // # of bytes read so far + + fseek(file, 0, SEEK_SET); + + // Keeps reading the file until we cannot read further or the + // pre-determined file size is reached. 
+ do { + bytes_last_read = fread(buffer+bytes_read, 1, file_size-bytes_read, file); + bytes_read += bytes_last_read; + } while (bytes_last_read > 0 && bytes_read < file_size); + + const String content(buffer, bytes_read); + delete[] buffer; + + return content; +} + +# ifdef _MSC_VER +# pragma warning(pop) +# endif // _MSC_VER + +static CapturedStream* g_captured_stderr = NULL; +static CapturedStream* g_captured_stdout = NULL; + +// Starts capturing an output stream (stdout/stderr). +void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) { + if (*stream != NULL) { + GTEST_LOG_(FATAL) << "Only one " << stream_name + << " capturer can exist at a time."; + } + *stream = new CapturedStream(fd); +} + +// Stops capturing the output stream and returns the captured string. +String GetCapturedStream(CapturedStream** captured_stream) { + const String content = (*captured_stream)->GetCapturedString(); + + delete *captured_stream; + *captured_stream = NULL; + + return content; +} + +// Starts capturing stdout. +void CaptureStdout() { + CaptureStream(kStdOutFileno, "stdout", &g_captured_stdout); +} + +// Starts capturing stderr. +void CaptureStderr() { + CaptureStream(kStdErrFileno, "stderr", &g_captured_stderr); +} + +// Stops capturing stdout and returns the captured string. +String GetCapturedStdout() { return GetCapturedStream(&g_captured_stdout); } + +// Stops capturing stderr and returns the captured string. +String GetCapturedStderr() { return GetCapturedStream(&g_captured_stderr); } + +#endif // GTEST_HAS_STREAM_REDIRECTION + +#if GTEST_HAS_DEATH_TEST + +// A copy of all command line arguments. Set by InitGoogleTest(). +::std::vector g_argvs; + +// Returns the command line as a vector of strings. +const ::std::vector& GetArgvs() { return g_argvs; } + +#endif // GTEST_HAS_DEATH_TEST + +#if GTEST_OS_WINDOWS_MOBILE +namespace posix { +void Abort() { + DebugBreak(); + TerminateProcess(GetCurrentProcess(), 1); +} +} // namespace posix +#endif // GTEST_OS_WINDOWS_MOBILE + +// Returns the name of the environment variable corresponding to the +// given flag. For example, FlagToEnvVar("foo") will return +// "GTEST_FOO" in the open-source version. +static String FlagToEnvVar(const char* flag) { + const String full_flag = + (Message() << GTEST_FLAG_PREFIX_ << flag).GetString(); + + Message env_var; + for (size_t i = 0; i != full_flag.length(); i++) { + env_var << ToUpper(full_flag.c_str()[i]); + } + + return env_var.GetString(); +} + +// Parses 'str' for a 32-bit signed integer. If successful, writes +// the result to *value and returns true; otherwise leaves *value +// unchanged and returns false. +bool ParseInt32(const Message& src_text, const char* str, Int32* value) { + // Parses the environment variable as a decimal integer. + char* end = NULL; + const long long_value = strtol(str, &end, 10); // NOLINT + + // Has strtol() consumed all characters in the string? + if (*end != '\0') { + // No - an invalid character was encountered. + Message msg; + msg << "WARNING: " << src_text + << " is expected to be a 32-bit integer, but actually" + << " has value \"" << str << "\".\n"; + printf("%s", msg.GetString().c_str()); + fflush(stdout); + return false; + } + + // Is the parsed value in the range of an Int32? + const Int32 result = static_cast(long_value); + if (long_value == LONG_MAX || long_value == LONG_MIN || + // The parsed value overflows as a long. (strtol() returns + // LONG_MAX or LONG_MIN when the input overflows.) 
+ result != long_value + // The parsed value overflows as an Int32. + ) { + Message msg; + msg << "WARNING: " << src_text + << " is expected to be a 32-bit integer, but actually" + << " has value " << str << ", which overflows.\n"; + printf("%s", msg.GetString().c_str()); + fflush(stdout); + return false; + } + + *value = result; + return true; +} + +// Reads and returns the Boolean environment variable corresponding to +// the given flag; if it's not set, returns default_value. +// +// The value is considered true iff it's not "0". +bool BoolFromGTestEnv(const char* flag, bool default_value) { + const String env_var = FlagToEnvVar(flag); + const char* const string_value = posix::GetEnv(env_var.c_str()); + return string_value == NULL ? + default_value : strcmp(string_value, "0") != 0; +} + +// Reads and returns a 32-bit integer stored in the environment +// variable corresponding to the given flag; if it isn't set or +// doesn't represent a valid 32-bit integer, returns default_value. +Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) { + const String env_var = FlagToEnvVar(flag); + const char* const string_value = posix::GetEnv(env_var.c_str()); + if (string_value == NULL) { + // The environment variable is not set. + return default_value; + } + + Int32 result = default_value; + if (!ParseInt32(Message() << "Environment variable " << env_var, + string_value, &result)) { + printf("The default value %s is used.\n", + (Message() << default_value).GetString().c_str()); + fflush(stdout); + return default_value; + } + + return result; +} + +// Reads and returns the string environment variable corresponding to +// the given flag; if it's not set, returns default_value. +const char* StringFromGTestEnv(const char* flag, const char* default_value) { + const String env_var = FlagToEnvVar(flag); + const char* const value = posix::GetEnv(env_var.c_str()); + return value == NULL ? default_value : value; +} + +} // namespace internal +} // namespace testing +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Test - The Google C++ Testing Framework
+//
+// This file implements a universal value printer that can print a
+// value of any type T:
+//
+//   void ::testing::internal::UniversalPrinter<T>::Print(value, ostream_ptr);
+//
+// It uses the << operator when possible, and prints the bytes in the
+// object otherwise.  A user can override its behavior for a class
+// type Foo by defining either operator<<(::std::ostream&, const Foo&)
+// or void PrintTo(const Foo&, ::std::ostream*) in the namespace that
+// defines Foo.
+
+#include <ctype.h>
+#include <stdio.h>
+#include <ostream>  // NOLINT
+#include <string>
+
+namespace testing {
+
+namespace {
+
+using ::std::ostream;
+
+#if GTEST_OS_WINDOWS_MOBILE  // Windows CE does not define _snprintf_s.
+# define snprintf _snprintf
+#elif _MSC_VER >= 1400  // VC 8.0 and later deprecate snprintf and _snprintf.
+# define snprintf _snprintf_s
+#elif _MSC_VER
+# define snprintf _snprintf
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+// Prints a segment of bytes in the given object.
+void PrintByteSegmentInObjectTo(const unsigned char* obj_bytes, size_t start,
+                                size_t count, ostream* os) {
+  char text[5] = "";
+  for (size_t i = 0; i != count; i++) {
+    const size_t j = start + i;
+    if (i != 0) {
+      // Organizes the bytes into groups of 2 for easy parsing by
+      // human.
+      if ((j % 2) == 0)
+        *os << ' ';
+      else
+        *os << '-';
+    }
+    snprintf(text, sizeof(text), "%02X", obj_bytes[j]);
+    *os << text;
+  }
+}
+
+// Prints the bytes in the given value to the given ostream.
+void PrintBytesInObjectToImpl(const unsigned char* obj_bytes, size_t count,
+                              ostream* os) {
+  // Tells the user how big the object is.
+  *os << count << "-byte object <";
+
+  const size_t kThreshold = 132;
+  const size_t kChunkSize = 64;
+  // If the object size is bigger than kThreshold, we'll have to omit
+  // some details by printing only the first and the last kChunkSize
+  // bytes.
+  // TODO(wan): let the user control the threshold using a flag.
+  if (count < kThreshold) {
+    PrintByteSegmentInObjectTo(obj_bytes, 0, count, os);
+  } else {
+    PrintByteSegmentInObjectTo(obj_bytes, 0, kChunkSize, os);
+    *os << " ... ";
+    // Rounds up to 2-byte boundary.
+    const size_t resume_pos = (count - kChunkSize + 1)/2*2;
+    PrintByteSegmentInObjectTo(obj_bytes, resume_pos, count - resume_pos, os);
+  }
+  *os << ">";
+}
+
+}  // namespace
+
+namespace internal2 {
+
+// Delegates to PrintBytesInObjectToImpl() to print the bytes in the
+// given object.  The delegation simplifies the implementation, which
+// uses the << operator and thus is easier done outside of the
+// ::testing::internal namespace, which contains a << operator that
+// sometimes conflicts with the one in STL.
+void PrintBytesInObjectTo(const unsigned char* obj_bytes, size_t count,
+                          ostream* os) {
+  PrintBytesInObjectToImpl(obj_bytes, count, os);
+}
+
+}  // namespace internal2
+
+namespace internal {
+
+// Depending on the value of a char (or wchar_t), we print it in one
+// of three formats:
+//   - as is if it's a printable ASCII (e.g. 'a', '2', ' '),
+//   - as a hexidecimal escape sequence (e.g. '\x7F'), or
+//   - as a special escape sequence (e.g. '\r', '\n').
+enum CharFormat {
+  kAsIs,
+  kHexEscape,
+  kSpecialEscape
+};
+
+// Returns true if c is a printable ASCII character.  We test the
+// value of c directly instead of calling isprint(), which is buggy on
+// Windows Mobile.
+inline bool IsPrintableAscii(wchar_t c) {
+  return 0x20 <= c && c <= 0x7E;
+}
+
+// Prints a wide or narrow char c as a character literal without the
+// quotes, escaping it when necessary; returns how c was formatted.
+// The template argument UnsignedChar is the unsigned version of Char,
+// which is the type of c.
+template <typename UnsignedChar, typename Char>
+static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) {
+  switch (static_cast<wchar_t>(c)) {
+    case L'\0':
+      *os << "\\0";
+      break;
+    case L'\'':
+      *os << "\\'";
+      break;
+    case L'\\':
+      *os << "\\\\";
+      break;
+    case L'\a':
+      *os << "\\a";
+      break;
+    case L'\b':
+      *os << "\\b";
+      break;
+    case L'\f':
+      *os << "\\f";
+      break;
+    case L'\n':
+      *os << "\\n";
+      break;
+    case L'\r':
+      *os << "\\r";
+      break;
+    case L'\t':
+      *os << "\\t";
+      break;
+    case L'\v':
+      *os << "\\v";
+      break;
+    default:
+      if (IsPrintableAscii(c)) {
+        *os << static_cast<char>(c);
+        return kAsIs;
+      } else {
+        *os << String::Format("\\x%X", static_cast<UnsignedChar>(c));
+        return kHexEscape;
+      }
+  }
+  return kSpecialEscape;
+}
+
+// Prints a char c as if it's part of a string literal, escaping it when
+// necessary; returns how c was formatted.
+static CharFormat PrintAsWideStringLiteralTo(wchar_t c, ostream* os) {
+  switch (c) {
+    case L'\'':
+      *os << "'";
+      return kAsIs;
+    case L'"':
+      *os << "\\\"";
+      return kSpecialEscape;
+    default:
+      return PrintAsCharLiteralTo<wchar_t>(c, os);
+  }
+}
+
+// Prints a char c as if it's part of a string literal, escaping it when
+// necessary; returns how c was formatted.
+static CharFormat PrintAsNarrowStringLiteralTo(char c, ostream* os) {
+  return PrintAsWideStringLiteralTo(static_cast<wchar_t>(c), os);
+}
+
+// Prints a wide or narrow character c and its code.  '\0' is printed
+// as "'\\0'", other unprintable characters are also properly escaped
+// using the standard C++ escape sequence.  The template argument
+// UnsignedChar is the unsigned version of Char, which is the type of c.
+template <typename UnsignedChar, typename Char>
+void PrintCharAndCodeTo(Char c, ostream* os) {
+  // First, print c as a literal in the most readable form we can find.
+  *os << ((sizeof(c) > 1) ? "L'" : "'");
+  const CharFormat format = PrintAsCharLiteralTo<UnsignedChar>(c, os);
+  *os << "'";
+
+  // To aid user debugging, we also print c's code in decimal, unless
+  // it's 0 (in which case c was printed as '\\0', making the code
+  // obvious).
+  if (c == 0)
+    return;
+  *os << " (" << String::Format("%d", c).c_str();
+
+  // For more convenience, we print c's code again in hexidecimal,
+  // unless c was already printed in the form '\x##' or the code is in
+  // [1, 9].
+  if (format == kHexEscape || (1 <= c && c <= 9)) {
+    // Do nothing.
+ } else { + *os << String::Format(", 0x%X", + static_cast(c)).c_str(); + } + *os << ")"; +} + +void PrintTo(unsigned char c, ::std::ostream* os) { + PrintCharAndCodeTo(c, os); +} +void PrintTo(signed char c, ::std::ostream* os) { + PrintCharAndCodeTo(c, os); +} + +// Prints a wchar_t as a symbol if it is printable or as its internal +// code otherwise and also as its code. L'\0' is printed as "L'\\0'". +void PrintTo(wchar_t wc, ostream* os) { + PrintCharAndCodeTo(wc, os); +} + +// Prints the given array of characters to the ostream. +// The array starts at *begin, the length is len, it may include '\0' characters +// and may not be null-terminated. +static void PrintCharsAsStringTo(const char* begin, size_t len, ostream* os) { + *os << "\""; + bool is_previous_hex = false; + for (size_t index = 0; index < len; ++index) { + const char cur = begin[index]; + if (is_previous_hex && IsXDigit(cur)) { + // Previous character is of '\x..' form and this character can be + // interpreted as another hexadecimal digit in its number. Break string to + // disambiguate. + *os << "\" \""; + } + is_previous_hex = PrintAsNarrowStringLiteralTo(cur, os) == kHexEscape; + } + *os << "\""; +} + +// Prints a (const) char array of 'len' elements, starting at address 'begin'. +void UniversalPrintArray(const char* begin, size_t len, ostream* os) { + PrintCharsAsStringTo(begin, len, os); +} + +// Prints the given array of wide characters to the ostream. +// The array starts at *begin, the length is len, it may include L'\0' +// characters and may not be null-terminated. +static void PrintWideCharsAsStringTo(const wchar_t* begin, size_t len, + ostream* os) { + *os << "L\""; + bool is_previous_hex = false; + for (size_t index = 0; index < len; ++index) { + const wchar_t cur = begin[index]; + if (is_previous_hex && isascii(cur) && IsXDigit(static_cast(cur))) { + // Previous character is of '\x..' form and this character can be + // interpreted as another hexadecimal digit in its number. Break string to + // disambiguate. + *os << "\" L\""; + } + is_previous_hex = PrintAsWideStringLiteralTo(cur, os) == kHexEscape; + } + *os << "\""; +} + +// Prints the given C string to the ostream. +void PrintTo(const char* s, ostream* os) { + if (s == NULL) { + *os << "NULL"; + } else { + *os << ImplicitCast_(s) << " pointing to "; + PrintCharsAsStringTo(s, strlen(s), os); + } +} + +// MSVC compiler can be configured to define whar_t as a typedef +// of unsigned short. Defining an overload for const wchar_t* in that case +// would cause pointers to unsigned shorts be printed as wide strings, +// possibly accessing more memory than intended and causing invalid +// memory accesses. MSVC defines _NATIVE_WCHAR_T_DEFINED symbol when +// wchar_t is implemented as a native type. +#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED) +// Prints the given wide C string to the ostream. +void PrintTo(const wchar_t* s, ostream* os) { + if (s == NULL) { + *os << "NULL"; + } else { + *os << ImplicitCast_(s) << " pointing to "; + PrintWideCharsAsStringTo(s, wcslen(s), os); + } +} +#endif // wchar_t is native + +// Prints a ::string object. +#if GTEST_HAS_GLOBAL_STRING +void PrintStringTo(const ::string& s, ostream* os) { + PrintCharsAsStringTo(s.data(), s.size(), os); +} +#endif // GTEST_HAS_GLOBAL_STRING + +void PrintStringTo(const ::std::string& s, ostream* os) { + PrintCharsAsStringTo(s.data(), s.size(), os); +} + +// Prints a ::wstring object. 
+#if GTEST_HAS_GLOBAL_WSTRING +void PrintWideStringTo(const ::wstring& s, ostream* os) { + PrintWideCharsAsStringTo(s.data(), s.size(), os); +} +#endif // GTEST_HAS_GLOBAL_WSTRING + +#if GTEST_HAS_STD_WSTRING +void PrintWideStringTo(const ::std::wstring& s, ostream* os) { + PrintWideCharsAsStringTo(s.data(), s.size(), os); +} +#endif // GTEST_HAS_STD_WSTRING + +} // namespace internal + +} // namespace testing +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: mheule@google.com (Markus Heule) +// +// The Google C++ Testing Framework (Google Test) + + +// Indicates that this translation unit is part of Google Test's +// implementation. It must come before gtest-internal-inl.h is +// included, or there will be a compiler error. This trick is to +// prevent a user from accidentally including gtest-internal-inl.h in +// his code. +#define GTEST_IMPLEMENTATION_ 1 +#undef GTEST_IMPLEMENTATION_ + +namespace testing { + +using internal::GetUnitTestImpl; + +// Gets the summary of the failure message by omitting the stack trace +// in it. +internal::String TestPartResult::ExtractSummary(const char* message) { + const char* const stack_trace = strstr(message, internal::kStackTraceMarker); + return stack_trace == NULL ? internal::String(message) : + internal::String(message, stack_trace - message); +} + +// Prints a TestPartResult object. +std::ostream& operator<<(std::ostream& os, const TestPartResult& result) { + return os + << result.file_name() << ":" << result.line_number() << ": " + << (result.type() == TestPartResult::kSuccess ? "Success" : + result.type() == TestPartResult::kFatalFailure ? "Fatal failure" : + "Non-fatal failure") << ":\n" + << result.message() << std::endl; +} + +// Appends a TestPartResult to the array. +void TestPartResultArray::Append(const TestPartResult& result) { + array_.push_back(result); +} + +// Returns the TestPartResult at the given index (0-based). 
+const TestPartResult& TestPartResultArray::GetTestPartResult(int index) const { + if (index < 0 || index >= size()) { + printf("\nInvalid index (%d) into TestPartResultArray.\n", index); + internal::posix::Abort(); + } + + return array_[index]; +} + +// Returns the number of TestPartResult objects in the array. +int TestPartResultArray::size() const { + return static_cast(array_.size()); +} + +namespace internal { + +HasNewFatalFailureHelper::HasNewFatalFailureHelper() + : has_new_fatal_failure_(false), + original_reporter_(GetUnitTestImpl()-> + GetTestPartResultReporterForCurrentThread()) { + GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(this); +} + +HasNewFatalFailureHelper::~HasNewFatalFailureHelper() { + GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread( + original_reporter_); +} + +void HasNewFatalFailureHelper::ReportTestPartResult( + const TestPartResult& result) { + if (result.fatally_failed()) + has_new_fatal_failure_ = true; + original_reporter_->ReportTestPartResult(result); +} + +} // namespace internal + +} // namespace testing +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + + +namespace testing { +namespace internal { + +#if GTEST_HAS_TYPED_TEST_P + +// Skips to the first non-space char in str. Returns an empty string if str +// contains only whitespace characters. +static const char* SkipSpaces(const char* str) { + while (IsSpace(*str)) + str++; + return str; +} + +// Verifies that registered_tests match the test names in +// defined_test_names_; returns registered_tests if successful, or +// aborts the program otherwise. +const char* TypedTestCasePState::VerifyRegisteredTestNames( + const char* file, int line, const char* registered_tests) { + typedef ::std::set::const_iterator DefinedTestIter; + registered_ = true; + + // Skip initial whitespace in registered_tests since some + // preprocessors prefix stringizied literals with whitespace. 
+ registered_tests = SkipSpaces(registered_tests); + + Message errors; + ::std::set tests; + for (const char* names = registered_tests; names != NULL; + names = SkipComma(names)) { + const String name = GetPrefixUntilComma(names); + if (tests.count(name) != 0) { + errors << "Test " << name << " is listed more than once.\n"; + continue; + } + + bool found = false; + for (DefinedTestIter it = defined_test_names_.begin(); + it != defined_test_names_.end(); + ++it) { + if (name == *it) { + found = true; + break; + } + } + + if (found) { + tests.insert(name); + } else { + errors << "No test named " << name + << " can be found in this test case.\n"; + } + } + + for (DefinedTestIter it = defined_test_names_.begin(); + it != defined_test_names_.end(); + ++it) { + if (tests.count(*it) == 0) { + errors << "You forgot to list test " << *it << ".\n"; + } + } + + const String& errors_str = errors.GetString(); + if (errors_str != "") { + fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(), + errors_str.c_str()); + fflush(stderr); + posix::Abort(); + } + + return registered_tests; +} + +#endif // GTEST_HAS_TYPED_TEST_P + +} // namespace internal +} // namespace testing diff --git a/src/gtest/gtest.h b/src/gtest/gtest.h new file mode 100755 index 0000000..3143bd6 --- /dev/null +++ b/src/gtest/gtest.h @@ -0,0 +1,19537 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines the public API for Google Test. It should be +// included by any test program that uses Google Test. +// +// IMPORTANT NOTE: Due to limitation of the C++ language, we have to +// leave some internal implementation details in this header file. +// They are clearly marked by comments like this: +// +// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +// +// Such code is NOT meant to be used by a user directly, and is subject +// to CHANGE WITHOUT NOTICE. 
Therefore DO NOT DEPEND ON IT in a user +// program! +// +// Acknowledgment: Google Test borrowed the idea of automatic test +// registration from Barthelemy Dagenais' (barthelemy@prologique.com) +// easyUnit framework. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_H_ +#define GTEST_INCLUDE_GTEST_GTEST_H_ + +#include +#include + +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file declares functions and macros used internally by +// Google Test. They are subject to change without notice. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ + +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan) +// +// Low-level types and utilities for porting Google Test to various +// platforms. They are subject to change without notice. DO NOT USE +// THEM IN USER CODE. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ + +// The user can define the following macros in the build script to +// control Google Test's behavior. If the user doesn't define a macro +// in this list, Google Test will define it. +// +// GTEST_HAS_CLONE - Define it to 1/0 to indicate that clone(2) +// is/isn't available. +// GTEST_HAS_EXCEPTIONS - Define it to 1/0 to indicate that exceptions +// are enabled. +// GTEST_HAS_GLOBAL_STRING - Define it to 1/0 to indicate that ::string +// is/isn't available (some systems define +// ::string, which is different to std::string). +// GTEST_HAS_GLOBAL_WSTRING - Define it to 1/0 to indicate that ::string +// is/isn't available (some systems define +// ::wstring, which is different to std::wstring). +// GTEST_HAS_POSIX_RE - Define it to 1/0 to indicate that POSIX regular +// expressions are/aren't available. +// GTEST_HAS_PTHREAD - Define it to 1/0 to indicate that +// is/isn't available. +// GTEST_HAS_RTTI - Define it to 1/0 to indicate that RTTI is/isn't +// enabled. +// GTEST_HAS_STD_WSTRING - Define it to 1/0 to indicate that +// std::wstring does/doesn't work (Google Test can +// be used where std::wstring is unavailable). +// GTEST_HAS_TR1_TUPLE - Define it to 1/0 to indicate tr1::tuple +// is/isn't available. +// GTEST_HAS_SEH - Define it to 1/0 to indicate whether the +// compiler supports Microsoft's "Structured +// Exception Handling". +// GTEST_HAS_STREAM_REDIRECTION +// - Define it to 1/0 to indicate whether the +// platform supports I/O stream redirection using +// dup() and dup2(). +// GTEST_USE_OWN_TR1_TUPLE - Define it to 1/0 to indicate whether Google +// Test's own tr1 tuple implementation should be +// used. Unused when the user sets +// GTEST_HAS_TR1_TUPLE to 0. +// GTEST_LINKED_AS_SHARED_LIBRARY +// - Define to 1 when compiling tests that use +// Google Test as a shared library (known as +// DLL on Windows). +// GTEST_CREATE_SHARED_LIBRARY +// - Define to 1 when compiling Google Test itself +// as a shared library. 
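+//
+// A minimal sketch of how a build might pin one of these macros; the macro
+// and value shown are only an example, not something this tree requires:
+//
+//   // Force a single-threaded Google Test before the header is processed.
+//   #define GTEST_HAS_PTHREAD 0
+//   #include "gtest/gtest.h"   // include path depends on the build setup
+//
+// Passing the equivalent -DGTEST_HAS_PTHREAD=0 compiler flag works as well,
+// and should be applied to both the tests and gtest-all.cpp so the library
+// and the test binaries agree on the configuration.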
+ +// This header defines the following utilities: +// +// Macros indicating the current platform (defined to 1 if compiled on +// the given platform; otherwise undefined): +// GTEST_OS_AIX - IBM AIX +// GTEST_OS_CYGWIN - Cygwin +// GTEST_OS_HPUX - HP-UX +// GTEST_OS_LINUX - Linux +// GTEST_OS_LINUX_ANDROID - Google Android +// GTEST_OS_MAC - Mac OS X +// GTEST_OS_NACL - Google Native Client (NaCl) +// GTEST_OS_SOLARIS - Sun Solaris +// GTEST_OS_SYMBIAN - Symbian +// GTEST_OS_WINDOWS - Windows (Desktop, MinGW, or Mobile) +// GTEST_OS_WINDOWS_DESKTOP - Windows Desktop +// GTEST_OS_WINDOWS_MINGW - MinGW +// GTEST_OS_WINDOWS_MOBILE - Windows Mobile +// GTEST_OS_ZOS - z/OS +// +// Among the platforms, Cygwin, Linux, Max OS X, and Windows have the +// most stable support. Since core members of the Google Test project +// don't have access to other platforms, support for them may be less +// stable. If you notice any problems on your platform, please notify +// googletestframework@googlegroups.com (patches for fixing them are +// even more welcome!). +// +// Note that it is possible that none of the GTEST_OS_* macros are defined. +// +// Macros indicating available Google Test features (defined to 1 if +// the corresponding feature is supported; otherwise undefined): +// GTEST_HAS_COMBINE - the Combine() function (for value-parameterized +// tests) +// GTEST_HAS_DEATH_TEST - death tests +// GTEST_HAS_PARAM_TEST - value-parameterized tests +// GTEST_HAS_TYPED_TEST - typed tests +// GTEST_HAS_TYPED_TEST_P - type-parameterized tests +// GTEST_USES_POSIX_RE - enhanced POSIX regex is used. Do not confuse with +// GTEST_HAS_POSIX_RE (see above) which users can +// define themselves. +// GTEST_USES_SIMPLE_RE - our own simple regex is used; +// the above two are mutually exclusive. +// GTEST_CAN_COMPARE_NULL - accepts untyped NULL in EXPECT_EQ(). +// +// Macros for basic C++ coding: +// GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning. +// GTEST_ATTRIBUTE_UNUSED_ - declares that a class' instances or a +// variable don't have to be used. +// GTEST_DISALLOW_ASSIGN_ - disables operator=. +// GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=. +// GTEST_MUST_USE_RESULT_ - declares that a function's result must be used. +// +// Synchronization: +// Mutex, MutexLock, ThreadLocal, GetThreadCount() +// - synchronization primitives. +// GTEST_IS_THREADSAFE - defined to 1 to indicate that the above +// synchronization primitives have real implementations +// and Google Test is thread-safe; or 0 otherwise. +// +// Template meta programming: +// is_pointer - as in TR1; needed on Symbian and IBM XL C/C++ only. +// IteratorTraits - partial implementation of std::iterator_traits, which +// is not available in libCstd when compiled with Sun C++. +// +// Smart pointers: +// scoped_ptr - as in TR2. +// +// Regular expressions: +// RE - a simple regular expression class using the POSIX +// Extended Regular Expression syntax on UNIX-like +// platforms, or a reduced regular exception syntax on +// other platforms, including Windows. +// +// Logging: +// GTEST_LOG_() - logs messages at the specified severity level. +// LogToStderr() - directs all log messages to stderr. +// FlushInfoLog() - flushes informational log messages. +// +// Stdout and stderr capturing: +// CaptureStdout() - starts capturing stdout. +// GetCapturedStdout() - stops capturing stdout and returns the captured +// string. +// CaptureStderr() - starts capturing stderr. 
+// GetCapturedStderr() - stops capturing stderr and returns the captured +// string. +// +// Integer types: +// TypeWithSize - maps an integer to a int type. +// Int32, UInt32, Int64, UInt64, TimeInMillis +// - integers of known sizes. +// BiggestInt - the biggest signed integer type. +// +// Command-line utilities: +// GTEST_FLAG() - references a flag. +// GTEST_DECLARE_*() - declares a flag. +// GTEST_DEFINE_*() - defines a flag. +// GetArgvs() - returns the command line as a vector of strings. +// +// Environment variable utilities: +// GetEnv() - gets the value of an environment variable. +// BoolFromGTestEnv() - parses a bool environment variable. +// Int32FromGTestEnv() - parses an Int32 environment variable. +// StringFromGTestEnv() - parses a string environment variable. + +#include // for isspace, etc +#include // for ptrdiff_t +#include +#include +#include +#ifndef _WIN32_WCE +# include +# include +#endif // !_WIN32_WCE + +#include // NOLINT +#include // NOLINT +#include // NOLINT + +#define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com" +#define GTEST_FLAG_PREFIX_ "gtest_" +#define GTEST_FLAG_PREFIX_DASH_ "gtest-" +#define GTEST_FLAG_PREFIX_UPPER_ "GTEST_" +#define GTEST_NAME_ "Google Test" +#define GTEST_PROJECT_URL_ "http://code.google.com/p/googletest/" + +// Determines the version of gcc that is used to compile this. +#ifdef __GNUC__ +// 40302 means version 4.3.2. +# define GTEST_GCC_VER_ \ + (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__) +#endif // __GNUC__ + +// Determines the platform on which Google Test is compiled. +#ifdef __CYGWIN__ +# define GTEST_OS_CYGWIN 1 +#elif defined __SYMBIAN32__ +# define GTEST_OS_SYMBIAN 1 +#elif defined _WIN32 +# define GTEST_OS_WINDOWS 1 +# ifdef _WIN32_WCE +# define GTEST_OS_WINDOWS_MOBILE 1 +# elif defined(__MINGW__) || defined(__MINGW32__) +# define GTEST_OS_WINDOWS_MINGW 1 +# else +# define GTEST_OS_WINDOWS_DESKTOP 1 +# endif // _WIN32_WCE +#elif defined __APPLE__ +# define GTEST_OS_MAC 1 +#elif defined __linux__ +# define GTEST_OS_LINUX 1 +# ifdef ANDROID +# define GTEST_OS_LINUX_ANDROID 1 +# endif // ANDROID +#elif defined __MVS__ +# define GTEST_OS_ZOS 1 +#elif defined(__sun) && defined(__SVR4) +# define GTEST_OS_SOLARIS 1 +#elif defined(_AIX) +# define GTEST_OS_AIX 1 +#elif defined(__hpux) +# define GTEST_OS_HPUX 1 +#elif defined __native_client__ +# define GTEST_OS_NACL 1 +#endif // __CYGWIN__ + +// Brings in definitions for functions used in the testing::internal::posix +// namespace (read, write, close, chdir, isatty, stat). We do not currently +// use them on Windows Mobile. +#if !GTEST_OS_WINDOWS +// This assumes that non-Windows OSes provide unistd.h. For OSes where this +// is not the case, we need to include headers that provide the functions +// mentioned above. +# include +# if !GTEST_OS_NACL +// TODO(vladl@google.com): Remove this condition when Native Client SDK adds +// strings.h (tracked in +// http://code.google.com/p/nativeclient/issues/detail?id=1175). +# include // Native Client doesn't provide strings.h. +# endif +#elif !GTEST_OS_WINDOWS_MOBILE +# include +# include +#endif + +// Defines this to true iff Google Test can use POSIX regular expressions. +#ifndef GTEST_HAS_POSIX_RE +# define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS) +#endif + +#if GTEST_HAS_POSIX_RE + +// On some platforms, needs someone to define size_t, and +// won't compile otherwise. We can #include it here as we already +// included , which is guaranteed to define size_t through +// . 
+# include // NOLINT + +# define GTEST_USES_POSIX_RE 1 + +#elif GTEST_OS_WINDOWS + +// is not available on Windows. Use our own simple regex +// implementation instead. +# define GTEST_USES_SIMPLE_RE 1 + +#else + +// may not be available on this platform. Use our own +// simple regex implementation instead. +# define GTEST_USES_SIMPLE_RE 1 + +#endif // GTEST_HAS_POSIX_RE + +#ifndef GTEST_HAS_EXCEPTIONS +// The user didn't tell us whether exceptions are enabled, so we need +// to figure it out. +# if defined(_MSC_VER) || defined(__BORLANDC__) +// MSVC's and C++Builder's implementations of the STL use the _HAS_EXCEPTIONS +// macro to enable exceptions, so we'll do the same. +// Assumes that exceptions are enabled by default. +# ifndef _HAS_EXCEPTIONS +# define _HAS_EXCEPTIONS 1 +# endif // _HAS_EXCEPTIONS +# define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS +# elif defined(__GNUC__) && __EXCEPTIONS +// gcc defines __EXCEPTIONS to 1 iff exceptions are enabled. +# define GTEST_HAS_EXCEPTIONS 1 +# elif defined(__SUNPRO_CC) +// Sun Pro CC supports exceptions. However, there is no compile-time way of +// detecting whether they are enabled or not. Therefore, we assume that +// they are enabled unless the user tells us otherwise. +# define GTEST_HAS_EXCEPTIONS 1 +# elif defined(__IBMCPP__) && __EXCEPTIONS +// xlC defines __EXCEPTIONS to 1 iff exceptions are enabled. +# define GTEST_HAS_EXCEPTIONS 1 +# elif defined(__HP_aCC) +// Exception handling is in effect by default in HP aCC compiler. It has to +// be turned of by +noeh compiler option if desired. +# define GTEST_HAS_EXCEPTIONS 1 +# else +// For other compilers, we assume exceptions are disabled to be +// conservative. +# define GTEST_HAS_EXCEPTIONS 0 +# endif // defined(_MSC_VER) || defined(__BORLANDC__) +#endif // GTEST_HAS_EXCEPTIONS + +#if !defined(GTEST_HAS_STD_STRING) +// Even though we don't use this macro any longer, we keep it in case +// some clients still depend on it. +# define GTEST_HAS_STD_STRING 1 +#elif !GTEST_HAS_STD_STRING +// The user told us that ::std::string isn't available. +# error "Google Test cannot be used where ::std::string isn't available." +#endif // !defined(GTEST_HAS_STD_STRING) + +#ifndef GTEST_HAS_GLOBAL_STRING +// The user didn't tell us whether ::string is available, so we need +// to figure it out. + +# define GTEST_HAS_GLOBAL_STRING 0 + +#endif // GTEST_HAS_GLOBAL_STRING + +#ifndef GTEST_HAS_STD_WSTRING +// The user didn't tell us whether ::std::wstring is available, so we need +// to figure it out. +// TODO(wan@google.com): uses autoconf to detect whether ::std::wstring +// is available. + +// Cygwin 1.7 and below doesn't support ::std::wstring. +// Solaris' libc++ doesn't support it either. Android has +// no support for it at least as recent as Froyo (2.2). +# define GTEST_HAS_STD_WSTRING \ + (!(GTEST_OS_LINUX_ANDROID || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS)) + +#endif // GTEST_HAS_STD_WSTRING + +#ifndef GTEST_HAS_GLOBAL_WSTRING +// The user didn't tell us whether ::wstring is available, so we need +// to figure it out. +# define GTEST_HAS_GLOBAL_WSTRING \ + (GTEST_HAS_STD_WSTRING && GTEST_HAS_GLOBAL_STRING) +#endif // GTEST_HAS_GLOBAL_WSTRING + +// Determines whether RTTI is available. +#ifndef GTEST_HAS_RTTI +// The user didn't tell us whether RTTI is enabled, so we need to +// figure it out. + +# ifdef _MSC_VER + +# ifdef _CPPRTTI // MSVC defines this macro iff RTTI is enabled. 
+# define GTEST_HAS_RTTI 1 +# else +# define GTEST_HAS_RTTI 0 +# endif + +// Starting with version 4.3.2, gcc defines __GXX_RTTI iff RTTI is enabled. +# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40302) + +# ifdef __GXX_RTTI +# define GTEST_HAS_RTTI 1 +# else +# define GTEST_HAS_RTTI 0 +# endif // __GXX_RTTI + +// Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if +// both the typeid and dynamic_cast features are present. +# elif defined(__IBMCPP__) && (__IBMCPP__ >= 900) + +# ifdef __RTTI_ALL__ +# define GTEST_HAS_RTTI 1 +# else +# define GTEST_HAS_RTTI 0 +# endif + +# else + +// For all other compilers, we assume RTTI is enabled. +# define GTEST_HAS_RTTI 1 + +# endif // _MSC_VER + +#endif // GTEST_HAS_RTTI + +// It's this header's responsibility to #include when RTTI +// is enabled. +#if GTEST_HAS_RTTI +# include +#endif + +// Determines whether Google Test can use the pthreads library. +#ifndef GTEST_HAS_PTHREAD +// The user didn't tell us explicitly, so we assume pthreads support is +// available on Linux and Mac. +// +// To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0 +// to your compiler flags. +# define GTEST_HAS_PTHREAD (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_HPUX) +#endif // GTEST_HAS_PTHREAD + +#if GTEST_HAS_PTHREAD +// gtest-port.h guarantees to #include when GTEST_HAS_PTHREAD is +// true. +# include // NOLINT + +// For timespec and nanosleep, used below. +# include // NOLINT +#endif + +// Determines whether Google Test can use tr1/tuple. You can define +// this macro to 0 to prevent Google Test from using tuple (any +// feature depending on tuple with be disabled in this mode). +#ifndef GTEST_HAS_TR1_TUPLE +// The user didn't tell us not to do it, so we assume it's OK. +# define GTEST_HAS_TR1_TUPLE 1 +#endif // GTEST_HAS_TR1_TUPLE + +// Determines whether Google Test's own tr1 tuple implementation +// should be used. +#ifndef GTEST_USE_OWN_TR1_TUPLE +// The user didn't tell us, so we need to figure it out. + +// We use our own TR1 tuple if we aren't sure the user has an +// implementation of it already. At this time, GCC 4.0.0+ and MSVC +// 2010 are the only mainstream compilers that come with a TR1 tuple +// implementation. NVIDIA's CUDA NVCC compiler pretends to be GCC by +// defining __GNUC__ and friends, but cannot compile GCC's tuple +// implementation. MSVC 2008 (9.0) provides TR1 tuple in a 323 MB +// Feature Pack download, which we cannot assume the user has. +# if (defined(__GNUC__) && !defined(__CUDACC__) && (GTEST_GCC_VER_ >= 40000)) \ + || _MSC_VER >= 1600 +# define GTEST_USE_OWN_TR1_TUPLE 0 +# else +# define GTEST_USE_OWN_TR1_TUPLE 1 +# endif + +#endif // GTEST_USE_OWN_TR1_TUPLE + +// To avoid conditional compilation everywhere, we make it +// gtest-port.h's responsibility to #include the header implementing +// tr1/tuple. +#if GTEST_HAS_TR1_TUPLE + +# if GTEST_USE_OWN_TR1_TUPLE +// This file was GENERATED by a script. DO NOT EDIT BY HAND!!! + +// Copyright 2009 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Implements a subset of TR1 tuple needed by Google Test and Google Mock. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_ + +#include // For ::std::pair. + +// The compiler used in Symbian has a bug that prevents us from declaring the +// tuple template as a friend (it complains that tuple is redefined). This +// hack bypasses the bug by declaring the members that should otherwise be +// private as public. +// Sun Studio versions < 12 also have the above bug. +#if defined(__SYMBIAN32__) || (defined(__SUNPRO_CC) && __SUNPRO_CC < 0x590) +# define GTEST_DECLARE_TUPLE_AS_FRIEND_ public: +#else +# define GTEST_DECLARE_TUPLE_AS_FRIEND_ \ + template friend class tuple; \ + private: +#endif + +// GTEST_n_TUPLE_(T) is the type of an n-tuple. +#define GTEST_0_TUPLE_(T) tuple<> +#define GTEST_1_TUPLE_(T) tuple +#define GTEST_2_TUPLE_(T) tuple +#define GTEST_3_TUPLE_(T) tuple +#define GTEST_4_TUPLE_(T) tuple +#define GTEST_5_TUPLE_(T) tuple +#define GTEST_6_TUPLE_(T) tuple +#define GTEST_7_TUPLE_(T) tuple +#define GTEST_8_TUPLE_(T) tuple +#define GTEST_9_TUPLE_(T) tuple +#define GTEST_10_TUPLE_(T) tuple + +// GTEST_n_TYPENAMES_(T) declares a list of n typenames. +#define GTEST_0_TYPENAMES_(T) +#define GTEST_1_TYPENAMES_(T) typename T##0 +#define GTEST_2_TYPENAMES_(T) typename T##0, typename T##1 +#define GTEST_3_TYPENAMES_(T) typename T##0, typename T##1, typename T##2 +#define GTEST_4_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3 +#define GTEST_5_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4 +#define GTEST_6_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5 +#define GTEST_7_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6 +#define GTEST_8_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6, typename T##7 +#define GTEST_9_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6, \ + typename T##7, typename T##8 +#define GTEST_10_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6, \ + typename T##7, typename T##8, typename T##9 + +// In theory, defining stuff in the ::std namespace is undefined +// behavior. 
We can do this as we are playing the role of a standard +// library vendor. +namespace std { +namespace tr1 { + +template +class tuple; + +// Anything in namespace gtest_internal is Google Test's INTERNAL +// IMPLEMENTATION DETAIL and MUST NOT BE USED DIRECTLY in user code. +namespace gtest_internal { + +// ByRef::type is T if T is a reference; otherwise it's const T&. +template +struct ByRef { typedef const T& type; }; // NOLINT +template +struct ByRef { typedef T& type; }; // NOLINT + +// A handy wrapper for ByRef. +#define GTEST_BY_REF_(T) typename ::std::tr1::gtest_internal::ByRef::type + +// AddRef::type is T if T is a reference; otherwise it's T&. This +// is the same as tr1::add_reference::type. +template +struct AddRef { typedef T& type; }; // NOLINT +template +struct AddRef { typedef T& type; }; // NOLINT + +// A handy wrapper for AddRef. +#define GTEST_ADD_REF_(T) typename ::std::tr1::gtest_internal::AddRef::type + +// A helper for implementing get(). +template class Get; + +// A helper for implementing tuple_element. kIndexValid is true +// iff k < the number of fields in tuple type T. +template +struct TupleElement; + +template +struct TupleElement { typedef T0 type; }; + +template +struct TupleElement { typedef T1 type; }; + +template +struct TupleElement { typedef T2 type; }; + +template +struct TupleElement { typedef T3 type; }; + +template +struct TupleElement { typedef T4 type; }; + +template +struct TupleElement { typedef T5 type; }; + +template +struct TupleElement { typedef T6 type; }; + +template +struct TupleElement { typedef T7 type; }; + +template +struct TupleElement { typedef T8 type; }; + +template +struct TupleElement { typedef T9 type; }; + +} // namespace gtest_internal + +template <> +class tuple<> { + public: + tuple() {} + tuple(const tuple& /* t */) {} + tuple& operator=(const tuple& /* t */) { return *this; } +}; + +template +class GTEST_1_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0) : f0_(f0) {} + + tuple(const tuple& t) : f0_(t.f0_) {} + + template + tuple(const GTEST_1_TUPLE_(U)& t) : f0_(t.f0_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_1_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_1_TUPLE_(U)& t) { + f0_ = t.f0_; + return *this; + } + + T0 f0_; +}; + +template +class GTEST_2_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1) : f0_(f0), + f1_(f1) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_) {} + + template + tuple(const GTEST_2_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_) {} + template + tuple(const ::std::pair& p) : f0_(p.first), f1_(p.second) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_2_TUPLE_(U)& t) { + return CopyFrom(t); + } + template + tuple& operator=(const ::std::pair& p) { + f0_ = p.first; + f1_ = p.second; + return *this; + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_2_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + return *this; + } + + T0 f0_; + T1 f1_; +}; + +template +class GTEST_3_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2) : f0_(f0), f1_(f1), f2_(f2) {} + + 
tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {} + + template + tuple(const GTEST_3_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_3_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_3_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; +}; + +template +class GTEST_4_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3) : f0_(f0), f1_(f1), f2_(f2), + f3_(f3) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_) {} + + template + tuple(const GTEST_4_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_4_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_4_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; +}; + +template +class GTEST_5_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, + GTEST_BY_REF_(T4) f4) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_) {} + + template + tuple(const GTEST_5_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_5_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_5_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; +}; + +template +class GTEST_6_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4), + f5_(f5) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_) {} + + template + tuple(const GTEST_6_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_6_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_6_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; +}; + +template +class GTEST_7_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, 
GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6) : f0_(f0), f1_(f1), f2_(f2), + f3_(f3), f4_(f4), f5_(f5), f6_(f6) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {} + + template + tuple(const GTEST_7_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_7_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_7_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; +}; + +template +class GTEST_8_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, + GTEST_BY_REF_(T7) f7) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4), + f5_(f5), f6_(f6), f7_(f7) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {} + + template + tuple(const GTEST_8_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_8_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_8_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + f7_ = t.f7_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; + T7 f7_; +}; + +template +class GTEST_9_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7, + GTEST_BY_REF_(T8) f8) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4), + f5_(f5), f6_(f6), f7_(f7), f8_(f8) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {} + + template + tuple(const GTEST_9_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_9_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_9_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + f7_ = t.f7_; + f8_ = t.f8_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; + T7 f7_; + T8 f8_; +}; + +template +class tuple { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_(), + f9_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + 
GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7, + GTEST_BY_REF_(T8) f8, GTEST_BY_REF_(T9) f9) : f0_(f0), f1_(f1), f2_(f2), + f3_(f3), f4_(f4), f5_(f5), f6_(f6), f7_(f7), f8_(f8), f9_(f9) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), f9_(t.f9_) {} + + template + tuple(const GTEST_10_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), + f9_(t.f9_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_10_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_10_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + f7_ = t.f7_; + f8_ = t.f8_; + f9_ = t.f9_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; + T7 f7_; + T8 f8_; + T9 f9_; +}; + +// 6.1.3.2 Tuple creation functions. + +// Known limitations: we don't support passing an +// std::tr1::reference_wrapper to make_tuple(). And we don't +// implement tie(). + +inline tuple<> make_tuple() { return tuple<>(); } + +template +inline GTEST_1_TUPLE_(T) make_tuple(const T0& f0) { + return GTEST_1_TUPLE_(T)(f0); +} + +template +inline GTEST_2_TUPLE_(T) make_tuple(const T0& f0, const T1& f1) { + return GTEST_2_TUPLE_(T)(f0, f1); +} + +template +inline GTEST_3_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2) { + return GTEST_3_TUPLE_(T)(f0, f1, f2); +} + +template +inline GTEST_4_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3) { + return GTEST_4_TUPLE_(T)(f0, f1, f2, f3); +} + +template +inline GTEST_5_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4) { + return GTEST_5_TUPLE_(T)(f0, f1, f2, f3, f4); +} + +template +inline GTEST_6_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5) { + return GTEST_6_TUPLE_(T)(f0, f1, f2, f3, f4, f5); +} + +template +inline GTEST_7_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6) { + return GTEST_7_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6); +} + +template +inline GTEST_8_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7) { + return GTEST_8_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7); +} + +template +inline GTEST_9_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7, + const T8& f8) { + return GTEST_9_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8); +} + +template +inline GTEST_10_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7, + const T8& f8, const T9& f9) { + return GTEST_10_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9); +} + +// 6.1.3.3 Tuple helper classes. 
+ +template struct tuple_size; + +template +struct tuple_size { static const int value = 0; }; + +template +struct tuple_size { static const int value = 1; }; + +template +struct tuple_size { static const int value = 2; }; + +template +struct tuple_size { static const int value = 3; }; + +template +struct tuple_size { static const int value = 4; }; + +template +struct tuple_size { static const int value = 5; }; + +template +struct tuple_size { static const int value = 6; }; + +template +struct tuple_size { static const int value = 7; }; + +template +struct tuple_size { static const int value = 8; }; + +template +struct tuple_size { static const int value = 9; }; + +template +struct tuple_size { static const int value = 10; }; + +template +struct tuple_element { + typedef typename gtest_internal::TupleElement< + k < (tuple_size::value), k, Tuple>::type type; +}; + +#define GTEST_TUPLE_ELEMENT_(k, Tuple) typename tuple_element::type + +// 6.1.3.4 Element access. + +namespace gtest_internal { + +template <> +class Get<0> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple)) + Field(Tuple& t) { return t.f0_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple)) + ConstField(const Tuple& t) { return t.f0_; } +}; + +template <> +class Get<1> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple)) + Field(Tuple& t) { return t.f1_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple)) + ConstField(const Tuple& t) { return t.f1_; } +}; + +template <> +class Get<2> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple)) + Field(Tuple& t) { return t.f2_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple)) + ConstField(const Tuple& t) { return t.f2_; } +}; + +template <> +class Get<3> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple)) + Field(Tuple& t) { return t.f3_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple)) + ConstField(const Tuple& t) { return t.f3_; } +}; + +template <> +class Get<4> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple)) + Field(Tuple& t) { return t.f4_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple)) + ConstField(const Tuple& t) { return t.f4_; } +}; + +template <> +class Get<5> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple)) + Field(Tuple& t) { return t.f5_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple)) + ConstField(const Tuple& t) { return t.f5_; } +}; + +template <> +class Get<6> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple)) + Field(Tuple& t) { return t.f6_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple)) + ConstField(const Tuple& t) { return t.f6_; } +}; + +template <> +class Get<7> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple)) + Field(Tuple& t) { return t.f7_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple)) + ConstField(const Tuple& t) { return t.f7_; } +}; + +template <> +class Get<8> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple)) + Field(Tuple& t) { return t.f8_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple)) + ConstField(const Tuple& t) { return t.f8_; } +}; + +template <> +class Get<9> { + public: + template + static 
GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple)) + Field(Tuple& t) { return t.f9_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple)) + ConstField(const Tuple& t) { return t.f9_; } +}; + +} // namespace gtest_internal + +template +GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T))) +get(GTEST_10_TUPLE_(T)& t) { + return gtest_internal::Get::Field(t); +} + +template +GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T))) +get(const GTEST_10_TUPLE_(T)& t) { + return gtest_internal::Get::ConstField(t); +} + +// 6.1.3.5 Relational operators + +// We only implement == and !=, as we don't have a need for the rest yet. + +namespace gtest_internal { + +// SameSizeTuplePrefixComparator::Eq(t1, t2) returns true if the +// first k fields of t1 equals the first k fields of t2. +// SameSizeTuplePrefixComparator(k1, k2) would be a compiler error if +// k1 != k2. +template +struct SameSizeTuplePrefixComparator; + +template <> +struct SameSizeTuplePrefixComparator<0, 0> { + template + static bool Eq(const Tuple1& /* t1 */, const Tuple2& /* t2 */) { + return true; + } +}; + +template +struct SameSizeTuplePrefixComparator { + template + static bool Eq(const Tuple1& t1, const Tuple2& t2) { + return SameSizeTuplePrefixComparator::Eq(t1, t2) && + ::std::tr1::get(t1) == ::std::tr1::get(t2); + } +}; + +} // namespace gtest_internal + +template +inline bool operator==(const GTEST_10_TUPLE_(T)& t, + const GTEST_10_TUPLE_(U)& u) { + return gtest_internal::SameSizeTuplePrefixComparator< + tuple_size::value, + tuple_size::value>::Eq(t, u); +} + +template +inline bool operator!=(const GTEST_10_TUPLE_(T)& t, + const GTEST_10_TUPLE_(U)& u) { return !(t == u); } + +// 6.1.4 Pairs. +// Unimplemented. + +} // namespace tr1 +} // namespace std + +#undef GTEST_0_TUPLE_ +#undef GTEST_1_TUPLE_ +#undef GTEST_2_TUPLE_ +#undef GTEST_3_TUPLE_ +#undef GTEST_4_TUPLE_ +#undef GTEST_5_TUPLE_ +#undef GTEST_6_TUPLE_ +#undef GTEST_7_TUPLE_ +#undef GTEST_8_TUPLE_ +#undef GTEST_9_TUPLE_ +#undef GTEST_10_TUPLE_ + +#undef GTEST_0_TYPENAMES_ +#undef GTEST_1_TYPENAMES_ +#undef GTEST_2_TYPENAMES_ +#undef GTEST_3_TYPENAMES_ +#undef GTEST_4_TYPENAMES_ +#undef GTEST_5_TYPENAMES_ +#undef GTEST_6_TYPENAMES_ +#undef GTEST_7_TYPENAMES_ +#undef GTEST_8_TYPENAMES_ +#undef GTEST_9_TYPENAMES_ +#undef GTEST_10_TYPENAMES_ + +#undef GTEST_DECLARE_TUPLE_AS_FRIEND_ +#undef GTEST_BY_REF_ +#undef GTEST_ADD_REF_ +#undef GTEST_TUPLE_ELEMENT_ + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_ +# elif GTEST_OS_SYMBIAN + +// On Symbian, BOOST_HAS_TR1_TUPLE causes Boost's TR1 tuple library to +// use STLport's tuple implementation, which unfortunately doesn't +// work as the copy of STLport distributed with Symbian is incomplete. +// By making sure BOOST_HAS_TR1_TUPLE is undefined, we force Boost to +// use its own tuple implementation. +# ifdef BOOST_HAS_TR1_TUPLE +# undef BOOST_HAS_TR1_TUPLE +# endif // BOOST_HAS_TR1_TUPLE + +// This prevents , which defines +// BOOST_HAS_TR1_TUPLE, from being #included by Boost's . +# define BOOST_TR1_DETAIL_CONFIG_HPP_INCLUDED +# include + +# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40000) +// GCC 4.0+ implements tr1/tuple in the header. This does +// not conform to the TR1 spec, which requires the header to be . + +# if !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302 +// Until version 4.3.2, gcc has a bug that causes , +// which is #included by , to not compile when RTTI is +// disabled. _TR1_FUNCTIONAL is the header guard for +// . 
Hence the following #define is a hack to prevent +// from being included. +# define _TR1_FUNCTIONAL 1 +# include +# undef _TR1_FUNCTIONAL // Allows the user to #include + // if he chooses to. +# else +# include // NOLINT +# endif // !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302 + +# else +// If the compiler is not GCC 4.0+, we assume the user is using a +// spec-conforming TR1 implementation. +# include // NOLINT +# endif // GTEST_USE_OWN_TR1_TUPLE + +#endif // GTEST_HAS_TR1_TUPLE + +// Determines whether clone(2) is supported. +// Usually it will only be available on Linux, excluding +// Linux on the Itanium architecture. +// Also see http://linux.die.net/man/2/clone. +#ifndef GTEST_HAS_CLONE +// The user didn't tell us, so we need to figure it out. + +# if GTEST_OS_LINUX && !defined(__ia64__) +# define GTEST_HAS_CLONE 1 +# else +# define GTEST_HAS_CLONE 0 +# endif // GTEST_OS_LINUX && !defined(__ia64__) + +#endif // GTEST_HAS_CLONE + +// Determines whether to support stream redirection. This is used to test +// output correctness and to implement death tests. +#ifndef GTEST_HAS_STREAM_REDIRECTION +// By default, we assume that stream redirection is supported on all +// platforms except known mobile ones. +# if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN +# define GTEST_HAS_STREAM_REDIRECTION 0 +# else +# define GTEST_HAS_STREAM_REDIRECTION 1 +# endif // !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_SYMBIAN +#endif // GTEST_HAS_STREAM_REDIRECTION + +// Determines whether to support death tests. +// Google Test does not support death tests for VC 7.1 and earlier as +// abort() in a VC 7.1 application compiled as GUI in debug config +// pops up a dialog window that cannot be suppressed programmatically. +#if (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \ + (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER >= 1400) || \ + GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX || GTEST_OS_HPUX) +# define GTEST_HAS_DEATH_TEST 1 +# include // NOLINT +#endif + +// We don't support MSVC 7.1 with exceptions disabled now. Therefore +// all the compilers we care about are adequate for supporting +// value-parameterized tests. +#define GTEST_HAS_PARAM_TEST 1 + +// Determines whether to support type-driven tests. + +// Typed tests need and variadic macros, which GCC, VC++ 8.0, +// Sun Pro CC, IBM Visual Age, and HP aCC support. +#if defined(__GNUC__) || (_MSC_VER >= 1400) || defined(__SUNPRO_CC) || \ + defined(__IBMCPP__) || defined(__HP_aCC) +# define GTEST_HAS_TYPED_TEST 1 +# define GTEST_HAS_TYPED_TEST_P 1 +#endif + +// Determines whether to support Combine(). This only makes sense when +// value-parameterized tests are enabled. The implementation doesn't +// work on Sun Studio since it doesn't understand templated conversion +// operators. +#if GTEST_HAS_PARAM_TEST && GTEST_HAS_TR1_TUPLE && !defined(__SUNPRO_CC) +# define GTEST_HAS_COMBINE 1 +#endif + +// Determines whether the system compiler uses UTF-16 for encoding wide strings. +#define GTEST_WIDE_STRING_USES_UTF16_ \ + (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_SYMBIAN || GTEST_OS_AIX) + +// Determines whether test results can be streamed to a socket. +#if GTEST_OS_LINUX +# define GTEST_CAN_STREAM_RESULTS_ 1 +#endif + +// Defines some utility macros. + +// The GNU compiler emits a warning if nested "if" statements are followed by +// an "else" statement and braces are not used to explicitly disambiguate the +// "else" binding. 
This leads to problems with code like: +// +// if (gate) +// ASSERT_*(condition) << "Some message"; +// +// The "switch (0) case 0:" idiom is used to suppress this. +#ifdef __INTEL_COMPILER +# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ +#else +# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: default: // NOLINT +#endif + +// Use this annotation at the end of a struct/class definition to +// prevent the compiler from optimizing away instances that are never +// used. This is useful when all interesting logic happens inside the +// c'tor and / or d'tor. Example: +// +// struct Foo { +// Foo() { ... } +// } GTEST_ATTRIBUTE_UNUSED_; +// +// Also use it after a variable or parameter declaration to tell the +// compiler the variable/parameter does not have to be used. +#if defined(__GNUC__) && !defined(COMPILER_ICC) +# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused)) +#else +# define GTEST_ATTRIBUTE_UNUSED_ +#endif + +// A macro to disallow operator= +// This should be used in the private: declarations for a class. +#define GTEST_DISALLOW_ASSIGN_(type)\ + void operator=(type const &) + +// A macro to disallow copy constructor and operator= +// This should be used in the private: declarations for a class. +#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type)\ + type(type const &);\ + GTEST_DISALLOW_ASSIGN_(type) + +// Tell the compiler to warn about unused return values for functions declared +// with this macro. The macro should be used on function declarations +// following the argument list: +// +// Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_; +#if defined(__GNUC__) && (GTEST_GCC_VER_ >= 30400) && !defined(COMPILER_ICC) +# define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result)) +#else +# define GTEST_MUST_USE_RESULT_ +#endif // __GNUC__ && (GTEST_GCC_VER_ >= 30400) && !COMPILER_ICC + +// Determine whether the compiler supports Microsoft's Structured Exception +// Handling. This is supported by several Windows compilers but generally +// does not exist on any other system. +#ifndef GTEST_HAS_SEH +// The user didn't tell us, so we need to figure it out. + +# if defined(_MSC_VER) || defined(__BORLANDC__) +// These two compilers are known to support SEH. +# define GTEST_HAS_SEH 1 +# else +// Assume no SEH. +# define GTEST_HAS_SEH 0 +# endif + +#endif // GTEST_HAS_SEH + +#ifdef _MSC_VER + +# if GTEST_LINKED_AS_SHARED_LIBRARY +# define GTEST_API_ __declspec(dllimport) +# elif GTEST_CREATE_SHARED_LIBRARY +# define GTEST_API_ __declspec(dllexport) +# endif + +#endif // _MSC_VER + +#ifndef GTEST_API_ +# define GTEST_API_ +#endif + +#ifdef __GNUC__ +// Ask the compiler to never inline a given function. +# define GTEST_NO_INLINE_ __attribute__((noinline)) +#else +# define GTEST_NO_INLINE_ +#endif + +namespace testing { + +class Message; + +namespace internal { + +class String; + +// The GTEST_COMPILE_ASSERT_ macro can be used to verify that a compile time +// expression is true. For example, you could use it to verify the +// size of a static array: +// +// GTEST_COMPILE_ASSERT_(ARRAYSIZE(content_type_names) == CONTENT_NUM_TYPES, +// content_type_names_incorrect_size); +// +// or to make sure a struct is smaller than a certain size: +// +// GTEST_COMPILE_ASSERT_(sizeof(foo) < 128, foo_too_large); +// +// The second argument to the macro is the name of the variable. If +// the expression is false, most compilers will issue a warning/error +// containing the name of the variable. 
+ +template +struct CompileAssert { +}; + +#define GTEST_COMPILE_ASSERT_(expr, msg) \ + typedef ::testing::internal::CompileAssert<(bool(expr))> \ + msg[bool(expr) ? 1 : -1] + +// Implementation details of GTEST_COMPILE_ASSERT_: +// +// - GTEST_COMPILE_ASSERT_ works by defining an array type that has -1 +// elements (and thus is invalid) when the expression is false. +// +// - The simpler definition +// +// #define GTEST_COMPILE_ASSERT_(expr, msg) typedef char msg[(expr) ? 1 : -1] +// +// does not work, as gcc supports variable-length arrays whose sizes +// are determined at run-time (this is gcc's extension and not part +// of the C++ standard). As a result, gcc fails to reject the +// following code with the simple definition: +// +// int foo; +// GTEST_COMPILE_ASSERT_(foo, msg); // not supposed to compile as foo is +// // not a compile-time constant. +// +// - By using the type CompileAssert<(bool(expr))>, we ensures that +// expr is a compile-time constant. (Template arguments must be +// determined at compile-time.) +// +// - The outter parentheses in CompileAssert<(bool(expr))> are necessary +// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written +// +// CompileAssert +// +// instead, these compilers will refuse to compile +// +// GTEST_COMPILE_ASSERT_(5 > 0, some_message); +// +// (They seem to think the ">" in "5 > 0" marks the end of the +// template argument list.) +// +// - The array size is (bool(expr) ? 1 : -1), instead of simply +// +// ((expr) ? 1 : -1). +// +// This is to avoid running into a bug in MS VC 7.1, which +// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1. + +// StaticAssertTypeEqHelper is used by StaticAssertTypeEq defined in gtest.h. +// +// This template is declared, but intentionally undefined. +template +struct StaticAssertTypeEqHelper; + +template +struct StaticAssertTypeEqHelper {}; + +#if GTEST_HAS_GLOBAL_STRING +typedef ::string string; +#else +typedef ::std::string string; +#endif // GTEST_HAS_GLOBAL_STRING + +#if GTEST_HAS_GLOBAL_WSTRING +typedef ::wstring wstring; +#elif GTEST_HAS_STD_WSTRING +typedef ::std::wstring wstring; +#endif // GTEST_HAS_GLOBAL_WSTRING + +// A helper for suppressing warnings on constant condition. It just +// returns 'condition'. +GTEST_API_ bool IsTrue(bool condition); + +// Defines scoped_ptr. + +// This implementation of scoped_ptr is PARTIAL - it only contains +// enough stuff to satisfy Google Test's need. +template +class scoped_ptr { + public: + typedef T element_type; + + explicit scoped_ptr(T* p = NULL) : ptr_(p) {} + ~scoped_ptr() { reset(); } + + T& operator*() const { return *ptr_; } + T* operator->() const { return ptr_; } + T* get() const { return ptr_; } + + T* release() { + T* const ptr = ptr_; + ptr_ = NULL; + return ptr; + } + + void reset(T* p = NULL) { + if (p != ptr_) { + if (IsTrue(sizeof(T) > 0)) { // Makes sure T is a complete type. + delete ptr_; + } + ptr_ = p; + } + } + private: + T* ptr_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(scoped_ptr); +}; + +// Defines RE. + +// A simple C++ wrapper for . It uses the POSIX Extended +// Regular Expression syntax. +class GTEST_API_ RE { + public: + // A copy constructor is required by the Standard to initialize object + // references from r-values. + RE(const RE& other) { Init(other.pattern()); } + + // Constructs an RE from a string. 
+ RE(const ::std::string& regex) { Init(regex.c_str()); } // NOLINT + +#if GTEST_HAS_GLOBAL_STRING + + RE(const ::string& regex) { Init(regex.c_str()); } // NOLINT + +#endif // GTEST_HAS_GLOBAL_STRING + + RE(const char* regex) { Init(regex); } // NOLINT + ~RE(); + + // Returns the string representation of the regex. + const char* pattern() const { return pattern_; } + + // FullMatch(str, re) returns true iff regular expression re matches + // the entire str. + // PartialMatch(str, re) returns true iff regular expression re + // matches a substring of str (including str itself). + // + // TODO(wan@google.com): make FullMatch() and PartialMatch() work + // when str contains NUL characters. + static bool FullMatch(const ::std::string& str, const RE& re) { + return FullMatch(str.c_str(), re); + } + static bool PartialMatch(const ::std::string& str, const RE& re) { + return PartialMatch(str.c_str(), re); + } + +#if GTEST_HAS_GLOBAL_STRING + + static bool FullMatch(const ::string& str, const RE& re) { + return FullMatch(str.c_str(), re); + } + static bool PartialMatch(const ::string& str, const RE& re) { + return PartialMatch(str.c_str(), re); + } + +#endif // GTEST_HAS_GLOBAL_STRING + + static bool FullMatch(const char* str, const RE& re); + static bool PartialMatch(const char* str, const RE& re); + + private: + void Init(const char* regex); + + // We use a const char* instead of a string, as Google Test may be used + // where string is not available. We also do not use Google Test's own + // String type here, in order to simplify dependencies between the + // files. + const char* pattern_; + bool is_valid_; + +#if GTEST_USES_POSIX_RE + + regex_t full_regex_; // For FullMatch(). + regex_t partial_regex_; // For PartialMatch(). + +#else // GTEST_USES_SIMPLE_RE + + const char* full_pattern_; // For FullMatch(); + +#endif + + GTEST_DISALLOW_ASSIGN_(RE); +}; + +// Formats a source file path and a line number as they would appear +// in an error message from the compiler used to compile this code. +GTEST_API_ ::std::string FormatFileLocation(const char* file, int line); + +// Formats a file location for compiler-independent XML output. +// Although this function is not platform dependent, we put it next to +// FormatFileLocation in order to contrast the two functions. +GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(const char* file, + int line); + +// Defines logging utilities: +// GTEST_LOG_(severity) - logs messages at the specified severity level. The +// message itself is streamed into the macro. +// LogToStderr() - directs all log messages to stderr. +// FlushInfoLog() - flushes informational log messages. + +enum GTestLogSeverity { + GTEST_INFO, + GTEST_WARNING, + GTEST_ERROR, + GTEST_FATAL +}; + +// Formats log entry severity, provides a stream object for streaming the +// log message, and terminates the message with a newline when going out of +// scope. +class GTEST_API_ GTestLog { + public: + GTestLog(GTestLogSeverity severity, const char* file, int line); + + // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program. + ~GTestLog(); + + ::std::ostream& GetStream() { return ::std::cerr; } + + private: + const GTestLogSeverity severity_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog); +}; + +#define GTEST_LOG_(severity) \ + ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \ + __FILE__, __LINE__).GetStream() + +inline void LogToStderr() {} +inline void FlushInfoLog() { fflush(NULL); } + +// INTERNAL IMPLEMENTATION - DO NOT USE. 
+// +// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition +// is not satisfied. +// Synopsys: +// GTEST_CHECK_(boolean_condition); +// or +// GTEST_CHECK_(boolean_condition) << "Additional message"; +// +// This checks the condition and if the condition is not satisfied +// it prints message about the condition violation, including the +// condition itself, plus additional message streamed into it, if any, +// and then it aborts the program. It aborts the program irrespective of +// whether it is built in the debug mode or not. +#define GTEST_CHECK_(condition) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::IsTrue(condition)) \ + ; \ + else \ + GTEST_LOG_(FATAL) << "Condition " #condition " failed. " + +// An all-mode assert to verify that the given POSIX-style function +// call returns 0 (indicating success). Known limitation: this +// doesn't expand to a balanced 'if' statement, so enclose the macro +// in {} if you need to use it as the only statement in an 'if' +// branch. +#define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \ + if (const int gtest_error = (posix_call)) \ + GTEST_LOG_(FATAL) << #posix_call << "failed with error " \ + << gtest_error + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Use ImplicitCast_ as a safe version of static_cast for upcasting in +// the type hierarchy (e.g. casting a Foo* to a SuperclassOfFoo* or a +// const Foo*). When you use ImplicitCast_, the compiler checks that +// the cast is safe. Such explicit ImplicitCast_s are necessary in +// surprisingly many situations where C++ demands an exact type match +// instead of an argument type convertable to a target type. +// +// The syntax for using ImplicitCast_ is the same as for static_cast: +// +// ImplicitCast_(expr) +// +// ImplicitCast_ would have been part of the C++ standard library, +// but the proposal was submitted too late. It will probably make +// its way into the language in the future. +// +// This relatively ugly name is intentional. It prevents clashes with +// similar functions users may have (e.g., implicit_cast). The internal +// namespace alone is not enough because the function can be found by ADL. +template +inline To ImplicitCast_(To x) { return x; } + +// When you upcast (that is, cast a pointer from type Foo to type +// SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts +// always succeed. When you downcast (that is, cast a pointer from +// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because +// how do you know the pointer is really of type SubclassOfFoo? It +// could be a bare Foo, or of type DifferentSubclassOfFoo. Thus, +// when you downcast, you should use this macro. In debug mode, we +// use dynamic_cast<> to double-check the downcast is legal (we die +// if it's not). In normal mode, we do the efficient static_cast<> +// instead. Thus, it's important to test in debug mode to make sure +// the cast is legal! +// This is the only place in the code we should use dynamic_cast<>. +// In particular, you SHOULDN'T be using dynamic_cast<> in order to +// do RTTI (eg code like this: +// if (dynamic_cast(foo)) HandleASubclass1Object(foo); +// if (dynamic_cast(foo)) HandleASubclass2Object(foo); +// You should design the code some other way not to need this. +// +// This relatively ugly name is intentional. It prevents clashes with +// similar functions users may have (e.g., down_cast). The internal +// namespace alone is not enough because the function can be found by ADL. 
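+// An illustrative, commented-out sketch of the two casting helpers documented
+// above; Base and Derived are hypothetical types, not part of gtest:
+//
+//   class Base { public: virtual ~Base() {} };
+//   class Derived : public Base {};
+//
+//   Derived d;
+//   Base* base = ImplicitCast_<Base*>(&d);      // upcast: always safe
+//   Derived* back = DownCast_<Derived*>(base);  // downcast: verified with
+//                                               // dynamic_cast in debug mode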
+template // use like this: DownCast_(foo); +inline To DownCast_(From* f) { // so we only accept pointers + // Ensures that To is a sub-type of From *. This test is here only + // for compile-time type checking, and has no overhead in an + // optimized build at run-time, as it will be optimized away + // completely. + if (false) { + const To to = NULL; + ::testing::internal::ImplicitCast_(to); + } + +#if GTEST_HAS_RTTI + // RTTI: debug mode only! + GTEST_CHECK_(f == NULL || dynamic_cast(f) != NULL); +#endif + return static_cast(f); +} + +// Downcasts the pointer of type Base to Derived. +// Derived must be a subclass of Base. The parameter MUST +// point to a class of type Derived, not any subclass of it. +// When RTTI is available, the function performs a runtime +// check to enforce this. +template +Derived* CheckedDowncastToActualType(Base* base) { +#if GTEST_HAS_RTTI + GTEST_CHECK_(typeid(*base) == typeid(Derived)); + return dynamic_cast(base); // NOLINT +#else + return static_cast(base); // Poor man's downcast. +#endif +} + +#if GTEST_HAS_STREAM_REDIRECTION + +// Defines the stderr capturer: +// CaptureStdout - starts capturing stdout. +// GetCapturedStdout - stops capturing stdout and returns the captured string. +// CaptureStderr - starts capturing stderr. +// GetCapturedStderr - stops capturing stderr and returns the captured string. +// +GTEST_API_ void CaptureStdout(); +GTEST_API_ String GetCapturedStdout(); +GTEST_API_ void CaptureStderr(); +GTEST_API_ String GetCapturedStderr(); + +#endif // GTEST_HAS_STREAM_REDIRECTION + + +#if GTEST_HAS_DEATH_TEST + +// A copy of all command line arguments. Set by InitGoogleTest(). +extern ::std::vector g_argvs; + +// GTEST_HAS_DEATH_TEST implies we have ::std::string. +const ::std::vector& GetArgvs(); + +#endif // GTEST_HAS_DEATH_TEST + +// Defines synchronization primitives. + +#if GTEST_HAS_PTHREAD + +// Sleeps for (roughly) n milli-seconds. This function is only for +// testing Google Test's own constructs. Don't use it in user tests, +// either directly or indirectly. +inline void SleepMilliseconds(int n) { + const timespec time = { + 0, // 0 seconds. + n * 1000L * 1000L, // And n ms. + }; + nanosleep(&time, NULL); +} + +// Allows a controller thread to pause execution of newly created +// threads until notified. Instances of this class must be created +// and destroyed in the controller thread. +// +// This class is only for testing Google Test's own constructs. Do not +// use it in user tests, either directly or indirectly. +class Notification { + public: + Notification() : notified_(false) {} + + // Notifies all threads created with this notification to start. Must + // be called from the controller thread. + void Notify() { notified_ = true; } + + // Blocks until the controller thread notifies. Must be called from a test + // thread. + void WaitForNotification() { + while(!notified_) { + SleepMilliseconds(10); + } + } + + private: + volatile bool notified_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification); +}; + +// As a C-function, ThreadFuncWithCLinkage cannot be templated itself. +// Consequently, it cannot select a correct instantiation of ThreadWithParam +// in order to call its Run(). Introducing ThreadWithParamBase as a +// non-templated base class for ThreadWithParam allows us to bypass this +// problem. +class ThreadWithParamBase { + public: + virtual ~ThreadWithParamBase() {} + virtual void Run() = 0; +}; + +// pthread_create() accepts a pointer to a function type with the C linkage. 
+// According to the Standard (7.5/1), function types with different linkages +// are different even if they are otherwise identical. Some compilers (for +// example, SunStudio) treat them as different types. Since class methods +// cannot be defined with C-linkage we need to define a free C-function to +// pass into pthread_create(). +extern "C" inline void* ThreadFuncWithCLinkage(void* thread) { + static_cast(thread)->Run(); + return NULL; +} + +// Helper class for testing Google Test's multi-threading constructs. +// To use it, write: +// +// void ThreadFunc(int param) { /* Do things with param */ } +// Notification thread_can_start; +// ... +// // The thread_can_start parameter is optional; you can supply NULL. +// ThreadWithParam thread(&ThreadFunc, 5, &thread_can_start); +// thread_can_start.Notify(); +// +// These classes are only for testing Google Test's own constructs. Do +// not use them in user tests, either directly or indirectly. +template +class ThreadWithParam : public ThreadWithParamBase { + public: + typedef void (*UserThreadFunc)(T); + + ThreadWithParam( + UserThreadFunc func, T param, Notification* thread_can_start) + : func_(func), + param_(param), + thread_can_start_(thread_can_start), + finished_(false) { + ThreadWithParamBase* const base = this; + // The thread can be created only after all fields except thread_ + // have been initialized. + GTEST_CHECK_POSIX_SUCCESS_( + pthread_create(&thread_, 0, &ThreadFuncWithCLinkage, base)); + } + ~ThreadWithParam() { Join(); } + + void Join() { + if (!finished_) { + GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread_, 0)); + finished_ = true; + } + } + + virtual void Run() { + if (thread_can_start_ != NULL) + thread_can_start_->WaitForNotification(); + func_(param_); + } + + private: + const UserThreadFunc func_; // User-supplied thread function. + const T param_; // User-supplied parameter to the thread function. + // When non-NULL, used to block execution until the controller thread + // notifies. + Notification* const thread_can_start_; + bool finished_; // true iff we know that the thread function has finished. + pthread_t thread_; // The native thread object. + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam); +}; + +// MutexBase and Mutex implement mutex on pthreads-based platforms. They +// are used in conjunction with class MutexLock: +// +// Mutex mutex; +// ... +// MutexLock lock(&mutex); // Acquires the mutex and releases it at the end +// // of the current scope. +// +// MutexBase implements behavior for both statically and dynamically +// allocated mutexes. Do not use MutexBase directly. Instead, write +// the following to define a static mutex: +// +// GTEST_DEFINE_STATIC_MUTEX_(g_some_mutex); +// +// You can forward declare a static mutex like this: +// +// GTEST_DECLARE_STATIC_MUTEX_(g_some_mutex); +// +// To create a dynamic mutex, just define an object of type Mutex. +class MutexBase { + public: + // Acquires this mutex. + void Lock() { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_)); + owner_ = pthread_self(); + } + + // Releases this mutex. + void Unlock() { + // We don't protect writing to owner_ here, as it's the caller's + // responsibility to ensure that the current thread holds the + // mutex when this is called. + owner_ = 0; + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_)); + } + + // Does nothing if the current thread holds the mutex. Otherwise, crashes + // with high probability. 
+ void AssertHeld() const { + GTEST_CHECK_(owner_ == pthread_self()) + << "The current thread is not holding the mutex @" << this; + } + + // A static mutex may be used before main() is entered. It may even + // be used before the dynamic initialization stage. Therefore we + // must be able to initialize a static mutex object at link time. + // This means MutexBase has to be a POD and its member variables + // have to be public. + public: + pthread_mutex_t mutex_; // The underlying pthread mutex. + pthread_t owner_; // The thread holding the mutex; 0 means no one holds it. +}; + +// Forward-declares a static mutex. +# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ + extern ::testing::internal::MutexBase mutex + +// Defines and statically (i.e. at link time) initializes a static mutex. +# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \ + ::testing::internal::MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER, 0 } + +// The Mutex class can only be used for mutexes created at runtime. It +// shares its API with MutexBase otherwise. +class Mutex : public MutexBase { + public: + Mutex() { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL)); + owner_ = 0; + } + ~Mutex() { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_)); + } + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex); +}; + +// We cannot name this class MutexLock as the ctor declaration would +// conflict with a macro named MutexLock, which is defined on some +// platforms. Hence the typedef trick below. +class GTestMutexLock { + public: + explicit GTestMutexLock(MutexBase* mutex) + : mutex_(mutex) { mutex_->Lock(); } + + ~GTestMutexLock() { mutex_->Unlock(); } + + private: + MutexBase* const mutex_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock); +}; + +typedef GTestMutexLock MutexLock; + +// Helpers for ThreadLocal. + +// pthread_key_create() requires DeleteThreadLocalValue() to have +// C-linkage. Therefore it cannot be templatized to access +// ThreadLocal. Hence the need for class +// ThreadLocalValueHolderBase. +class ThreadLocalValueHolderBase { + public: + virtual ~ThreadLocalValueHolderBase() {} +}; + +// Called by pthread to delete thread-local data stored by +// pthread_setspecific(). +extern "C" inline void DeleteThreadLocalValue(void* value_holder) { + delete static_cast(value_holder); +} + +// Implements thread-local storage on pthreads-based systems. +// +// // Thread 1 +// ThreadLocal tl(100); // 100 is the default value for each thread. +// +// // Thread 2 +// tl.set(150); // Changes the value for thread 2 only. +// EXPECT_EQ(150, tl.get()); +// +// // Thread 1 +// EXPECT_EQ(100, tl.get()); // In thread 1, tl has the original value. +// tl.set(200); +// EXPECT_EQ(200, tl.get()); +// +// The template type argument T must have a public copy constructor. +// In addition, the default ThreadLocal constructor requires T to have +// a public default constructor. +// +// An object managed for a thread by a ThreadLocal instance is deleted +// when the thread exits. Or, if the ThreadLocal instance dies in +// that thread, when the ThreadLocal dies. It's the user's +// responsibility to ensure that all other threads using a ThreadLocal +// have exited when it dies, or the per-thread objects for those +// threads will not be deleted. +// +// Google Test only uses global ThreadLocal objects. That means they +// will die after main() has returned. Therefore, no per-thread +// object managed by Google Test will be leaked as long as all threads +// using Google Test have exited when main() returns. 
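+// An illustrative, commented-out sketch pulling the pthread-backed primitives
+// above together; g_count, g_count_mutex, and CountEvent are hypothetical
+// names, not part of gtest:
+//
+//   GTEST_DEFINE_STATIC_MUTEX_(g_count_mutex);
+//   int g_count = 0;
+//   ThreadLocal<int> g_thread_count(0);  // per-thread value; class is below
+//
+//   void CountEvent() {
+//     MutexLock lock(&g_count_mutex);    // released at the end of the scope
+//     ++g_count;
+//     g_thread_count.set(g_thread_count.get() + 1);
+//   }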
+template +class ThreadLocal { + public: + ThreadLocal() : key_(CreateKey()), + default_() {} + explicit ThreadLocal(const T& value) : key_(CreateKey()), + default_(value) {} + + ~ThreadLocal() { + // Destroys the managed object for the current thread, if any. + DeleteThreadLocalValue(pthread_getspecific(key_)); + + // Releases resources associated with the key. This will *not* + // delete managed objects for other threads. + GTEST_CHECK_POSIX_SUCCESS_(pthread_key_delete(key_)); + } + + T* pointer() { return GetOrCreateValue(); } + const T* pointer() const { return GetOrCreateValue(); } + const T& get() const { return *pointer(); } + void set(const T& value) { *pointer() = value; } + + private: + // Holds a value of type T. + class ValueHolder : public ThreadLocalValueHolderBase { + public: + explicit ValueHolder(const T& value) : value_(value) {} + + T* pointer() { return &value_; } + + private: + T value_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder); + }; + + static pthread_key_t CreateKey() { + pthread_key_t key; + // When a thread exits, DeleteThreadLocalValue() will be called on + // the object managed for that thread. + GTEST_CHECK_POSIX_SUCCESS_( + pthread_key_create(&key, &DeleteThreadLocalValue)); + return key; + } + + T* GetOrCreateValue() const { + ThreadLocalValueHolderBase* const holder = + static_cast(pthread_getspecific(key_)); + if (holder != NULL) { + return CheckedDowncastToActualType(holder)->pointer(); + } + + ValueHolder* const new_holder = new ValueHolder(default_); + ThreadLocalValueHolderBase* const holder_base = new_holder; + GTEST_CHECK_POSIX_SUCCESS_(pthread_setspecific(key_, holder_base)); + return new_holder->pointer(); + } + + // A key pthreads uses for looking up per-thread values. + const pthread_key_t key_; + const T default_; // The default value for each thread. + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal); +}; + +# define GTEST_IS_THREADSAFE 1 + +#else // GTEST_HAS_PTHREAD + +// A dummy implementation of synchronization primitives (mutex, lock, +// and thread-local variable). Necessary for compiling Google Test where +// mutex is not supported - using Google Test in multiple threads is not +// supported on such platforms. + +class Mutex { + public: + Mutex() {} + void AssertHeld() const {} +}; + +# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ + extern ::testing::internal::Mutex mutex + +# define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex + +class GTestMutexLock { + public: + explicit GTestMutexLock(Mutex*) {} // NOLINT +}; + +typedef GTestMutexLock MutexLock; + +template +class ThreadLocal { + public: + ThreadLocal() : value_() {} + explicit ThreadLocal(const T& value) : value_(value) {} + T* pointer() { return &value_; } + const T* pointer() const { return &value_; } + const T& get() const { return value_; } + void set(const T& value) { value_ = value; } + private: + T value_; +}; + +// The above synchronization primitives have dummy implementations. +// Therefore Google Test is not thread-safe. +# define GTEST_IS_THREADSAFE 0 + +#endif // GTEST_HAS_PTHREAD + +// Returns the number of threads running in the process, or 0 to indicate that +// we cannot detect it. +GTEST_API_ size_t GetThreadCount(); + +// Passing non-POD classes through ellipsis (...) crashes the ARM +// compiler and generates a warning in Sun Studio. The Nokia Symbian +// and the IBM XL C/C++ compiler try to instantiate a copy constructor +// for objects passed through ellipsis (...), failing for uncopyable +// objects. 
We define this to ensure that only POD is passed through +// ellipsis on these systems. +#if defined(__SYMBIAN32__) || defined(__IBMCPP__) || defined(__SUNPRO_CC) +// We lose support for NULL detection where the compiler doesn't like +// passing non-POD classes through ellipsis (...). +# define GTEST_ELLIPSIS_NEEDS_POD_ 1 +#else +# define GTEST_CAN_COMPARE_NULL 1 +#endif + +// The Nokia Symbian and IBM XL C/C++ compilers cannot decide between +// const T& and const T* in a function template. These compilers +// _can_ decide between class template specializations for T and T*, +// so a tr1::type_traits-like is_pointer works. +#if defined(__SYMBIAN32__) || defined(__IBMCPP__) +# define GTEST_NEEDS_IS_POINTER_ 1 +#endif + +template +struct bool_constant { + typedef bool_constant type; + static const bool value = bool_value; +}; +template const bool bool_constant::value; + +typedef bool_constant false_type; +typedef bool_constant true_type; + +template +struct is_pointer : public false_type {}; + +template +struct is_pointer : public true_type {}; + +template +struct IteratorTraits { + typedef typename Iterator::value_type value_type; +}; + +template +struct IteratorTraits { + typedef T value_type; +}; + +template +struct IteratorTraits { + typedef T value_type; +}; + +#if GTEST_OS_WINDOWS +# define GTEST_PATH_SEP_ "\\" +# define GTEST_HAS_ALT_PATH_SEP_ 1 +// The biggest signed integer type the compiler supports. +typedef __int64 BiggestInt; +#else +# define GTEST_PATH_SEP_ "/" +# define GTEST_HAS_ALT_PATH_SEP_ 0 +typedef long long BiggestInt; // NOLINT +#endif // GTEST_OS_WINDOWS + +// Utilities for char. + +// isspace(int ch) and friends accept an unsigned char or EOF. char +// may be signed, depending on the compiler (or compiler flags). +// Therefore we need to cast a char to unsigned char before calling +// isspace(), etc. + +inline bool IsAlpha(char ch) { + return isalpha(static_cast(ch)) != 0; +} +inline bool IsAlNum(char ch) { + return isalnum(static_cast(ch)) != 0; +} +inline bool IsDigit(char ch) { + return isdigit(static_cast(ch)) != 0; +} +inline bool IsLower(char ch) { + return islower(static_cast(ch)) != 0; +} +inline bool IsSpace(char ch) { + return isspace(static_cast(ch)) != 0; +} +inline bool IsUpper(char ch) { + return isupper(static_cast(ch)) != 0; +} +inline bool IsXDigit(char ch) { + return isxdigit(static_cast(ch)) != 0; +} + +inline char ToLower(char ch) { + return static_cast(tolower(static_cast(ch))); +} +inline char ToUpper(char ch) { + return static_cast(toupper(static_cast(ch))); +} + +// The testing::internal::posix namespace holds wrappers for common +// POSIX functions. These wrappers hide the differences between +// Windows/MSVC and POSIX systems. Since some compilers define these +// standard functions as macros, the wrapper cannot have the same name +// as the wrapped function. + +namespace posix { + +// Functions with a different name on Windows. 
+ +#if GTEST_OS_WINDOWS + +typedef struct _stat StatStruct; + +# ifdef __BORLANDC__ +inline int IsATTY(int fd) { return isatty(fd); } +inline int StrCaseCmp(const char* s1, const char* s2) { + return stricmp(s1, s2); +} +inline char* StrDup(const char* src) { return strdup(src); } +# else // !__BORLANDC__ +# if GTEST_OS_WINDOWS_MOBILE +inline int IsATTY(int /* fd */) { return 0; } +# else +inline int IsATTY(int fd) { return _isatty(fd); } +# endif // GTEST_OS_WINDOWS_MOBILE +inline int StrCaseCmp(const char* s1, const char* s2) { + return _stricmp(s1, s2); +} +inline char* StrDup(const char* src) { return _strdup(src); } +# endif // __BORLANDC__ + +# if GTEST_OS_WINDOWS_MOBILE +inline int FileNo(FILE* file) { return reinterpret_cast(_fileno(file)); } +// Stat(), RmDir(), and IsDir() are not needed on Windows CE at this +// time and thus not defined there. +# else +inline int FileNo(FILE* file) { return _fileno(file); } +inline int Stat(const char* path, StatStruct* buf) { return _stat(path, buf); } +inline int RmDir(const char* dir) { return _rmdir(dir); } +inline bool IsDir(const StatStruct& st) { + return (_S_IFDIR & st.st_mode) != 0; +} +# endif // GTEST_OS_WINDOWS_MOBILE + +#else + +typedef struct stat StatStruct; + +inline int FileNo(FILE* file) { return fileno(file); } +inline int IsATTY(int fd) { return isatty(fd); } +inline int Stat(const char* path, StatStruct* buf) { return stat(path, buf); } +inline int StrCaseCmp(const char* s1, const char* s2) { + return strcasecmp(s1, s2); +} +inline char* StrDup(const char* src) { return strdup(src); } +inline int RmDir(const char* dir) { return rmdir(dir); } +inline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); } + +#endif // GTEST_OS_WINDOWS + +// Functions deprecated by MSVC 8.0. + +#ifdef _MSC_VER +// Temporarily disable warning 4996 (deprecated function). +# pragma warning(push) +# pragma warning(disable:4996) +#endif + +inline const char* StrNCpy(char* dest, const char* src, size_t n) { + return strncpy(dest, src, n); +} + +// ChDir(), FReopen(), FDOpen(), Read(), Write(), Close(), and +// StrError() aren't needed on Windows CE at this time and thus not +// defined there. + +#if !GTEST_OS_WINDOWS_MOBILE +inline int ChDir(const char* dir) { return chdir(dir); } +#endif +inline FILE* FOpen(const char* path, const char* mode) { + return fopen(path, mode); +} +#if !GTEST_OS_WINDOWS_MOBILE +inline FILE *FReopen(const char* path, const char* mode, FILE* stream) { + return freopen(path, mode, stream); +} +inline FILE* FDOpen(int fd, const char* mode) { return fdopen(fd, mode); } +#endif +inline int FClose(FILE* fp) { return fclose(fp); } +#if !GTEST_OS_WINDOWS_MOBILE +inline int Read(int fd, void* buf, unsigned int count) { + return static_cast(read(fd, buf, count)); +} +inline int Write(int fd, const void* buf, unsigned int count) { + return static_cast(write(fd, buf, count)); +} +inline int Close(int fd) { return close(fd); } +inline const char* StrError(int errnum) { return strerror(errnum); } +#endif +inline const char* GetEnv(const char* name) { +#if GTEST_OS_WINDOWS_MOBILE + // We are on Windows CE, which has no environment variables. + return NULL; +#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9) + // Environment variables which we programmatically clear will be set to the + // empty string rather than unset (NULL). Handle that case. + const char* const env = getenv(name); + return (env != NULL && env[0] != '\0') ? 
env : NULL; +#else + return getenv(name); +#endif +} + +#ifdef _MSC_VER +# pragma warning(pop) // Restores the warning state. +#endif + +#if GTEST_OS_WINDOWS_MOBILE +// Windows CE has no C library. The abort() function is used in +// several places in Google Test. This implementation provides a reasonable +// imitation of standard behaviour. +void Abort(); +#else +inline void Abort() { abort(); } +#endif // GTEST_OS_WINDOWS_MOBILE + +} // namespace posix + +// The maximum number a BiggestInt can represent. This definition +// works no matter BiggestInt is represented in one's complement or +// two's complement. +// +// We cannot rely on numeric_limits in STL, as __int64 and long long +// are not part of standard C++ and numeric_limits doesn't need to be +// defined for them. +const BiggestInt kMaxBiggestInt = + ~(static_cast(1) << (8*sizeof(BiggestInt) - 1)); + +// This template class serves as a compile-time function from size to +// type. It maps a size in bytes to a primitive type with that +// size. e.g. +// +// TypeWithSize<4>::UInt +// +// is typedef-ed to be unsigned int (unsigned integer made up of 4 +// bytes). +// +// Such functionality should belong to STL, but I cannot find it +// there. +// +// Google Test uses this class in the implementation of floating-point +// comparison. +// +// For now it only handles UInt (unsigned int) as that's all Google Test +// needs. Other types can be easily added in the future if need +// arises. +template +class TypeWithSize { + public: + // This prevents the user from using TypeWithSize with incorrect + // values of N. + typedef void UInt; +}; + +// The specialization for size 4. +template <> +class TypeWithSize<4> { + public: + // unsigned int has size 4 in both gcc and MSVC. + // + // As base/basictypes.h doesn't compile on Windows, we cannot use + // uint32, uint64, and etc here. + typedef int Int; + typedef unsigned int UInt; +}; + +// The specialization for size 8. +template <> +class TypeWithSize<8> { + public: + +#if GTEST_OS_WINDOWS + typedef __int64 Int; + typedef unsigned __int64 UInt; +#else + typedef long long Int; // NOLINT + typedef unsigned long long UInt; // NOLINT +#endif // GTEST_OS_WINDOWS +}; + +// Integer types of known sizes. +typedef TypeWithSize<4>::Int Int32; +typedef TypeWithSize<4>::UInt UInt32; +typedef TypeWithSize<8>::Int Int64; +typedef TypeWithSize<8>::UInt UInt64; +typedef TypeWithSize<8>::Int TimeInMillis; // Represents time in milliseconds. + +// Utilities for command line flags and environment variables. + +// Macro for referencing flags. +#define GTEST_FLAG(name) FLAGS_gtest_##name + +// Macros for declaring flags. +#define GTEST_DECLARE_bool_(name) GTEST_API_ extern bool GTEST_FLAG(name) +#define GTEST_DECLARE_int32_(name) \ + GTEST_API_ extern ::testing::internal::Int32 GTEST_FLAG(name) +#define GTEST_DECLARE_string_(name) \ + GTEST_API_ extern ::testing::internal::String GTEST_FLAG(name) + +// Macros for defining flags. +#define GTEST_DEFINE_bool_(name, default_val, doc) \ + GTEST_API_ bool GTEST_FLAG(name) = (default_val) +#define GTEST_DEFINE_int32_(name, default_val, doc) \ + GTEST_API_ ::testing::internal::Int32 GTEST_FLAG(name) = (default_val) +#define GTEST_DEFINE_string_(name, default_val, doc) \ + GTEST_API_ ::testing::internal::String GTEST_FLAG(name) = (default_val) + +// Parses 'str' for a 32-bit signed integer. If successful, writes the result +// to *value and returns true; otherwise leaves *value unchanged and returns +// false. 
+// TODO(chandlerc): Find a better way to refactor flag and environment parsing +// out of both gtest-port.cc and gtest.cc to avoid exporting this utility +// function. +bool ParseInt32(const Message& src_text, const char* str, Int32* value); + +// Parses a bool/Int32/string from the environment variable +// corresponding to the given Google Test flag. +bool BoolFromGTestEnv(const char* flag, bool default_val); +GTEST_API_ Int32 Int32FromGTestEnv(const char* flag, Int32 default_val); +const char* StringFromGTestEnv(const char* flag, const char* default_val); + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ + +#if GTEST_OS_LINUX +# include +# include +# include +# include +#endif // GTEST_OS_LINUX + +#include +#include +#include +#include +#include + +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file declares the String class and functions used internally by +// Google Test. They are subject to change without notice. They should not used +// by code external to Google Test. +// +// This header file is #included by . +// It should not be #included by other files. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ + +#ifdef __BORLANDC__ +// string.h is not guaranteed to provide strcpy on C++ Builder. +# include +#endif + +#include + +#include + +namespace testing { +namespace internal { + +// String - a UTF-8 string class. +// +// For historic reasons, we don't use std::string. +// +// TODO(wan@google.com): replace this class with std::string or +// implement it in terms of the latter. +// +// Note that String can represent both NULL and the empty string, +// while std::string cannot represent NULL. +// +// NULL and the empty string are considered different. 
NULL is less +// than anything (including the empty string) except itself. +// +// This class only provides minimum functionality necessary for +// implementing Google Test. We do not intend to implement a full-fledged +// string class here. +// +// Since the purpose of this class is to provide a substitute for +// std::string on platforms where it cannot be used, we define a copy +// constructor and assignment operators such that we don't need +// conditional compilation in a lot of places. +// +// In order to make the representation efficient, the d'tor of String +// is not virtual. Therefore DO NOT INHERIT FROM String. +class GTEST_API_ String { + public: + // Static utility methods + + // Returns the input enclosed in double quotes if it's not NULL; + // otherwise returns "(null)". For example, "\"Hello\"" is returned + // for input "Hello". + // + // This is useful for printing a C string in the syntax of a literal. + // + // Known issue: escape sequences are not handled yet. + static String ShowCStringQuoted(const char* c_str); + + // Clones a 0-terminated C string, allocating memory using new. The + // caller is responsible for deleting the return value using + // delete[]. Returns the cloned string, or NULL if the input is + // NULL. + // + // This is different from strdup() in string.h, which allocates + // memory using malloc(). + static const char* CloneCString(const char* c_str); + +#if GTEST_OS_WINDOWS_MOBILE + // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be + // able to pass strings to Win32 APIs on CE we need to convert them + // to 'Unicode', UTF-16. + + // Creates a UTF-16 wide string from the given ANSI string, allocating + // memory using new. The caller is responsible for deleting the return + // value using delete[]. Returns the wide string, or NULL if the + // input is NULL. + // + // The wide string is created using the ANSI codepage (CP_ACP) to + // match the behaviour of the ANSI versions of Win32 calls and the + // C runtime. + static LPCWSTR AnsiToUtf16(const char* c_str); + + // Creates an ANSI string from the given wide string, allocating + // memory using new. The caller is responsible for deleting the return + // value using delete[]. Returns the ANSI string, or NULL if the + // input is NULL. + // + // The returned string is created using the ANSI codepage (CP_ACP) to + // match the behaviour of the ANSI versions of Win32 calls and the + // C runtime. + static const char* Utf16ToAnsi(LPCWSTR utf16_str); +#endif + + // Compares two C strings. Returns true iff they have the same content. + // + // Unlike strcmp(), this function can handle NULL argument(s). A + // NULL C string is considered different to any non-NULL C string, + // including the empty string. + static bool CStringEquals(const char* lhs, const char* rhs); + + // Converts a wide C string to a String using the UTF-8 encoding. + // NULL will be converted to "(null)". If an error occurred during + // the conversion, "(failed to convert from wide string)" is + // returned. + static String ShowWideCString(const wchar_t* wide_c_str); + + // Similar to ShowWideCString(), except that this function encloses + // the converted string in double quotes. + static String ShowWideCStringQuoted(const wchar_t* wide_c_str); + + // Compares two wide C strings. Returns true iff they have the same + // content. + // + // Unlike wcscmp(), this function can handle NULL argument(s). A + // NULL C string is considered different to any non-NULL C string, + // including the empty string. 
+ static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs); + + // Compares two C strings, ignoring case. Returns true iff they + // have the same content. + // + // Unlike strcasecmp(), this function can handle NULL argument(s). + // A NULL C string is considered different to any non-NULL C string, + // including the empty string. + static bool CaseInsensitiveCStringEquals(const char* lhs, + const char* rhs); + + // Compares two wide C strings, ignoring case. Returns true iff they + // have the same content. + // + // Unlike wcscasecmp(), this function can handle NULL argument(s). + // A NULL C string is considered different to any non-NULL wide C string, + // including the empty string. + // NB: The implementations on different platforms slightly differ. + // On windows, this method uses _wcsicmp which compares according to LC_CTYPE + // environment variable. On GNU platform this method uses wcscasecmp + // which compares according to LC_CTYPE category of the current locale. + // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the + // current locale. + static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs, + const wchar_t* rhs); + + // Formats a list of arguments to a String, using the same format + // spec string as for printf. + // + // We do not use the StringPrintf class as it is not universally + // available. + // + // The result is limited to 4096 characters (including the tailing + // 0). If 4096 characters are not enough to format the input, + // "" is returned. + static String Format(const char* format, ...); + + // C'tors + + // The default c'tor constructs a NULL string. + String() : c_str_(NULL), length_(0) {} + + // Constructs a String by cloning a 0-terminated C string. + String(const char* a_c_str) { // NOLINT + if (a_c_str == NULL) { + c_str_ = NULL; + length_ = 0; + } else { + ConstructNonNull(a_c_str, strlen(a_c_str)); + } + } + + // Constructs a String by copying a given number of chars from a + // buffer. E.g. String("hello", 3) creates the string "hel", + // String("a\0bcd", 4) creates "a\0bc", String(NULL, 0) creates "", + // and String(NULL, 1) results in access violation. + String(const char* buffer, size_t a_length) { + ConstructNonNull(buffer, a_length); + } + + // The copy c'tor creates a new copy of the string. The two + // String objects do not share content. + String(const String& str) : c_str_(NULL), length_(0) { *this = str; } + + // D'tor. String is intended to be a final class, so the d'tor + // doesn't need to be virtual. + ~String() { delete[] c_str_; } + + // Allows a String to be implicitly converted to an ::std::string or + // ::string, and vice versa. Converting a String containing a NULL + // pointer to ::std::string or ::string is undefined behavior. + // Converting a ::std::string or ::string containing an embedded NUL + // character to a String will result in the prefix up to the first + // NUL character. + String(const ::std::string& str) { + ConstructNonNull(str.c_str(), str.length()); + } + + operator ::std::string() const { return ::std::string(c_str(), length()); } + +#if GTEST_HAS_GLOBAL_STRING + String(const ::string& str) { + ConstructNonNull(str.c_str(), str.length()); + } + + operator ::string() const { return ::string(c_str(), length()); } +#endif // GTEST_HAS_GLOBAL_STRING + + // Returns true iff this is an empty string (i.e. ""). + bool empty() const { return (c_str() != NULL) && (length() == 0); } + + // Compares this with another String. 
+ // Returns < 0 if this is less than rhs, 0 if this is equal to rhs, or > 0 + // if this is greater than rhs. + int Compare(const String& rhs) const; + + // Returns true iff this String equals the given C string. A NULL + // string and a non-NULL string are considered not equal. + bool operator==(const char* a_c_str) const { return Compare(a_c_str) == 0; } + + // Returns true iff this String is less than the given String. A + // NULL string is considered less than "". + bool operator<(const String& rhs) const { return Compare(rhs) < 0; } + + // Returns true iff this String doesn't equal the given C string. A NULL + // string and a non-NULL string are considered not equal. + bool operator!=(const char* a_c_str) const { return !(*this == a_c_str); } + + // Returns true iff this String ends with the given suffix. *Any* + // String is considered to end with a NULL or empty suffix. + bool EndsWith(const char* suffix) const; + + // Returns true iff this String ends with the given suffix, not considering + // case. Any String is considered to end with a NULL or empty suffix. + bool EndsWithCaseInsensitive(const char* suffix) const; + + // Returns the length of the encapsulated string, or 0 if the + // string is NULL. + size_t length() const { return length_; } + + // Gets the 0-terminated C string this String object represents. + // The String object still owns the string. Therefore the caller + // should NOT delete the return value. + const char* c_str() const { return c_str_; } + + // Assigns a C string to this object. Self-assignment works. + const String& operator=(const char* a_c_str) { + return *this = String(a_c_str); + } + + // Assigns a String object to this object. Self-assignment works. + const String& operator=(const String& rhs) { + if (this != &rhs) { + delete[] c_str_; + if (rhs.c_str() == NULL) { + c_str_ = NULL; + length_ = 0; + } else { + ConstructNonNull(rhs.c_str(), rhs.length()); + } + } + + return *this; + } + + private: + // Constructs a non-NULL String from the given content. This + // function can only be called when c_str_ has not been allocated. + // ConstructNonNull(NULL, 0) results in an empty string (""). + // ConstructNonNull(NULL, non_zero) is undefined behavior. + void ConstructNonNull(const char* buffer, size_t a_length) { + char* const str = new char[a_length + 1]; + memcpy(str, buffer, a_length); + str[a_length] = '\0'; + c_str_ = str; + length_ = a_length; + } + + const char* c_str_; + size_t length_; +}; // class String + +// Streams a String to an ostream. Each '\0' character in the String +// is replaced with "\\0". +inline ::std::ostream& operator<<(::std::ostream& os, const String& str) { + if (str.c_str() == NULL) { + os << "(null)"; + } else { + const char* const c_str = str.c_str(); + for (size_t i = 0; i != str.length(); i++) { + if (c_str[i] == '\0') { + os << "\\0"; + } else { + os << c_str[i]; + } + } + } + return os; +} + +// Gets the content of the stringstream's buffer as a String. Each '\0' +// character in the buffer is replaced with "\\0". +GTEST_API_ String StringStreamToString(::std::stringstream* stream); + +// Converts a streamable value to a String. A NULL pointer is +// converted to "(null)". When the input value is a ::string, +// ::std::string, ::wstring, or ::std::wstring object, each NUL +// character in it is replaced with "\\0". + +// Declared here but defined in gtest.h, so that it has access +// to the definition of the Message class, required by the ARM +// compiler. 
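+// An illustrative, commented-out sketch of the String behaviour described
+// above (the results shown follow the comments in this file):
+//
+//   String s("a\0bc", 4);     // embedded NUL is kept; s.length() == 4
+//   std::cout << s;           // the NUL is streamed as "\\0": prints a\0bc
+//   String null_str;          // the default c'tor constructs a NULL string
+//   std::cout << null_str;    // prints (null)
+//   StreamableToString(42)    // declared below; would yield the String "42"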
+template +String StreamableToString(const T& streamable); + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: keith.ray@gmail.com (Keith Ray) +// +// Google Test filepath utilities +// +// This header file declares classes and functions used internally by +// Google Test. They are subject to change without notice. +// +// This file is #included in . +// Do not include this header file separately! + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ + + +namespace testing { +namespace internal { + +// FilePath - a class for file and directory pathname manipulation which +// handles platform-specific conventions (like the pathname separator). +// Used for helper functions for naming files in a directory for xml output. +// Except for Set methods, all methods are const or static, which provides an +// "immutable value object" -- useful for peace of mind. +// A FilePath with a value ending in a path separator ("like/this/") represents +// a directory, otherwise it is assumed to represent a file. In either case, +// it may or may not represent an actual file or directory in the file system. +// Names are NOT checked for syntax correctness -- no checking for illegal +// characters, malformed paths, etc. 
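+// An illustrative, commented-out sketch of the class declared below; the
+// results shown follow the documentation of the individual methods:
+//
+//   FilePath::MakeFileName(FilePath("dir"), FilePath("test"), 12, "xml")
+//                                           // yields FilePath("dir/test_12.xml")
+//   FilePath::ConcatPaths(FilePath("dir"), FilePath("test.xml"))
+//                                           // yields FilePath("dir/test.xml")
+//   FilePath("path/to/file").RemoveDirectoryName()  // yields FilePath("file")
+//   FilePath("path/to/file").RemoveFileName()       // yields FilePath("path/to/")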
+ +class GTEST_API_ FilePath { + public: + FilePath() : pathname_("") { } + FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { } + + explicit FilePath(const char* pathname) : pathname_(pathname) { + Normalize(); + } + + explicit FilePath(const String& pathname) : pathname_(pathname) { + Normalize(); + } + + FilePath& operator=(const FilePath& rhs) { + Set(rhs); + return *this; + } + + void Set(const FilePath& rhs) { + pathname_ = rhs.pathname_; + } + + String ToString() const { return pathname_; } + const char* c_str() const { return pathname_.c_str(); } + + // Returns the current working directory, or "" if unsuccessful. + static FilePath GetCurrentDir(); + + // Given directory = "dir", base_name = "test", number = 0, + // extension = "xml", returns "dir/test.xml". If number is greater + // than zero (e.g., 12), returns "dir/test_12.xml". + // On Windows platform, uses \ as the separator rather than /. + static FilePath MakeFileName(const FilePath& directory, + const FilePath& base_name, + int number, + const char* extension); + + // Given directory = "dir", relative_path = "test.xml", + // returns "dir/test.xml". + // On Windows, uses \ as the separator rather than /. + static FilePath ConcatPaths(const FilePath& directory, + const FilePath& relative_path); + + // Returns a pathname for a file that does not currently exist. The pathname + // will be directory/base_name.extension or + // directory/base_name_.extension if directory/base_name.extension + // already exists. The number will be incremented until a pathname is found + // that does not already exist. + // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'. + // There could be a race condition if two or more processes are calling this + // function at the same time -- they could both pick the same filename. + static FilePath GenerateUniqueFileName(const FilePath& directory, + const FilePath& base_name, + const char* extension); + + // Returns true iff the path is NULL or "". + bool IsEmpty() const { return c_str() == NULL || *c_str() == '\0'; } + + // If input name has a trailing separator character, removes it and returns + // the name, otherwise return the name string unmodified. + // On Windows platform, uses \ as the separator, other platforms use /. + FilePath RemoveTrailingPathSeparator() const; + + // Returns a copy of the FilePath with the directory part removed. + // Example: FilePath("path/to/file").RemoveDirectoryName() returns + // FilePath("file"). If there is no directory part ("just_a_file"), it returns + // the FilePath unmodified. If there is no file part ("just_a_dir/") it + // returns an empty FilePath (""). + // On Windows platform, '\' is the path separator, otherwise it is '/'. + FilePath RemoveDirectoryName() const; + + // RemoveFileName returns the directory path with the filename removed. + // Example: FilePath("path/to/file").RemoveFileName() returns "path/to/". + // If the FilePath is "a_file" or "/a_file", RemoveFileName returns + // FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does + // not have a file, like "just/a/dir/", it returns the FilePath unmodified. + // On Windows platform, '\' is the path separator, otherwise it is '/'. + FilePath RemoveFileName() const; + + // Returns a copy of the FilePath with the case-insensitive extension removed. + // Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns + // FilePath("dir/file"). If a case-insensitive extension is not + // found, returns a copy of the original FilePath. 
+ FilePath RemoveExtension(const char* extension) const; + + // Creates directories so that path exists. Returns true if successful or if + // the directories already exist; returns false if unable to create + // directories for any reason. Will also return false if the FilePath does + // not represent a directory (that is, it doesn't end with a path separator). + bool CreateDirectoriesRecursively() const; + + // Create the directory so that path exists. Returns true if successful or + // if the directory already exists; returns false if unable to create the + // directory for any reason, including if the parent directory does not + // exist. Not named "CreateDirectory" because that's a macro on Windows. + bool CreateFolder() const; + + // Returns true if FilePath describes something in the file-system, + // either a file, directory, or whatever, and that something exists. + bool FileOrDirectoryExists() const; + + // Returns true if pathname describes a directory in the file-system + // that exists. + bool DirectoryExists() const; + + // Returns true if FilePath ends with a path separator, which indicates that + // it is intended to represent a directory. Returns false otherwise. + // This does NOT check that a directory (or file) actually exists. + bool IsDirectory() const; + + // Returns true if pathname describes a root directory. (Windows has one + // root directory per disk drive.) + bool IsRootDirectory() const; + + // Returns true if pathname describes an absolute path. + bool IsAbsolutePath() const; + + private: + // Replaces multiple consecutive separators with a single separator. + // For example, "bar///foo" becomes "bar/foo". Does not eliminate other + // redundancies that might be in a pathname involving "." or "..". + // + // A pathname with multiple consecutive separators may occur either through + // user error or as a result of some scripts or APIs that generate a pathname + // with a trailing separator. On other platforms the same API or script + // may NOT generate a pathname with a trailing "/". Then elsewhere that + // pathname may have another "/" and pathname components added to it, + // without checking for the separator already being there. + // The script language and operating system may allow paths like "foo//bar" + // but some of the functions in FilePath will not handle that correctly. In + // particular, RemoveTrailingPathSeparator() only removes one separator, and + // it is called in CreateDirectoriesRecursively() assuming that it will change + // a pathname from directory syntax (trailing separator) to filename syntax. + // + // On Windows this method also replaces the alternate path separator '/' with + // the primary path separator '\\', so that for example "bar\\/\\foo" becomes + // "bar\\foo". + + void Normalize(); + + // Returns a pointer to the last occurence of a valid path separator in + // the FilePath. On Windows, for example, both '/' and '\' are valid path + // separators. Returns NULL if no path separator was found. + const char* FindLastPathSeparator() const; + + String pathname_; +}; // class FilePath + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ +// This file was GENERATED by command: +// pump.py gtest-type-util.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2008 Google Inc. +// All Rights Reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Type utilities needed for implementing typed and type-parameterized +// tests. This file is generated by a SCRIPT. DO NOT EDIT BY HAND! +// +// Currently we support at most 50 types in a list, and at most 50 +// type-parameterized tests in one type-parameterized test case. +// Please contact googletestframework@googlegroups.com if you need +// more. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ + + +// #ifdef __GNUC__ is too general here. It is possible to use gcc without using +// libstdc++ (which is where cxxabi.h comes from). +# ifdef __GLIBCXX__ +# include +# elif defined(__HP_aCC) +# include +# endif // __GLIBCXX__ + +namespace testing { +namespace internal { + +// GetTypeName() returns a human-readable name of type T. +// NB: This function is also used in Google Mock, so don't move it inside of +// the typed-test-only section below. +template +String GetTypeName() { +# if GTEST_HAS_RTTI + + const char* const name = typeid(T).name(); +# if defined(__GLIBCXX__) || defined(__HP_aCC) + int status = 0; + // gcc's implementation of typeid(T).name() mangles the type name, + // so we have to demangle it. +# ifdef __GLIBCXX__ + using abi::__cxa_demangle; +# endif // __GLIBCXX__ + char* const readable_name = __cxa_demangle(name, 0, 0, &status); + const String name_str(status == 0 ? readable_name : name); + free(readable_name); + return name_str; +# else + return name; +# endif // __GLIBCXX__ || __HP_aCC + +# else + + return ""; + +# endif // GTEST_HAS_RTTI +} + +#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +// AssertyTypeEq::type is defined iff T1 and T2 are the same +// type. This can be used as a compile-time assertion to ensure that +// two types are equal. + +template +struct AssertTypeEq; + +template +struct AssertTypeEq { + typedef bool type; +}; + +// A unique type used as the default value for the arguments of class +// template Types. 
This allows us to simulate variadic templates +// (e.g. Types, Type, and etc), which C++ doesn't +// support directly. +struct None {}; + +// The following family of struct and struct templates are used to +// represent type lists. In particular, TypesN +// represents a type list with N types (T1, T2, ..., and TN) in it. +// Except for Types0, every struct in the family has two member types: +// Head for the first type in the list, and Tail for the rest of the +// list. + +// The empty type list. +struct Types0 {}; + +// Type lists of length 1, 2, 3, and so on. + +template +struct Types1 { + typedef T1 Head; + typedef Types0 Tail; +}; +template +struct Types2 { + typedef T1 Head; + typedef Types1 Tail; +}; + +template +struct Types3 { + typedef T1 Head; + typedef Types2 Tail; +}; + +template +struct Types4 { + typedef T1 Head; + typedef Types3 Tail; +}; + +template +struct Types5 { + typedef T1 Head; + typedef Types4 Tail; +}; + +template +struct Types6 { + typedef T1 Head; + typedef Types5 Tail; +}; + +template +struct Types7 { + typedef T1 Head; + typedef Types6 Tail; +}; + +template +struct Types8 { + typedef T1 Head; + typedef Types7 Tail; +}; + +template +struct Types9 { + typedef T1 Head; + typedef Types8 Tail; +}; + +template +struct Types10 { + typedef T1 Head; + typedef Types9 Tail; +}; + +template +struct Types11 { + typedef T1 Head; + typedef Types10 Tail; +}; + +template +struct Types12 { + typedef T1 Head; + typedef Types11 Tail; +}; + +template +struct Types13 { + typedef T1 Head; + typedef Types12 Tail; +}; + +template +struct Types14 { + typedef T1 Head; + typedef Types13 Tail; +}; + +template +struct Types15 { + typedef T1 Head; + typedef Types14 Tail; +}; + +template +struct Types16 { + typedef T1 Head; + typedef Types15 Tail; +}; + +template +struct Types17 { + typedef T1 Head; + typedef Types16 Tail; +}; + +template +struct Types18 { + typedef T1 Head; + typedef Types17 Tail; +}; + +template +struct Types19 { + typedef T1 Head; + typedef Types18 Tail; +}; + +template +struct Types20 { + typedef T1 Head; + typedef Types19 Tail; +}; + +template +struct Types21 { + typedef T1 Head; + typedef Types20 Tail; +}; + +template +struct Types22 { + typedef T1 Head; + typedef Types21 Tail; +}; + +template +struct Types23 { + typedef T1 Head; + typedef Types22 Tail; +}; + +template +struct Types24 { + typedef T1 Head; + typedef Types23 Tail; +}; + +template +struct Types25 { + typedef T1 Head; + typedef Types24 Tail; +}; + +template +struct Types26 { + typedef T1 Head; + typedef Types25 Tail; +}; + +template +struct Types27 { + typedef T1 Head; + typedef Types26 Tail; +}; + +template +struct Types28 { + typedef T1 Head; + typedef Types27 Tail; +}; + +template +struct Types29 { + typedef T1 Head; + typedef Types28 Tail; +}; + +template +struct Types30 { + typedef T1 Head; + typedef Types29 Tail; +}; + +template +struct Types31 { + typedef T1 Head; + typedef Types30 Tail; +}; + +template +struct Types32 { + typedef T1 Head; + typedef Types31 Tail; +}; + +template +struct Types33 { + typedef T1 Head; + typedef Types32 Tail; +}; + +template +struct Types34 { + typedef T1 Head; + typedef Types33 Tail; +}; + +template +struct Types35 { + typedef T1 Head; + typedef Types34 Tail; +}; + +template +struct Types36 { + typedef T1 Head; + typedef Types35 Tail; +}; + +template +struct Types37 { + typedef T1 Head; + typedef Types36 Tail; +}; + +template +struct Types38 { + typedef T1 Head; + typedef Types37 Tail; +}; + +template +struct Types39 { + typedef T1 Head; + typedef Types38 Tail; +}; + 
+template +struct Types40 { + typedef T1 Head; + typedef Types39 Tail; +}; + +template +struct Types41 { + typedef T1 Head; + typedef Types40 Tail; +}; + +template +struct Types42 { + typedef T1 Head; + typedef Types41 Tail; +}; + +template +struct Types43 { + typedef T1 Head; + typedef Types42 Tail; +}; + +template +struct Types44 { + typedef T1 Head; + typedef Types43 Tail; +}; + +template +struct Types45 { + typedef T1 Head; + typedef Types44 Tail; +}; + +template +struct Types46 { + typedef T1 Head; + typedef Types45 Tail; +}; + +template +struct Types47 { + typedef T1 Head; + typedef Types46 Tail; +}; + +template +struct Types48 { + typedef T1 Head; + typedef Types47 Tail; +}; + +template +struct Types49 { + typedef T1 Head; + typedef Types48 Tail; +}; + +template +struct Types50 { + typedef T1 Head; + typedef Types49 Tail; +}; + + +} // namespace internal + +// We don't want to require the users to write TypesN<...> directly, +// as that would require them to count the length. Types<...> is much +// easier to write, but generates horrible messages when there is a +// compiler error, as gcc insists on printing out each template +// argument, even if it has the default value (this means Types +// will appear as Types in the compiler +// errors). +// +// Our solution is to combine the best part of the two approaches: a +// user would write Types, and Google Test will translate +// that to TypesN internally to make error messages +// readable. The translation is done by the 'type' member of the +// Types template. +template +struct Types { + typedef internal::Types50 type; +}; + +template <> +struct Types { + typedef internal::Types0 type; +}; +template +struct Types { + typedef internal::Types1 type; +}; +template +struct Types { + typedef internal::Types2 type; +}; +template +struct Types { + typedef internal::Types3 type; +}; +template +struct Types { + typedef internal::Types4 type; +}; +template +struct Types { + typedef internal::Types5 type; +}; +template +struct Types { + typedef internal::Types6 type; +}; +template +struct Types { + typedef internal::Types7 type; +}; +template +struct Types { + typedef internal::Types8 type; +}; +template +struct Types { + typedef internal::Types9 type; +}; +template +struct Types { + typedef internal::Types10 type; +}; +template +struct Types { + typedef internal::Types11 type; +}; +template +struct Types { + typedef internal::Types12 type; +}; +template +struct Types { + typedef internal::Types13 type; +}; +template +struct Types { + typedef internal::Types14 type; +}; +template +struct Types { + typedef internal::Types15 type; +}; +template +struct Types { + typedef internal::Types16 type; +}; +template +struct Types { + typedef internal::Types17 type; +}; +template +struct Types { + typedef internal::Types18 type; +}; +template +struct Types { + typedef internal::Types19 type; +}; +template +struct Types { + typedef internal::Types20 type; +}; +template +struct Types { + typedef internal::Types21 type; +}; +template +struct Types { + typedef internal::Types22 type; +}; +template +struct Types { + typedef internal::Types23 type; +}; +template +struct Types { + typedef internal::Types24 type; +}; +template +struct Types { + typedef internal::Types25 type; +}; +template +struct Types { + typedef internal::Types26 type; +}; +template +struct Types { + typedef internal::Types27 type; +}; +template +struct Types { + typedef internal::Types28 type; +}; +template +struct Types { + typedef internal::Types29 type; +}; +template +struct Types { + 
typedef internal::Types30 type; +}; +template +struct Types { + typedef internal::Types31 type; +}; +template +struct Types { + typedef internal::Types32 type; +}; +template +struct Types { + typedef internal::Types33 type; +}; +template +struct Types { + typedef internal::Types34 type; +}; +template +struct Types { + typedef internal::Types35 type; +}; +template +struct Types { + typedef internal::Types36 type; +}; +template +struct Types { + typedef internal::Types37 type; +}; +template +struct Types { + typedef internal::Types38 type; +}; +template +struct Types { + typedef internal::Types39 type; +}; +template +struct Types { + typedef internal::Types40 type; +}; +template +struct Types { + typedef internal::Types41 type; +}; +template +struct Types { + typedef internal::Types42 type; +}; +template +struct Types { + typedef internal::Types43 type; +}; +template +struct Types { + typedef internal::Types44 type; +}; +template +struct Types { + typedef internal::Types45 type; +}; +template +struct Types { + typedef internal::Types46 type; +}; +template +struct Types { + typedef internal::Types47 type; +}; +template +struct Types { + typedef internal::Types48 type; +}; +template +struct Types { + typedef internal::Types49 type; +}; + +namespace internal { + +# define GTEST_TEMPLATE_ template class + +// The template "selector" struct TemplateSel is used to +// represent Tmpl, which must be a class template with one type +// parameter, as a type. TemplateSel::Bind::type is defined +// as the type Tmpl. This allows us to actually instantiate the +// template "selected" by TemplateSel. +// +// This trick is necessary for simulating typedef for class templates, +// which C++ doesn't support directly. +template +struct TemplateSel { + template + struct Bind { + typedef Tmpl type; + }; +}; + +# define GTEST_BIND_(TmplSel, T) \ + TmplSel::template Bind::type + +// A unique struct template used as the default value for the +// arguments of class template Templates. This allows us to simulate +// variadic templates (e.g. Templates, Templates, +// and etc), which C++ doesn't support directly. +template +struct NoneT {}; + +// The following family of struct and struct templates are used to +// represent template lists. In particular, TemplatesN represents a list of N templates (T1, T2, ..., and TN). Except +// for Templates0, every struct in the family has two member types: +// Head for the selector of the first template in the list, and Tail +// for the rest of the list. + +// The empty template list. +struct Templates0 {}; + +// Template lists of length 1, 2, 3, and so on. 
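+//
+// As an illustration of the Head/Tail layout described above (Foo and Bar
+// are hypothetical class templates used only for this sketch):
+//
+//   Templates2<Foo, Bar>::Head   // TemplateSel<Foo>
+//   Templates2<Foo, Bar>::Tail   // Templates1<Bar>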
+ +template +struct Templates1 { + typedef TemplateSel Head; + typedef Templates0 Tail; +}; +template +struct Templates2 { + typedef TemplateSel Head; + typedef Templates1 Tail; +}; + +template +struct Templates3 { + typedef TemplateSel Head; + typedef Templates2 Tail; +}; + +template +struct Templates4 { + typedef TemplateSel Head; + typedef Templates3 Tail; +}; + +template +struct Templates5 { + typedef TemplateSel Head; + typedef Templates4 Tail; +}; + +template +struct Templates6 { + typedef TemplateSel Head; + typedef Templates5 Tail; +}; + +template +struct Templates7 { + typedef TemplateSel Head; + typedef Templates6 Tail; +}; + +template +struct Templates8 { + typedef TemplateSel Head; + typedef Templates7 Tail; +}; + +template +struct Templates9 { + typedef TemplateSel Head; + typedef Templates8 Tail; +}; + +template +struct Templates10 { + typedef TemplateSel Head; + typedef Templates9 Tail; +}; + +template +struct Templates11 { + typedef TemplateSel Head; + typedef Templates10 Tail; +}; + +template +struct Templates12 { + typedef TemplateSel Head; + typedef Templates11 Tail; +}; + +template +struct Templates13 { + typedef TemplateSel Head; + typedef Templates12 Tail; +}; + +template +struct Templates14 { + typedef TemplateSel Head; + typedef Templates13 Tail; +}; + +template +struct Templates15 { + typedef TemplateSel Head; + typedef Templates14 Tail; +}; + +template +struct Templates16 { + typedef TemplateSel Head; + typedef Templates15 Tail; +}; + +template +struct Templates17 { + typedef TemplateSel Head; + typedef Templates16 Tail; +}; + +template +struct Templates18 { + typedef TemplateSel Head; + typedef Templates17 Tail; +}; + +template +struct Templates19 { + typedef TemplateSel Head; + typedef Templates18 Tail; +}; + +template +struct Templates20 { + typedef TemplateSel Head; + typedef Templates19 Tail; +}; + +template +struct Templates21 { + typedef TemplateSel Head; + typedef Templates20 Tail; +}; + +template +struct Templates22 { + typedef TemplateSel Head; + typedef Templates21 Tail; +}; + +template +struct Templates23 { + typedef TemplateSel Head; + typedef Templates22 Tail; +}; + +template +struct Templates24 { + typedef TemplateSel Head; + typedef Templates23 Tail; +}; + +template +struct Templates25 { + typedef TemplateSel Head; + typedef Templates24 Tail; +}; + +template +struct Templates26 { + typedef TemplateSel Head; + typedef Templates25 Tail; +}; + +template +struct Templates27 { + typedef TemplateSel Head; + typedef Templates26 Tail; +}; + +template +struct Templates28 { + typedef TemplateSel Head; + typedef Templates27 Tail; +}; + +template +struct Templates29 { + typedef TemplateSel Head; + typedef Templates28 Tail; +}; + +template +struct Templates30 { + typedef TemplateSel Head; + typedef Templates29 Tail; +}; + +template +struct Templates31 { + typedef TemplateSel Head; + typedef Templates30 Tail; +}; + +template +struct Templates32 { + typedef TemplateSel Head; + typedef Templates31 Tail; +}; + +template +struct Templates33 { + typedef TemplateSel Head; + typedef Templates32 Tail; +}; + +template +struct Templates34 { + typedef TemplateSel Head; + typedef Templates33 Tail; +}; + +template +struct Templates35 { + typedef TemplateSel Head; + typedef Templates34 Tail; +}; + +template +struct Templates36 { + typedef TemplateSel Head; + typedef Templates35 Tail; +}; + +template +struct Templates37 { + typedef TemplateSel Head; + typedef Templates36 Tail; +}; + +template +struct Templates38 { + typedef TemplateSel Head; + typedef Templates37 Tail; +}; + 
+template +struct Templates39 { + typedef TemplateSel Head; + typedef Templates38 Tail; +}; + +template +struct Templates40 { + typedef TemplateSel Head; + typedef Templates39 Tail; +}; + +template +struct Templates41 { + typedef TemplateSel Head; + typedef Templates40 Tail; +}; + +template +struct Templates42 { + typedef TemplateSel Head; + typedef Templates41 Tail; +}; + +template +struct Templates43 { + typedef TemplateSel Head; + typedef Templates42 Tail; +}; + +template +struct Templates44 { + typedef TemplateSel Head; + typedef Templates43 Tail; +}; + +template +struct Templates45 { + typedef TemplateSel Head; + typedef Templates44 Tail; +}; + +template +struct Templates46 { + typedef TemplateSel Head; + typedef Templates45 Tail; +}; + +template +struct Templates47 { + typedef TemplateSel Head; + typedef Templates46 Tail; +}; + +template +struct Templates48 { + typedef TemplateSel Head; + typedef Templates47 Tail; +}; + +template +struct Templates49 { + typedef TemplateSel Head; + typedef Templates48 Tail; +}; + +template +struct Templates50 { + typedef TemplateSel Head; + typedef Templates49 Tail; +}; + + +// We don't want to require the users to write TemplatesN<...> directly, +// as that would require them to count the length. Templates<...> is much +// easier to write, but generates horrible messages when there is a +// compiler error, as gcc insists on printing out each template +// argument, even if it has the default value (this means Templates +// will appear as Templates in the compiler +// errors). +// +// Our solution is to combine the best part of the two approaches: a +// user would write Templates, and Google Test will translate +// that to TemplatesN internally to make error messages +// readable. The translation is done by the 'type' member of the +// Templates template. 
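+//
+// For instance (an illustrative sketch; Foo and Bar are hypothetical class
+// templates):
+//
+//   Templates<Foo, Bar>::type   // Templates2<Foo, Bar>
+//   Templates<Foo>::type        // Templates1<Foo>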
+template +struct Templates { + typedef Templates50 type; +}; + +template <> +struct Templates { + typedef Templates0 type; +}; +template +struct Templates { + typedef Templates1 type; +}; +template +struct Templates { + typedef Templates2 type; +}; +template +struct Templates { + typedef Templates3 type; +}; +template +struct Templates { + typedef Templates4 type; +}; +template +struct Templates { + typedef Templates5 type; +}; +template +struct Templates { + typedef Templates6 type; +}; +template +struct Templates { + typedef Templates7 type; +}; +template +struct Templates { + typedef Templates8 type; +}; +template +struct Templates { + typedef Templates9 type; +}; +template +struct Templates { + typedef Templates10 type; +}; +template +struct Templates { + typedef Templates11 type; +}; +template +struct Templates { + typedef Templates12 type; +}; +template +struct Templates { + typedef Templates13 type; +}; +template +struct Templates { + typedef Templates14 type; +}; +template +struct Templates { + typedef Templates15 type; +}; +template +struct Templates { + typedef Templates16 type; +}; +template +struct Templates { + typedef Templates17 type; +}; +template +struct Templates { + typedef Templates18 type; +}; +template +struct Templates { + typedef Templates19 type; +}; +template +struct Templates { + typedef Templates20 type; +}; +template +struct Templates { + typedef Templates21 type; +}; +template +struct Templates { + typedef Templates22 type; +}; +template +struct Templates { + typedef Templates23 type; +}; +template +struct Templates { + typedef Templates24 type; +}; +template +struct Templates { + typedef Templates25 type; +}; +template +struct Templates { + typedef Templates26 type; +}; +template +struct Templates { + typedef Templates27 type; +}; +template +struct Templates { + typedef Templates28 type; +}; +template +struct Templates { + typedef Templates29 type; +}; +template +struct Templates { + typedef Templates30 type; +}; +template +struct Templates { + typedef Templates31 type; +}; +template +struct Templates { + typedef Templates32 type; +}; +template +struct Templates { + typedef Templates33 type; +}; +template +struct Templates { + typedef Templates34 type; +}; +template +struct Templates { + typedef Templates35 type; +}; +template +struct Templates { + typedef Templates36 type; +}; +template +struct Templates { + typedef Templates37 type; +}; +template +struct Templates { + typedef Templates38 type; +}; +template +struct Templates { + typedef Templates39 type; +}; +template +struct Templates { + typedef Templates40 type; +}; +template +struct Templates { + typedef Templates41 type; +}; +template +struct Templates { + typedef Templates42 type; +}; +template +struct Templates { + typedef Templates43 type; +}; +template +struct Templates { + typedef Templates44 type; +}; +template +struct Templates { + typedef Templates45 type; +}; +template +struct Templates { + typedef Templates46 type; +}; +template +struct Templates { + typedef Templates47 type; +}; +template +struct Templates { + typedef Templates48 type; +}; +template +struct Templates { + typedef Templates49 type; +}; + +// The TypeList template makes it possible to use either a single type +// or a Types<...> list in TYPED_TEST_CASE() and +// INSTANTIATE_TYPED_TEST_CASE_P(). 
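+//
+// For example (an illustrative usage sketch; MyFixture is a hypothetical
+// test fixture template):
+//
+//   typedef ::testing::Types<char, int, double> MyTypes;
+//   TYPED_TEST_CASE(MyFixture, MyTypes);  // goes through TypeList<MyTypes>::type
+//   TYPED_TEST_CASE(MyFixture, int);      // goes through TypeList<int>::type,
+//                                         // i.e. Types1<int>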
+ +template +struct TypeList { typedef Types1 type; }; + +template +struct TypeList > { + typedef typename Types::type type; +}; + +#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ + +// Due to C++ preprocessor weirdness, we need double indirection to +// concatenate two tokens when one of them is __LINE__. Writing +// +// foo ## __LINE__ +// +// will result in the token foo__LINE__, instead of foo followed by +// the current line number. For more details, see +// http://www.parashift.com/c++-faq-lite/misc-technical-issues.html#faq-39.6 +#define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar) +#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar + +// Google Test defines the testing::Message class to allow construction of +// test messages via the << operator. The idea is that anything +// streamable to std::ostream can be streamed to a testing::Message. +// This allows a user to use his own types in Google Test assertions by +// overloading the << operator. +// +// util/gtl/stl_logging-inl.h overloads << for STL containers. These +// overloads cannot be defined in the std namespace, as that will be +// undefined behavior. Therefore, they are defined in the global +// namespace instead. +// +// C++'s symbol lookup rule (i.e. Koenig lookup) says that these +// overloads are visible in either the std namespace or the global +// namespace, but not other namespaces, including the testing +// namespace which Google Test's Message class is in. +// +// To allow STL containers (and other types that has a << operator +// defined in the global namespace) to be used in Google Test assertions, +// testing::Message must access the custom << operator from the global +// namespace. Hence this helper function. +// +// Note: Jeffrey Yasskin suggested an alternative fix by "using +// ::operator<<;" in the definition of Message's operator<<. That fix +// doesn't require a helper function, but unfortunately doesn't +// compile with MSVC. +template +inline void GTestStreamToHelper(std::ostream* os, const T& val) { + *os << val; +} + +class ProtocolMessage; +namespace proto2 { class Message; } + +namespace testing { + +// Forward declarations. + +class AssertionResult; // Result of an assertion. +class Message; // Represents a failure message. +class Test; // Represents a test. +class TestInfo; // Information about a test. +class TestPartResult; // Result of a test part. +class UnitTest; // A collection of test cases. + +template +::std::string PrintToString(const T& value); + +namespace internal { + +struct TraceInfo; // Information about a trace point. +class ScopedTrace; // Implements scoped trace. +class TestInfoImpl; // Opaque implementation of TestInfo +class UnitTestImpl; // Opaque implementation of UnitTest + +// How many times InitGoogleTest() has been called. +extern int g_init_gtest_count; + +// The text used in failure messages to indicate the start of the +// stack trace. +GTEST_API_ extern const char kStackTraceMarker[]; + +// A secret type that Google Test users don't know about. It has no +// definition on purpose. Therefore it's impossible to create a +// Secret object, which is what we want. +class Secret; + +// Two overloaded helpers for checking at compile time whether an +// expression is a null pointer literal (i.e. NULL or any 0-valued +// compile-time integral constant). 
Their return values have +// different sizes, so we can use sizeof() to test which version is +// picked by the compiler. These helpers have no implementations, as +// we only need their signatures. +// +// Given IsNullLiteralHelper(x), the compiler will pick the first +// version if x can be implicitly converted to Secret*, and pick the +// second version otherwise. Since Secret is a secret and incomplete +// type, the only expression a user can write that has type Secret* is +// a null pointer literal. Therefore, we know that x is a null +// pointer literal if and only if the first version is picked by the +// compiler. +char IsNullLiteralHelper(Secret* p); +char (&IsNullLiteralHelper(...))[2]; // NOLINT + +// A compile-time bool constant that is true if and only if x is a +// null pointer literal (i.e. NULL or any 0-valued compile-time +// integral constant). +#ifdef GTEST_ELLIPSIS_NEEDS_POD_ +// We lose support for NULL detection where the compiler doesn't like +// passing non-POD classes through ellipsis (...). +# define GTEST_IS_NULL_LITERAL_(x) false +#else +# define GTEST_IS_NULL_LITERAL_(x) \ + (sizeof(::testing::internal::IsNullLiteralHelper(x)) == 1) +#endif // GTEST_ELLIPSIS_NEEDS_POD_ + +// Appends the user-supplied message to the Google-Test-generated message. +GTEST_API_ String AppendUserMessage(const String& gtest_msg, + const Message& user_msg); + +// A helper class for creating scoped traces in user programs. +class GTEST_API_ ScopedTrace { + public: + // The c'tor pushes the given source file location and message onto + // a trace stack maintained by Google Test. + ScopedTrace(const char* file, int line, const Message& message); + + // The d'tor pops the info pushed by the c'tor. + // + // Note that the d'tor is not virtual in order to be efficient. + // Don't inherit from ScopedTrace! + ~ScopedTrace(); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedTrace); +} GTEST_ATTRIBUTE_UNUSED_; // A ScopedTrace object does its job in its + // c'tor and d'tor. Therefore it doesn't + // need to be used otherwise. + +// Converts a streamable value to a String. A NULL pointer is +// converted to "(null)". When the input value is a ::string, +// ::std::string, ::wstring, or ::std::wstring object, each NUL +// character in it is replaced with "\\0". +// Declared here but defined in gtest.h, so that it has access +// to the definition of the Message class, required by the ARM +// compiler. +template +String StreamableToString(const T& streamable); + +// The Symbian compiler has a bug that prevents it from selecting the +// correct overload of FormatForComparisonFailureMessage (see below) +// unless we pass the first argument by reference. If we do that, +// however, Visual Age C++ 10.1 generates a compiler error. Therefore +// we only apply the work-around for Symbian. +#if defined(__SYMBIAN32__) +# define GTEST_CREF_WORKAROUND_ const& +#else +# define GTEST_CREF_WORKAROUND_ +#endif + +// When this operand is a const char* or char*, if the other operand +// is a ::std::string or ::string, we print this operand as a C string +// rather than a pointer (we do the same for wide strings); otherwise +// we print it as a pointer to be safe. + +// This internal macro is used to avoid duplicated code. 
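+//
+// As a rough sketch of what one expansion provides (illustrative only):
+//
+//   GTEST_FORMAT_IMPL_(::std::string, String::ShowCStringQuoted)
+//
+// defines two FormatForComparisonFailureMessage() overloads that take a
+// (possibly const) char* plus a ::std::string and quote the C string via
+// String::ShowCStringQuoted().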
+#define GTEST_FORMAT_IMPL_(operand2_type, operand1_printer)\ +inline String FormatForComparisonFailureMessage(\ + operand2_type::value_type* GTEST_CREF_WORKAROUND_ str, \ + const operand2_type& /*operand2*/) {\ + return operand1_printer(str);\ +}\ +inline String FormatForComparisonFailureMessage(\ + const operand2_type::value_type* GTEST_CREF_WORKAROUND_ str, \ + const operand2_type& /*operand2*/) {\ + return operand1_printer(str);\ +} + +GTEST_FORMAT_IMPL_(::std::string, String::ShowCStringQuoted) +#if GTEST_HAS_STD_WSTRING +GTEST_FORMAT_IMPL_(::std::wstring, String::ShowWideCStringQuoted) +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_GLOBAL_STRING +GTEST_FORMAT_IMPL_(::string, String::ShowCStringQuoted) +#endif // GTEST_HAS_GLOBAL_STRING +#if GTEST_HAS_GLOBAL_WSTRING +GTEST_FORMAT_IMPL_(::wstring, String::ShowWideCStringQuoted) +#endif // GTEST_HAS_GLOBAL_WSTRING + +#undef GTEST_FORMAT_IMPL_ + +// The next four overloads handle the case where the operand being +// printed is a char/wchar_t pointer and the other operand is not a +// string/wstring object. In such cases, we just print the operand as +// a pointer to be safe. +#define GTEST_FORMAT_CHAR_PTR_IMPL_(CharType) \ + template \ + String FormatForComparisonFailureMessage(CharType* GTEST_CREF_WORKAROUND_ p, \ + const T&) { \ + return PrintToString(static_cast(p)); \ + } + +GTEST_FORMAT_CHAR_PTR_IMPL_(char) +GTEST_FORMAT_CHAR_PTR_IMPL_(const char) +GTEST_FORMAT_CHAR_PTR_IMPL_(wchar_t) +GTEST_FORMAT_CHAR_PTR_IMPL_(const wchar_t) + +#undef GTEST_FORMAT_CHAR_PTR_IMPL_ + +// Constructs and returns the message for an equality assertion +// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure. +// +// The first four parameters are the expressions used in the assertion +// and their values, as strings. For example, for ASSERT_EQ(foo, bar) +// where foo is 5 and bar is 6, we have: +// +// expected_expression: "foo" +// actual_expression: "bar" +// expected_value: "5" +// actual_value: "6" +// +// The ignoring_case parameter is true iff the assertion is a +// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will +// be inserted into the message. +GTEST_API_ AssertionResult EqFailure(const char* expected_expression, + const char* actual_expression, + const String& expected_value, + const String& actual_value, + bool ignoring_case); + +// Constructs a failure message for Boolean assertions such as EXPECT_TRUE. +GTEST_API_ String GetBoolAssertionFailureMessage( + const AssertionResult& assertion_result, + const char* expression_text, + const char* actual_predicate_value, + const char* expected_predicate_value); + +// This template class represents an IEEE floating-point number +// (either single-precision or double-precision, depending on the +// template parameters). +// +// The purpose of this class is to do more sophisticated number +// comparison. (Due to round-off error, etc, it's very unlikely that +// two floating-points will be equal exactly. Hence a naive +// comparison by the == operation often doesn't work.) +// +// Format of IEEE floating-point: +// +// The most-significant bit being the leftmost, an IEEE +// floating-point looks like +// +// sign_bit exponent_bits fraction_bits +// +// Here, sign_bit is a single bit that designates the sign of the +// number. +// +// For float, there are 8 exponent bits and 23 fraction bits. +// +// For double, there are 11 exponent bits and 52 fraction bits. +// +// More details can be found at +// http://en.wikipedia.org/wiki/IEEE_floating-point_standard. 
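+//
+// For instance, a 32-bit float is laid out as
+//
+//   1 sign bit | 8 exponent bits | 23 fraction bits   (1 + 8 + 23 = 32)
+//
+// and a 64-bit double as 1 + 11 + 52 bits.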
+// +// Template parameter: +// +// RawType: the raw floating-point type (either float or double) +template +class FloatingPoint { + public: + // Defines the unsigned integer type that has the same size as the + // floating point number. + typedef typename TypeWithSize::UInt Bits; + + // Constants. + + // # of bits in a number. + static const size_t kBitCount = 8*sizeof(RawType); + + // # of fraction bits in a number. + static const size_t kFractionBitCount = + std::numeric_limits::digits - 1; + + // # of exponent bits in a number. + static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount; + + // The mask for the sign bit. + static const Bits kSignBitMask = static_cast(1) << (kBitCount - 1); + + // The mask for the fraction bits. + static const Bits kFractionBitMask = + ~static_cast(0) >> (kExponentBitCount + 1); + + // The mask for the exponent bits. + static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask); + + // How many ULP's (Units in the Last Place) we want to tolerate when + // comparing two numbers. The larger the value, the more error we + // allow. A 0 value means that two numbers must be exactly the same + // to be considered equal. + // + // The maximum error of a single floating-point operation is 0.5 + // units in the last place. On Intel CPU's, all floating-point + // calculations are done with 80-bit precision, while double has 64 + // bits. Therefore, 4 should be enough for ordinary use. + // + // See the following article for more details on ULP: + // http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm. + static const size_t kMaxUlps = 4; + + // Constructs a FloatingPoint from a raw floating-point number. + // + // On an Intel CPU, passing a non-normalized NAN (Not a Number) + // around may change its bits, although the new value is guaranteed + // to be also a NAN. Therefore, don't expect this constructor to + // preserve the bits in x when x is a NAN. + explicit FloatingPoint(const RawType& x) { u_.value_ = x; } + + // Static methods + + // Reinterprets a bit pattern as a floating-point number. + // + // This function is needed to test the AlmostEquals() method. + static RawType ReinterpretBits(const Bits bits) { + FloatingPoint fp(0); + fp.u_.bits_ = bits; + return fp.u_.value_; + } + + // Returns the floating-point number that represent positive infinity. + static RawType Infinity() { + return ReinterpretBits(kExponentBitMask); + } + + // Non-static methods + + // Returns the bits that represents this number. + const Bits &bits() const { return u_.bits_; } + + // Returns the exponent bits of this number. + Bits exponent_bits() const { return kExponentBitMask & u_.bits_; } + + // Returns the fraction bits of this number. + Bits fraction_bits() const { return kFractionBitMask & u_.bits_; } + + // Returns the sign bit of this number. + Bits sign_bit() const { return kSignBitMask & u_.bits_; } + + // Returns true iff this is NAN (not a number). + bool is_nan() const { + // It's a NAN if the exponent bits are all ones and the fraction + // bits are not entirely zeros. + return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0); + } + + // Returns true iff this number is at most kMaxUlps ULP's away from + // rhs. In particular, this function: + // + // - returns false if either number is (or both are) NAN. + // - treats really large numbers as almost equal to infinity. + // - thinks +0.0 and -0.0 are 0 DLP's apart. 
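+  //
+  // A usage sketch (illustrative; Float is the typedef defined further
+  // below, and FLT_EPSILON comes from <cfloat>):
+  //
+  //   const Float lhs(1.0f);
+  //   const Float rhs(1.0f + FLT_EPSILON);   // the next representable float
+  //   lhs.AlmostEquals(rhs);                 // true: only 1 ULP apart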
+ bool AlmostEquals(const FloatingPoint& rhs) const { + // The IEEE standard says that any comparison operation involving + // a NAN must return false. + if (is_nan() || rhs.is_nan()) return false; + + return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_) + <= kMaxUlps; + } + + private: + // The data type used to store the actual floating-point number. + union FloatingPointUnion { + RawType value_; // The raw floating-point number. + Bits bits_; // The bits that represent the number. + }; + + // Converts an integer from the sign-and-magnitude representation to + // the biased representation. More precisely, let N be 2 to the + // power of (kBitCount - 1), an integer x is represented by the + // unsigned number x + N. + // + // For instance, + // + // -N + 1 (the most negative number representable using + // sign-and-magnitude) is represented by 1; + // 0 is represented by N; and + // N - 1 (the biggest number representable using + // sign-and-magnitude) is represented by 2N - 1. + // + // Read http://en.wikipedia.org/wiki/Signed_number_representations + // for more details on signed number representations. + static Bits SignAndMagnitudeToBiased(const Bits &sam) { + if (kSignBitMask & sam) { + // sam represents a negative number. + return ~sam + 1; + } else { + // sam represents a positive number. + return kSignBitMask | sam; + } + } + + // Given two numbers in the sign-and-magnitude representation, + // returns the distance between them as an unsigned number. + static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1, + const Bits &sam2) { + const Bits biased1 = SignAndMagnitudeToBiased(sam1); + const Bits biased2 = SignAndMagnitudeToBiased(sam2); + return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1); + } + + FloatingPointUnion u_; +}; + +// Typedefs the instances of the FloatingPoint template class that we +// care to use. +typedef FloatingPoint Float; +typedef FloatingPoint Double; + +// In order to catch the mistake of putting tests that use different +// test fixture classes in the same test case, we need to assign +// unique IDs to fixture classes and compare them. The TypeId type is +// used to hold such IDs. The user should treat TypeId as an opaque +// type: the only operation allowed on TypeId values is to compare +// them for equality using the == operator. +typedef const void* TypeId; + +template +class TypeIdHelper { + public: + // dummy_ must not have a const type. Otherwise an overly eager + // compiler (e.g. MSVC 7.1 & 8.0) may try to merge + // TypeIdHelper::dummy_ for different Ts as an "optimization". + static bool dummy_; +}; + +template +bool TypeIdHelper::dummy_ = false; + +// GetTypeId() returns the ID of type T. Different values will be +// returned for different types. Calling the function twice with the +// same type argument is guaranteed to return the same ID. +template +TypeId GetTypeId() { + // The compiler is required to allocate a different + // TypeIdHelper::dummy_ variable for each T used to instantiate + // the template. Therefore, the address of dummy_ is guaranteed to + // be unique. + return &(TypeIdHelper::dummy_); +} + +// Returns the type ID of ::testing::Test. Always call this instead +// of GetTypeId< ::testing::Test>() to get the type ID of +// ::testing::Test, as the latter may give the wrong result due to a +// suspected linker bug when compiling Google Test as a Mac OS X +// framework. 
+GTEST_API_ TypeId GetTestTypeId(); + +// Defines the abstract factory interface that creates instances +// of a Test object. +class TestFactoryBase { + public: + virtual ~TestFactoryBase() {} + + // Creates a test instance to run. The instance is both created and destroyed + // within TestInfoImpl::Run() + virtual Test* CreateTest() = 0; + + protected: + TestFactoryBase() {} + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase); +}; + +// This class provides implementation of TeastFactoryBase interface. +// It is used in TEST and TEST_F macros. +template +class TestFactoryImpl : public TestFactoryBase { + public: + virtual Test* CreateTest() { return new TestClass; } +}; + +#if GTEST_OS_WINDOWS + +// Predicate-formatters for implementing the HRESULT checking macros +// {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED} +// We pass a long instead of HRESULT to avoid causing an +// include dependency for the HRESULT type. +GTEST_API_ AssertionResult IsHRESULTSuccess(const char* expr, + long hr); // NOLINT +GTEST_API_ AssertionResult IsHRESULTFailure(const char* expr, + long hr); // NOLINT + +#endif // GTEST_OS_WINDOWS + +// Types of SetUpTestCase() and TearDownTestCase() functions. +typedef void (*SetUpTestCaseFunc)(); +typedef void (*TearDownTestCaseFunc)(); + +// Creates a new TestInfo object and registers it with Google Test; +// returns the created object. +// +// Arguments: +// +// test_case_name: name of the test case +// name: name of the test +// type_param the name of the test's type parameter, or NULL if +// this is not a typed or a type-parameterized test. +// value_param text representation of the test's value parameter, +// or NULL if this is not a type-parameterized test. +// fixture_class_id: ID of the test fixture class +// set_up_tc: pointer to the function that sets up the test case +// tear_down_tc: pointer to the function that tears down the test case +// factory: pointer to the factory that creates a test object. +// The newly created TestInfo instance will assume +// ownership of the factory object. +GTEST_API_ TestInfo* MakeAndRegisterTestInfo( + const char* test_case_name, const char* name, + const char* type_param, + const char* value_param, + TypeId fixture_class_id, + SetUpTestCaseFunc set_up_tc, + TearDownTestCaseFunc tear_down_tc, + TestFactoryBase* factory); + +// If *pstr starts with the given prefix, modifies *pstr to be right +// past the prefix and returns true; otherwise leaves *pstr unchanged +// and returns false. None of pstr, *pstr, and prefix can be NULL. +GTEST_API_ bool SkipPrefix(const char* prefix, const char** pstr); + +#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +// State of the definition of a type-parameterized test case. +class GTEST_API_ TypedTestCasePState { + public: + TypedTestCasePState() : registered_(false) {} + + // Adds the given test name to defined_test_names_ and return true + // if the test case hasn't been registered; otherwise aborts the + // program. + bool AddTestName(const char* file, int line, const char* case_name, + const char* test_name) { + if (registered_) { + fprintf(stderr, "%s Test %s must be defined before " + "REGISTER_TYPED_TEST_CASE_P(%s, ...).\n", + FormatFileLocation(file, line).c_str(), test_name, case_name); + fflush(stderr); + posix::Abort(); + } + defined_test_names_.insert(test_name); + return true; + } + + // Verifies that registered_tests match the test names in + // defined_test_names_; returns registered_tests if successful, or + // aborts the program otherwise. 
+ const char* VerifyRegisteredTestNames( + const char* file, int line, const char* registered_tests); + + private: + bool registered_; + ::std::set defined_test_names_; +}; + +// Skips to the first non-space char after the first comma in 'str'; +// returns NULL if no comma is found in 'str'. +inline const char* SkipComma(const char* str) { + const char* comma = strchr(str, ','); + if (comma == NULL) { + return NULL; + } + while (IsSpace(*(++comma))) {} + return comma; +} + +// Returns the prefix of 'str' before the first comma in it; returns +// the entire string if it contains no comma. +inline String GetPrefixUntilComma(const char* str) { + const char* comma = strchr(str, ','); + return comma == NULL ? String(str) : String(str, comma - str); +} + +// TypeParameterizedTest::Register() +// registers a list of type-parameterized tests with Google Test. The +// return value is insignificant - we just need to return something +// such that we can call this function in a namespace scope. +// +// Implementation note: The GTEST_TEMPLATE_ macro declares a template +// template parameter. It's defined in gtest-type-util.h. +template +class TypeParameterizedTest { + public: + // 'index' is the index of the test in the type list 'Types' + // specified in INSTANTIATE_TYPED_TEST_CASE_P(Prefix, TestCase, + // Types). Valid values for 'index' are [0, N - 1] where N is the + // length of Types. + static bool Register(const char* prefix, const char* case_name, + const char* test_names, int index) { + typedef typename Types::Head Type; + typedef Fixture FixtureClass; + typedef typename GTEST_BIND_(TestSel, Type) TestClass; + + // First, registers the first type-parameterized test in the type + // list. + MakeAndRegisterTestInfo( + String::Format("%s%s%s/%d", prefix, prefix[0] == '\0' ? "" : "/", + case_name, index).c_str(), + GetPrefixUntilComma(test_names).c_str(), + GetTypeName().c_str(), + NULL, // No value parameter. + GetTypeId(), + TestClass::SetUpTestCase, + TestClass::TearDownTestCase, + new TestFactoryImpl); + + // Next, recurses (at compile time) with the tail of the type list. + return TypeParameterizedTest + ::Register(prefix, case_name, test_names, index + 1); + } +}; + +// The base case for the compile time recursion. +template +class TypeParameterizedTest { + public: + static bool Register(const char* /*prefix*/, const char* /*case_name*/, + const char* /*test_names*/, int /*index*/) { + return true; + } +}; + +// TypeParameterizedTestCase::Register() +// registers *all combinations* of 'Tests' and 'Types' with Google +// Test. The return value is insignificant - we just need to return +// something such that we can call this function in a namespace scope. +template +class TypeParameterizedTestCase { + public: + static bool Register(const char* prefix, const char* case_name, + const char* test_names) { + typedef typename Tests::Head Head; + + // First, register the first test in 'Test' for each type in 'Types'. + TypeParameterizedTest::Register( + prefix, case_name, test_names, 0); + + // Next, recurses (at compile time) with the tail of the test list. + return TypeParameterizedTestCase + ::Register(prefix, case_name, SkipComma(test_names)); + } +}; + +// The base case for the compile time recursion. 
+template +class TypeParameterizedTestCase { + public: + static bool Register(const char* /*prefix*/, const char* /*case_name*/, + const char* /*test_names*/) { + return true; + } +}; + +#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +// Returns the current OS stack trace as a String. +// +// The maximum number of stack frames to be included is specified by +// the gtest_stack_trace_depth flag. The skip_count parameter +// specifies the number of top frames to be skipped, which doesn't +// count against the number of frames to be included. +// +// For example, if Foo() calls Bar(), which in turn calls +// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in +// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't. +GTEST_API_ String GetCurrentOsStackTraceExceptTop(UnitTest* unit_test, + int skip_count); + +// Helpers for suppressing warnings on unreachable code or constant +// condition. + +// Always returns true. +GTEST_API_ bool AlwaysTrue(); + +// Always returns false. +inline bool AlwaysFalse() { return !AlwaysTrue(); } + +// Helper for suppressing false warning from Clang on a const char* +// variable declared in a conditional expression always being NULL in +// the else branch. +struct GTEST_API_ ConstCharPtr { + ConstCharPtr(const char* str) : value(str) {} + operator bool() const { return true; } + const char* value; +}; + +// A simple Linear Congruential Generator for generating random +// numbers with a uniform distribution. Unlike rand() and srand(), it +// doesn't use global state (and therefore can't interfere with user +// code). Unlike rand_r(), it's portable. An LCG isn't very random, +// but it's good enough for our purposes. +class GTEST_API_ Random { + public: + static const UInt32 kMaxRange = 1u << 31; + + explicit Random(UInt32 seed) : state_(seed) {} + + void Reseed(UInt32 seed) { state_ = seed; } + + // Generates a random number from [0, range). Crashes if 'range' is + // 0 or greater than kMaxRange. + UInt32 Generate(UInt32 range); + + private: + UInt32 state_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(Random); +}; + +// Defining a variable of type CompileAssertTypesEqual will cause a +// compiler error iff T1 and T2 are different types. +template +struct CompileAssertTypesEqual; + +template +struct CompileAssertTypesEqual { +}; + +// Removes the reference from a type if it is a reference type, +// otherwise leaves it unchanged. This is the same as +// tr1::remove_reference, which is not widely available yet. +template +struct RemoveReference { typedef T type; }; // NOLINT +template +struct RemoveReference { typedef T type; }; // NOLINT + +// A handy wrapper around RemoveReference that works when the argument +// T depends on template parameters. +#define GTEST_REMOVE_REFERENCE_(T) \ + typename ::testing::internal::RemoveReference::type + +// Removes const from a type if it is a const type, otherwise leaves +// it unchanged. This is the same as tr1::remove_const, which is not +// widely available yet. +template +struct RemoveConst { typedef T type; }; // NOLINT +template +struct RemoveConst { typedef T type; }; // NOLINT + +// MSVC 8.0, Sun C++, and IBM XL C++ have a bug which causes the above +// definition to fail to remove the const in 'const int[3]' and 'const +// char[3][4]'. The following specialization works around the bug. +// However, it causes trouble with GCC and thus needs to be +// conditionally compiled. 
+#if defined(_MSC_VER) || defined(__SUNPRO_CC) || defined(__IBMCPP__) +template +struct RemoveConst { + typedef typename RemoveConst::type type[N]; +}; +#endif + +// A handy wrapper around RemoveConst that works when the argument +// T depends on template parameters. +#define GTEST_REMOVE_CONST_(T) \ + typename ::testing::internal::RemoveConst::type + +// Turns const U&, U&, const U, and U all into U. +#define GTEST_REMOVE_REFERENCE_AND_CONST_(T) \ + GTEST_REMOVE_CONST_(GTEST_REMOVE_REFERENCE_(T)) + +// Adds reference to a type if it is not a reference type, +// otherwise leaves it unchanged. This is the same as +// tr1::add_reference, which is not widely available yet. +template +struct AddReference { typedef T& type; }; // NOLINT +template +struct AddReference { typedef T& type; }; // NOLINT + +// A handy wrapper around AddReference that works when the argument T +// depends on template parameters. +#define GTEST_ADD_REFERENCE_(T) \ + typename ::testing::internal::AddReference::type + +// Adds a reference to const on top of T as necessary. For example, +// it transforms +// +// char ==> const char& +// const char ==> const char& +// char& ==> const char& +// const char& ==> const char& +// +// The argument T must depend on some template parameters. +#define GTEST_REFERENCE_TO_CONST_(T) \ + GTEST_ADD_REFERENCE_(const GTEST_REMOVE_REFERENCE_(T)) + +// ImplicitlyConvertible::value is a compile-time bool +// constant that's true iff type From can be implicitly converted to +// type To. +template +class ImplicitlyConvertible { + private: + // We need the following helper functions only for their types. + // They have no implementations. + + // MakeFrom() is an expression whose type is From. We cannot simply + // use From(), as the type From may not have a public default + // constructor. + static From MakeFrom(); + + // These two functions are overloaded. Given an expression + // Helper(x), the compiler will pick the first version if x can be + // implicitly converted to type To; otherwise it will pick the + // second version. + // + // The first version returns a value of size 1, and the second + // version returns a value of size 2. Therefore, by checking the + // size of Helper(x), which can be done at compile time, we can tell + // which version of Helper() is used, and hence whether x can be + // implicitly converted to type To. + static char Helper(To); + static char (&Helper(...))[2]; // NOLINT + + // We have to put the 'public' section after the 'private' section, + // or MSVC refuses to compile the code. + public: + // MSVC warns about implicitly converting from double to int for + // possible loss of data, so we need to temporarily disable the + // warning. +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4244) // Temporarily disables warning 4244. + + static const bool value = + sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1; +# pragma warning(pop) // Restores the warning state. +#elif defined(__BORLANDC__) + // C++Builder cannot use member overload resolution during template + // instantiation. The simplest workaround is to use its C++0x type traits + // functions (C++Builder 2009 and above only). 
+ static const bool value = __is_convertible(From, To); +#else + static const bool value = + sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1; +#endif // _MSV_VER +}; +template +const bool ImplicitlyConvertible::value; + +// IsAProtocolMessage::value is a compile-time bool constant that's +// true iff T is type ProtocolMessage, proto2::Message, or a subclass +// of those. +template +struct IsAProtocolMessage + : public bool_constant< + ImplicitlyConvertible::value || + ImplicitlyConvertible::value> { +}; + +// When the compiler sees expression IsContainerTest(0), if C is an +// STL-style container class, the first overload of IsContainerTest +// will be viable (since both C::iterator* and C::const_iterator* are +// valid types and NULL can be implicitly converted to them). It will +// be picked over the second overload as 'int' is a perfect match for +// the type of argument 0. If C::iterator or C::const_iterator is not +// a valid type, the first overload is not viable, and the second +// overload will be picked. Therefore, we can determine whether C is +// a container class by checking the type of IsContainerTest(0). +// The value of the expression is insignificant. +// +// Note that we look for both C::iterator and C::const_iterator. The +// reason is that C++ injects the name of a class as a member of the +// class itself (e.g. you can refer to class iterator as either +// 'iterator' or 'iterator::iterator'). If we look for C::iterator +// only, for example, we would mistakenly think that a class named +// iterator is an STL container. +// +// Also note that the simpler approach of overloading +// IsContainerTest(typename C::const_iterator*) and +// IsContainerTest(...) doesn't work with Visual Age C++ and Sun C++. +typedef int IsContainer; +template +IsContainer IsContainerTest(int /* dummy */, + typename C::iterator* /* it */ = NULL, + typename C::const_iterator* /* const_it */ = NULL) { + return 0; +} + +typedef char IsNotContainer; +template +IsNotContainer IsContainerTest(long /* dummy */) { return '\0'; } + +// EnableIf::type is void when 'Cond' is true, and +// undefined when 'Cond' is false. To use SFINAE to make a function +// overload only apply when a particular expression is true, add +// "typename EnableIf::type* = 0" as the last parameter. +template struct EnableIf; +template<> struct EnableIf { typedef void type; }; // NOLINT + +// Utilities for native arrays. + +// ArrayEq() compares two k-dimensional native arrays using the +// elements' operator==, where k can be any integer >= 0. When k is +// 0, ArrayEq() degenerates into comparing a single pair of values. + +template +bool ArrayEq(const T* lhs, size_t size, const U* rhs); + +// This generic version is used when k is 0. +template +inline bool ArrayEq(const T& lhs, const U& rhs) { return lhs == rhs; } + +// This overload is used when k >= 1. +template +inline bool ArrayEq(const T(&lhs)[N], const U(&rhs)[N]) { + return internal::ArrayEq(lhs, N, rhs); +} + +// This helper reduces code bloat. If we instead put its logic inside +// the previous ArrayEq() function, arrays with different sizes would +// lead to different copies of the template code. +template +bool ArrayEq(const T* lhs, size_t size, const U* rhs) { + for (size_t i = 0; i != size; i++) { + if (!internal::ArrayEq(lhs[i], rhs[i])) + return false; + } + return true; +} + +// Finds the first element in the iterator range [begin, end) that +// equals elem. Element may be a native array type itself. 
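+//
+// For instance (an illustrative sketch):
+//
+//   const int rows[][2] = { {1, 2}, {3, 4} };
+//   const int key[2] = { 3, 4 };
+//   // ArrayAwareFind(rows, rows + 2, key) yields a pointer to rows[1],
+//   // because the comparison goes through ArrayEq() rather than a raw ==.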
+template +Iter ArrayAwareFind(Iter begin, Iter end, const Element& elem) { + for (Iter it = begin; it != end; ++it) { + if (internal::ArrayEq(*it, elem)) + return it; + } + return end; +} + +// CopyArray() copies a k-dimensional native array using the elements' +// operator=, where k can be any integer >= 0. When k is 0, +// CopyArray() degenerates into copying a single value. + +template +void CopyArray(const T* from, size_t size, U* to); + +// This generic version is used when k is 0. +template +inline void CopyArray(const T& from, U* to) { *to = from; } + +// This overload is used when k >= 1. +template +inline void CopyArray(const T(&from)[N], U(*to)[N]) { + internal::CopyArray(from, N, *to); +} + +// This helper reduces code bloat. If we instead put its logic inside +// the previous CopyArray() function, arrays with different sizes +// would lead to different copies of the template code. +template +void CopyArray(const T* from, size_t size, U* to) { + for (size_t i = 0; i != size; i++) { + internal::CopyArray(from[i], to + i); + } +} + +// The relation between an NativeArray object (see below) and the +// native array it represents. +enum RelationToSource { + kReference, // The NativeArray references the native array. + kCopy // The NativeArray makes a copy of the native array and + // owns the copy. +}; + +// Adapts a native array to a read-only STL-style container. Instead +// of the complete STL container concept, this adaptor only implements +// members useful for Google Mock's container matchers. New members +// should be added as needed. To simplify the implementation, we only +// support Element being a raw type (i.e. having no top-level const or +// reference modifier). It's the client's responsibility to satisfy +// this requirement. Element can be an array type itself (hence +// multi-dimensional arrays are supported). +template +class NativeArray { + public: + // STL-style container typedefs. + typedef Element value_type; + typedef Element* iterator; + typedef const Element* const_iterator; + + // Constructs from a native array. + NativeArray(const Element* array, size_t count, RelationToSource relation) { + Init(array, count, relation); + } + + // Copy constructor. + NativeArray(const NativeArray& rhs) { + Init(rhs.array_, rhs.size_, rhs.relation_to_source_); + } + + ~NativeArray() { + // Ensures that the user doesn't instantiate NativeArray with a + // const or reference type. + static_cast(StaticAssertTypeEqHelper()); + if (relation_to_source_ == kCopy) + delete[] array_; + } + + // STL-style container methods. + size_t size() const { return size_; } + const_iterator begin() const { return array_; } + const_iterator end() const { return array_ + size_; } + bool operator==(const NativeArray& rhs) const { + return size() == rhs.size() && + ArrayEq(begin(), size(), rhs.begin()); + } + + private: + // Initializes this object; makes a copy of the input array if + // 'relation' is kCopy. 
+ void Init(const Element* array, size_t a_size, RelationToSource relation) { + if (relation == kReference) { + array_ = array; + } else { + Element* const copy = new Element[a_size]; + CopyArray(array, a_size, copy); + array_ = copy; + } + size_ = a_size; + relation_to_source_ = relation; + } + + const Element* array_; + size_t size_; + RelationToSource relation_to_source_; + + GTEST_DISALLOW_ASSIGN_(NativeArray); +}; + +} // namespace internal +} // namespace testing + +#define GTEST_MESSAGE_AT_(file, line, message, result_type) \ + ::testing::internal::AssertHelper(result_type, file, line, message) \ + = ::testing::Message() + +#define GTEST_MESSAGE_(message, result_type) \ + GTEST_MESSAGE_AT_(__FILE__, __LINE__, message, result_type) + +#define GTEST_FATAL_FAILURE_(message) \ + return GTEST_MESSAGE_(message, ::testing::TestPartResult::kFatalFailure) + +#define GTEST_NONFATAL_FAILURE_(message) \ + GTEST_MESSAGE_(message, ::testing::TestPartResult::kNonFatalFailure) + +#define GTEST_SUCCESS_(message) \ + GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess) + +// Suppresses MSVC warnings 4072 (unreachable code) for the code following +// statement if it returns or throws (or doesn't return or throw in some +// situations). +#define GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) \ + if (::testing::internal::AlwaysTrue()) { statement; } + +#define GTEST_TEST_THROW_(statement, expected_exception, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::ConstCharPtr gtest_msg = "") { \ + bool gtest_caught_expected = false; \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + catch (expected_exception const&) { \ + gtest_caught_expected = true; \ + } \ + catch (...) { \ + gtest_msg.value = \ + "Expected: " #statement " throws an exception of type " \ + #expected_exception ".\n Actual: it throws a different type."; \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \ + } \ + if (!gtest_caught_expected) { \ + gtest_msg.value = \ + "Expected: " #statement " throws an exception of type " \ + #expected_exception ".\n Actual: it throws nothing."; \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__): \ + fail(gtest_msg.value) + +#define GTEST_TEST_NO_THROW_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + catch (...) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__): \ + fail("Expected: " #statement " doesn't throw an exception.\n" \ + " Actual: it throws.") + +#define GTEST_TEST_ANY_THROW_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + bool gtest_caught_any = false; \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + catch (...) { \ + gtest_caught_any = true; \ + } \ + if (!gtest_caught_any) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__): \ + fail("Expected: " #statement " throws an exception.\n" \ + " Actual: it doesn't.") + + +// Implements Boolean test assertions such as EXPECT_TRUE. expression can be +// either a boolean expression or an AssertionResult. 
text is a textual +// represenation of expression as it was passed into the EXPECT_TRUE. +#define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (const ::testing::AssertionResult gtest_ar_ = \ + ::testing::AssertionResult(expression)) \ + ; \ + else \ + fail(::testing::internal::GetBoolAssertionFailureMessage(\ + gtest_ar_, text, #actual, #expected).c_str()) + +#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + ::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__): \ + fail("Expected: " #statement " doesn't generate new fatal " \ + "failures in the current thread.\n" \ + " Actual: it does.") + +// Expands to the name of the class that implements the given test. +#define GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \ + test_case_name##_##test_name##_Test + +// Helper macro for defining tests. +#define GTEST_TEST_(test_case_name, test_name, parent_class, parent_id)\ +class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) : public parent_class {\ + public:\ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {}\ + private:\ + virtual void TestBody();\ + static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;\ + GTEST_DISALLOW_COPY_AND_ASSIGN_(\ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name));\ +};\ +\ +::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_case_name, test_name)\ + ::test_info_ =\ + ::testing::internal::MakeAndRegisterTestInfo(\ + #test_case_name, #test_name, NULL, NULL, \ + (parent_id), \ + parent_class::SetUpTestCase, \ + parent_class::TearDownTestCase, \ + new ::testing::internal::TestFactoryImpl<\ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>);\ +void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines the public API for death tests. It is +// #included by gtest.h so a user doesn't need to include this +// directly. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ +#define GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ + +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines internal utilities needed for implementing +// death tests. They are subject to change without notice. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ + + +#include + +namespace testing { +namespace internal { + +GTEST_DECLARE_string_(internal_run_death_test); + +// Names of the flags (needed for parsing Google Test flags). +const char kDeathTestStyleFlag[] = "death_test_style"; +const char kDeathTestUseFork[] = "death_test_use_fork"; +const char kInternalRunDeathTestFlag[] = "internal_run_death_test"; + +#if GTEST_HAS_DEATH_TEST + +// DeathTest is a class that hides much of the complexity of the +// GTEST_DEATH_TEST_ macro. 
It is abstract; its static Create method +// returns a concrete class that depends on the prevailing death test +// style, as defined by the --gtest_death_test_style and/or +// --gtest_internal_run_death_test flags. + +// In describing the results of death tests, these terms are used with +// the corresponding definitions: +// +// exit status: The integer exit information in the format specified +// by wait(2) +// exit code: The integer code passed to exit(3), _exit(2), or +// returned from main() +class GTEST_API_ DeathTest { + public: + // Create returns false if there was an error determining the + // appropriate action to take for the current death test; for example, + // if the gtest_death_test_style flag is set to an invalid value. + // The LastMessage method will return a more detailed message in that + // case. Otherwise, the DeathTest pointer pointed to by the "test" + // argument is set. If the death test should be skipped, the pointer + // is set to NULL; otherwise, it is set to the address of a new concrete + // DeathTest object that controls the execution of the current test. + static bool Create(const char* statement, const RE* regex, + const char* file, int line, DeathTest** test); + DeathTest(); + virtual ~DeathTest() { } + + // A helper class that aborts a death test when it's deleted. + class ReturnSentinel { + public: + explicit ReturnSentinel(DeathTest* test) : test_(test) { } + ~ReturnSentinel() { test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT); } + private: + DeathTest* const test_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel); + } GTEST_ATTRIBUTE_UNUSED_; + + // An enumeration of possible roles that may be taken when a death + // test is encountered. EXECUTE means that the death test logic should + // be executed immediately. OVERSEE means that the program should prepare + // the appropriate environment for a child process to execute the death + // test, then wait for it to complete. + enum TestRole { OVERSEE_TEST, EXECUTE_TEST }; + + // An enumeration of the three reasons that a test might be aborted. + enum AbortReason { + TEST_ENCOUNTERED_RETURN_STATEMENT, + TEST_THREW_EXCEPTION, + TEST_DID_NOT_DIE + }; + + // Assumes one of the above roles. + virtual TestRole AssumeRole() = 0; + + // Waits for the death test to finish and returns its status. + virtual int Wait() = 0; + + // Returns true if the death test passed; that is, the test process + // exited during the test, its exit status matches a user-supplied + // predicate, and its stderr output matches a user-supplied regular + // expression. + // The user-supplied predicate may be a macro expression rather + // than a function pointer or functor, or else Wait and Passed could + // be combined. + virtual bool Passed(bool exit_status_ok) = 0; + + // Signals that the death test did not die as expected. + virtual void Abort(AbortReason reason) = 0; + + // Returns a human-readable outcome message regarding the outcome of + // the last death test. + static const char* LastMessage(); + + static void set_last_death_test_message(const String& message); + + private: + // A string containing a description of the outcome of the last death test. + static String last_death_test_message_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest); +}; + +// Factory interface for death tests. May be mocked out for testing. 
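+//
+// For illustration only (not part of the original header): a hypothetical
+// test double for the DeathTestFactory interface declared just below could
+// record the requested statement and tell the framework to skip the death
+// test. The names FakeDeathTestFactory and last_statement_ are assumptions
+// made for this sketch.
+//
+//   class FakeDeathTestFactory : public DeathTestFactory {
+//    public:
+//     FakeDeathTestFactory() : last_statement_(NULL) {}
+//     virtual bool Create(const char* statement, const RE* /*regex*/,
+//                         const char* /*file*/, int /*line*/,
+//                         DeathTest** test) {
+//       last_statement_ = statement;  // Remember what was asked for.
+//       *test = NULL;                 // NULL means "skip this death test".
+//       return true;                  // No error determining the action.
+//     }
+//     const char* last_statement_;
+//   };
+//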
+class DeathTestFactory { + public: + virtual ~DeathTestFactory() { } + virtual bool Create(const char* statement, const RE* regex, + const char* file, int line, DeathTest** test) = 0; +}; + +// A concrete DeathTestFactory implementation for normal use. +class DefaultDeathTestFactory : public DeathTestFactory { + public: + virtual bool Create(const char* statement, const RE* regex, + const char* file, int line, DeathTest** test); +}; + +// Returns true if exit_status describes a process that was terminated +// by a signal, or exited normally with a nonzero exit code. +GTEST_API_ bool ExitedUnsuccessfully(int exit_status); + +// Traps C++ exceptions escaping statement and reports them as test +// failures. Note that trapping SEH exceptions is not implemented here. +# if GTEST_HAS_EXCEPTIONS +# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } catch (const ::std::exception& gtest_exception) { \ + fprintf(\ + stderr, \ + "\n%s: Caught std::exception-derived exception escaping the " \ + "death test statement. Exception message: %s\n", \ + ::testing::internal::FormatFileLocation(__FILE__, __LINE__).c_str(), \ + gtest_exception.what()); \ + fflush(stderr); \ + death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \ + } catch (...) { \ + death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \ + } + +# else +# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) + +# endif + +// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*, +// ASSERT_EXIT*, and EXPECT_EXIT*. +# define GTEST_DEATH_TEST_(statement, predicate, regex, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + const ::testing::internal::RE& gtest_regex = (regex); \ + ::testing::internal::DeathTest* gtest_dt; \ + if (!::testing::internal::DeathTest::Create(#statement, >est_regex, \ + __FILE__, __LINE__, >est_dt)) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \ + } \ + if (gtest_dt != NULL) { \ + ::testing::internal::scoped_ptr< ::testing::internal::DeathTest> \ + gtest_dt_ptr(gtest_dt); \ + switch (gtest_dt->AssumeRole()) { \ + case ::testing::internal::DeathTest::OVERSEE_TEST: \ + if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \ + } \ + break; \ + case ::testing::internal::DeathTest::EXECUTE_TEST: { \ + ::testing::internal::DeathTest::ReturnSentinel \ + gtest_sentinel(gtest_dt); \ + GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, gtest_dt); \ + gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \ + break; \ + } \ + default: \ + break; \ + } \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__): \ + fail(::testing::internal::DeathTest::LastMessage()) +// The symbol "fail" here expands to something into which a message +// can be streamed. + +// A class representing the parsed contents of the +// --gtest_internal_run_death_test flag, as it existed when +// RUN_ALL_TESTS was called. 
+class InternalRunDeathTestFlag { + public: + InternalRunDeathTestFlag(const String& a_file, + int a_line, + int an_index, + int a_write_fd) + : file_(a_file), line_(a_line), index_(an_index), + write_fd_(a_write_fd) {} + + ~InternalRunDeathTestFlag() { + if (write_fd_ >= 0) + posix::Close(write_fd_); + } + + String file() const { return file_; } + int line() const { return line_; } + int index() const { return index_; } + int write_fd() const { return write_fd_; } + + private: + String file_; + int line_; + int index_; + int write_fd_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag); +}; + +// Returns a newly created InternalRunDeathTestFlag object with fields +// initialized from the GTEST_FLAG(internal_run_death_test) flag if +// the flag is specified; otherwise returns NULL. +InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag(); + +#else // GTEST_HAS_DEATH_TEST + +// This macro is used for implementing macros such as +// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where +// death tests are not supported. Those macros must compile on such systems +// iff EXPECT_DEATH and ASSERT_DEATH compile with the same parameters on +// systems that support death tests. This allows one to write such a macro +// on a system that does not support death tests and be sure that it will +// compile on a death-test supporting system. +// +// Parameters: +// statement - A statement that a macro such as EXPECT_DEATH would test +// for program termination. This macro has to make sure this +// statement is compiled but not executed, to ensure that +// EXPECT_DEATH_IF_SUPPORTED compiles with a certain +// parameter iff EXPECT_DEATH compiles with it. +// regex - A regex that a macro such as EXPECT_DEATH would use to test +// the output of statement. This parameter has to be +// compiled but not evaluated by this macro, to ensure that +// this macro only accepts expressions that a macro such as +// EXPECT_DEATH would accept. +// terminator - Must be an empty statement for EXPECT_DEATH_IF_SUPPORTED +// and a return statement for ASSERT_DEATH_IF_SUPPORTED. +// This ensures that ASSERT_DEATH_IF_SUPPORTED will not +// compile inside functions where ASSERT_DEATH doesn't +// compile. +// +// The branch that has an always false condition is used to ensure that +// statement and regex are compiled (and thus syntactically correct) but +// never executed. The unreachable code macro protects the terminator +// statement from generating an 'unreachable code' warning in case +// statement unconditionally returns or throws. The Message constructor at +// the end allows the syntax of streaming additional messages into the +// macro, for compilational compatibility with EXPECT_DEATH/ASSERT_DEATH. +# define GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, terminator) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + GTEST_LOG_(WARNING) \ + << "Death tests are not supported on this platform.\n" \ + << "Statement '" #statement "' cannot be verified."; \ + } else if (::testing::internal::AlwaysFalse()) { \ + ::testing::internal::RE::PartialMatch(".*", (regex)); \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + terminator; \ + } else \ + ::testing::Message() + +#endif // GTEST_HAS_DEATH_TEST + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ + +namespace testing { + +// This flag controls the style of death tests. 
Valid values are "threadsafe", +// meaning that the death test child process will re-execute the test binary +// from the start, running only a single death test, or "fast", +// meaning that the child process will execute the test logic immediately +// after forking. +GTEST_DECLARE_string_(death_test_style); + +#if GTEST_HAS_DEATH_TEST + +// The following macros are useful for writing death tests. + +// Here's what happens when an ASSERT_DEATH* or EXPECT_DEATH* is +// executed: +// +// 1. It generates a warning if there is more than one active +// thread. This is because it's safe to fork() or clone() only +// when there is a single thread. +// +// 2. The parent process clone()s a sub-process and runs the death +// test in it; the sub-process exits with code 0 at the end of the +// death test, if it hasn't exited already. +// +// 3. The parent process waits for the sub-process to terminate. +// +// 4. The parent process checks the exit code and error message of +// the sub-process. +// +// Examples: +// +// ASSERT_DEATH(server.SendMessage(56, "Hello"), "Invalid port number"); +// for (int i = 0; i < 5; i++) { +// EXPECT_DEATH(server.ProcessRequest(i), +// "Invalid request .* in ProcessRequest()") +// << "Failed to die on request " << i); +// } +// +// ASSERT_EXIT(server.ExitNow(), ::testing::ExitedWithCode(0), "Exiting"); +// +// bool KilledBySIGHUP(int exit_code) { +// return WIFSIGNALED(exit_code) && WTERMSIG(exit_code) == SIGHUP; +// } +// +// ASSERT_EXIT(client.HangUpServer(), KilledBySIGHUP, "Hanging up!"); +// +// On the regular expressions used in death tests: +// +// On POSIX-compliant systems (*nix), we use the library, +// which uses the POSIX extended regex syntax. +// +// On other platforms (e.g. Windows), we only support a simple regex +// syntax implemented as part of Google Test. This limited +// implementation should be enough most of the time when writing +// death tests; though it lacks many features you can find in PCRE +// or POSIX extended regex syntax. For example, we don't support +// union ("x|y"), grouping ("(xy)"), brackets ("[xy]"), and +// repetition count ("x{5,7}"), among others. +// +// Below is the syntax that we do support. We chose it to be a +// subset of both PCRE and POSIX extended regex, so it's easy to +// learn wherever you come from. In the following: 'A' denotes a +// literal character, period (.), or a single \\ escape sequence; +// 'x' and 'y' denote regular expressions; 'm' and 'n' are for +// natural numbers. +// +// c matches any literal character c +// \\d matches any decimal digit +// \\D matches any character that's not a decimal digit +// \\f matches \f +// \\n matches \n +// \\r matches \r +// \\s matches any ASCII whitespace, including \n +// \\S matches any character that's not a whitespace +// \\t matches \t +// \\v matches \v +// \\w matches any letter, _, or decimal digit +// \\W matches any character that \\w doesn't match +// \\c matches any literal character c, which must be a punctuation +// . matches any single character except \n +// A? matches 0 or 1 occurrences of A +// A* matches 0 or many occurrences of A +// A+ matches 1 or many occurrences of A +// ^ matches the beginning of a string (not that of each line) +// $ matches the end of a string (not that of each line) +// xy matches x followed by y +// +// If you accidentally use PCRE or POSIX extended regex features +// not implemented by us, you will get a run-time failure. In that +// case, please try to rewrite your regular expression within the +// above syntax. 
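+//
+// As a hedged illustration (not taken from the original header), the sketch
+// below stays within the supported subset; MyParser and the quoted failure
+// message are hypothetical:
+//
+//   TEST(MyParserDeathTest, DiesOnEmptyInput) {
+//     MyParser parser;
+//     // The pattern uses only literal characters, \\d, and '+', all of
+//     // which the simple built-in regex engine supports on every platform.
+//     EXPECT_DEATH(parser.Parse(""), "parse error at line \\d+");
+//   }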
+// +// This implementation is *not* meant to be as highly tuned or robust +// as a compiled regex library, but should perform well enough for a +// death test, which already incurs significant overhead by launching +// a child process. +// +// Known caveats: +// +// A "threadsafe" style death test obtains the path to the test +// program from argv[0] and re-executes it in the sub-process. For +// simplicity, the current implementation doesn't search the PATH +// when launching the sub-process. This means that the user must +// invoke the test program via a path that contains at least one +// path separator (e.g. path/to/foo_test and +// /absolute/path/to/bar_test are fine, but foo_test is not). This +// is rarely a problem as people usually don't put the test binary +// directory in PATH. +// +// TODO(wan@google.com): make thread-safe death tests search the PATH. + +// Asserts that a given statement causes the program to exit, with an +// integer exit status that satisfies predicate, and emitting error output +// that matches regex. +# define ASSERT_EXIT(statement, predicate, regex) \ + GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_) + +// Like ASSERT_EXIT, but continues on to successive tests in the +// test case, if any: +# define EXPECT_EXIT(statement, predicate, regex) \ + GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_) + +// Asserts that a given statement causes the program to exit, either by +// explicitly exiting with a nonzero exit code or being killed by a +// signal, and emitting error output that matches regex. +# define ASSERT_DEATH(statement, regex) \ + ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex) + +// Like ASSERT_DEATH, but continues on to successive tests in the +// test case, if any: +# define EXPECT_DEATH(statement, regex) \ + EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex) + +// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*: + +// Tests that an exit code describes a normal exit with a given exit code. +class GTEST_API_ ExitedWithCode { + public: + explicit ExitedWithCode(int exit_code); + bool operator()(int exit_status) const; + private: + // No implementation - assignment is unsupported. + void operator=(const ExitedWithCode& other); + + const int exit_code_; +}; + +# if !GTEST_OS_WINDOWS +// Tests that an exit code describes an exit due to termination by a +// given signal. +class GTEST_API_ KilledBySignal { + public: + explicit KilledBySignal(int signum); + bool operator()(int exit_status) const; + private: + const int signum_; +}; +# endif // !GTEST_OS_WINDOWS + +// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode. +// The death testing framework causes this to have interesting semantics, +// since the sideeffects of the call are only visible in opt mode, and not +// in debug mode. +// +// In practice, this can be used to test functions that utilize the +// LOG(DFATAL) macro using the following style: +// +// int DieInDebugOr12(int* sideeffect) { +// if (sideeffect) { +// *sideeffect = 12; +// } +// LOG(DFATAL) << "death"; +// return 12; +// } +// +// TEST(TestCase, TestDieOr12WorksInDgbAndOpt) { +// int sideeffect = 0; +// // Only asserts in dbg. +// EXPECT_DEBUG_DEATH(DieInDebugOr12(&sideeffect), "death"); +// +// #ifdef NDEBUG +// // opt-mode has sideeffect visible. +// EXPECT_EQ(12, sideeffect); +// #else +// // dbg-mode no visible sideeffect. 
+// EXPECT_EQ(0, sideeffect); +// #endif +// } +// +// This will assert that DieInDebugReturn12InOpt() crashes in debug +// mode, usually due to a DCHECK or LOG(DFATAL), but returns the +// appropriate fallback value (12 in this case) in opt mode. If you +// need to test that a function has appropriate side-effects in opt +// mode, include assertions against the side-effects. A general +// pattern for this is: +// +// EXPECT_DEBUG_DEATH({ +// // Side-effects here will have an effect after this statement in +// // opt mode, but none in debug mode. +// EXPECT_EQ(12, DieInDebugOr12(&sideeffect)); +// }, "death"); +// +# ifdef NDEBUG + +# define EXPECT_DEBUG_DEATH(statement, regex) \ + do { statement; } while (::testing::internal::AlwaysFalse()) + +# define ASSERT_DEBUG_DEATH(statement, regex) \ + do { statement; } while (::testing::internal::AlwaysFalse()) + +# else + +# define EXPECT_DEBUG_DEATH(statement, regex) \ + EXPECT_DEATH(statement, regex) + +# define ASSERT_DEBUG_DEATH(statement, regex) \ + ASSERT_DEATH(statement, regex) + +# endif // NDEBUG for EXPECT_DEBUG_DEATH +#endif // GTEST_HAS_DEATH_TEST + +// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and +// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if +// death tests are supported; otherwise they just issue a warning. This is +// useful when you are combining death test assertions with normal test +// assertions in one test. +#if GTEST_HAS_DEATH_TEST +# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \ + EXPECT_DEATH(statement, regex) +# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \ + ASSERT_DEATH(statement, regex) +#else +# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \ + GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, ) +# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \ + GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, return) +#endif + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines the Message class. +// +// IMPORTANT NOTE: Due to limitation of the C++ language, we have to +// leave some internal implementation details in this header file. +// They are clearly marked by comments like this: +// +// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +// +// Such code is NOT meant to be used by a user directly, and is subject +// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user +// program! + +#ifndef GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ +#define GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ + +#include + + +namespace testing { + +// The Message class works like an ostream repeater. +// +// Typical usage: +// +// 1. You stream a bunch of values to a Message object. +// It will remember the text in a stringstream. +// 2. Then you stream the Message object to an ostream. +// This causes the text in the Message to be streamed +// to the ostream. +// +// For example; +// +// testing::Message foo; +// foo << 1 << " != " << 2; +// std::cout << foo; +// +// will print "1 != 2". +// +// Message is not intended to be inherited from. In particular, its +// destructor is not virtual. +// +// Note that stringstream behaves differently in gcc and in MSVC. You +// can stream a NULL char pointer to it in the former, but not in the +// latter (it causes an access violation if you do). The Message +// class hides this difference by treating a NULL char pointer as +// "(null)". +class GTEST_API_ Message { + private: + // The type of basic IO manipulators (endl, ends, and flush) for + // narrow streams. + typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&); + + public: + // Constructs an empty Message. + // We allocate the stringstream separately because otherwise each use of + // ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's + // stack frame leading to huge stack frames in some cases; gcc does not reuse + // the stack space. + Message() : ss_(new ::std::stringstream) { + // By default, we want there to be enough precision when printing + // a double to a Message. + *ss_ << std::setprecision(std::numeric_limits::digits10 + 2); + } + + // Copy constructor. + Message(const Message& msg) : ss_(new ::std::stringstream) { // NOLINT + *ss_ << msg.GetString(); + } + + // Constructs a Message from a C-string. + explicit Message(const char* str) : ss_(new ::std::stringstream) { + *ss_ << str; + } + +#if GTEST_OS_SYMBIAN + // Streams a value (either a pointer or not) to this object. + template + inline Message& operator <<(const T& value) { + StreamHelper(typename internal::is_pointer::type(), value); + return *this; + } +#else + // Streams a non-pointer value to this object. + template + inline Message& operator <<(const T& val) { + ::GTestStreamToHelper(ss_.get(), val); + return *this; + } + + // Streams a pointer value to this object. + // + // This function is an overload of the previous one. When you + // stream a pointer to a Message, this definition will be used as it + // is more specialized. (The C++ Standard, section + // [temp.func.order].) If you stream a non-pointer, then the + // previous definition will be used. + // + // The reason for this overload is that streaming a NULL pointer to + // ostream is undefined behavior. Depending on the compiler, you + // may get "0", "(nil)", "(null)", or an access violation. To + // ensure consistent result across compilers, we always treat NULL + // as "(null)". 
+ template + inline Message& operator <<(T* const& pointer) { // NOLINT + if (pointer == NULL) { + *ss_ << "(null)"; + } else { + ::GTestStreamToHelper(ss_.get(), pointer); + } + return *this; + } +#endif // GTEST_OS_SYMBIAN + + // Since the basic IO manipulators are overloaded for both narrow + // and wide streams, we have to provide this specialized definition + // of operator <<, even though its body is the same as the + // templatized version above. Without this definition, streaming + // endl or other basic IO manipulators to Message will confuse the + // compiler. + Message& operator <<(BasicNarrowIoManip val) { + *ss_ << val; + return *this; + } + + // Instead of 1/0, we want to see true/false for bool values. + Message& operator <<(bool b) { + return *this << (b ? "true" : "false"); + } + + // These two overloads allow streaming a wide C string to a Message + // using the UTF-8 encoding. + Message& operator <<(const wchar_t* wide_c_str) { + return *this << internal::String::ShowWideCString(wide_c_str); + } + Message& operator <<(wchar_t* wide_c_str) { + return *this << internal::String::ShowWideCString(wide_c_str); + } + +#if GTEST_HAS_STD_WSTRING + // Converts the given wide string to a narrow string using the UTF-8 + // encoding, and streams the result to this Message object. + Message& operator <<(const ::std::wstring& wstr); +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_GLOBAL_WSTRING + // Converts the given wide string to a narrow string using the UTF-8 + // encoding, and streams the result to this Message object. + Message& operator <<(const ::wstring& wstr); +#endif // GTEST_HAS_GLOBAL_WSTRING + + // Gets the text streamed to this object so far as a String. + // Each '\0' character in the buffer is replaced with "\\0". + // + // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + internal::String GetString() const { + return internal::StringStreamToString(ss_.get()); + } + + private: + +#if GTEST_OS_SYMBIAN + // These are needed as the Nokia Symbian Compiler cannot decide between + // const T& and const T* in a function template. The Nokia compiler _can_ + // decide between class template specializations for T and T*, so a + // tr1::type_traits-like is_pointer works, and we can overload on that. + template + inline void StreamHelper(internal::true_type /*dummy*/, T* pointer) { + if (pointer == NULL) { + *ss_ << "(null)"; + } else { + ::GTestStreamToHelper(ss_.get(), pointer); + } + } + template + inline void StreamHelper(internal::false_type /*dummy*/, const T& value) { + ::GTestStreamToHelper(ss_.get(), value); + } +#endif // GTEST_OS_SYMBIAN + + // We'll hold the text streamed to this object here. + const internal::scoped_ptr< ::std::stringstream> ss_; + + // We declare (but don't implement) this to prevent the compiler + // from implementing the assignment operator. + void operator=(const Message&); +}; + +// Streams a Message to an ostream. +inline std::ostream& operator <<(std::ostream& os, const Message& sb) { + return os << sb.GetString(); +} + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ +// This file was GENERATED by command: +// pump.py gtest-param-test.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2008, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: vladl@google.com (Vlad Losev) +// +// Macros and functions for implementing parameterized tests +// in Google C++ Testing Framework (Google Test) +// +// This file is generated by a SCRIPT. DO NOT EDIT BY HAND! +// +#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ + + +// Value-parameterized tests allow you to test your code with different +// parameters without writing multiple copies of the same test. +// +// Here is how you use value-parameterized tests: + +#if 0 + +// To write value-parameterized tests, first you should define a fixture +// class. It is usually derived from testing::TestWithParam (see below for +// another inheritance scheme that's sometimes useful in more complicated +// class hierarchies), where the type of your parameter values. +// TestWithParam is itself derived from testing::Test. T can be any +// copyable type. If it's a raw pointer, you are responsible for managing the +// lifespan of the pointed values. + +class FooTest : public ::testing::TestWithParam { + // You can implement all the usual class fixture members here. +}; + +// Then, use the TEST_P macro to define as many parameterized tests +// for this fixture as you want. The _P suffix is for "parameterized" +// or "pattern", whichever you prefer to think. + +TEST_P(FooTest, DoesBlah) { + // Inside a test, access the test parameter with the GetParam() method + // of the TestWithParam class: + EXPECT_TRUE(foo.Blah(GetParam())); + ... +} + +TEST_P(FooTest, HasBlahBlah) { + ... +} + +// Finally, you can use INSTANTIATE_TEST_CASE_P to instantiate the test +// case with any set of parameters you want. Google Test defines a number +// of functions for generating test parameters. They return what we call +// (surprise!) parameter generators. Here is a summary of them, which +// are all in the testing namespace: +// +// +// Range(begin, end [, step]) - Yields values {begin, begin+step, +// begin+step+step, ...}. The values do not +// include end. 
step defaults to 1. +// Values(v1, v2, ..., vN) - Yields values {v1, v2, ..., vN}. +// ValuesIn(container) - Yields values from a C-style array, an STL +// ValuesIn(begin,end) container, or an iterator range [begin, end). +// Bool() - Yields sequence {false, true}. +// Combine(g1, g2, ..., gN) - Yields all combinations (the Cartesian product +// for the math savvy) of the values generated +// by the N generators. +// +// For more details, see comments at the definitions of these functions below +// in this file. +// +// The following statement will instantiate tests from the FooTest test case +// each with parameter values "meeny", "miny", and "moe". + +INSTANTIATE_TEST_CASE_P(InstantiationName, + FooTest, + Values("meeny", "miny", "moe")); + +// To distinguish different instances of the pattern, (yes, you +// can instantiate it more then once) the first argument to the +// INSTANTIATE_TEST_CASE_P macro is a prefix that will be added to the +// actual test case name. Remember to pick unique prefixes for different +// instantiations. The tests from the instantiation above will have +// these names: +// +// * InstantiationName/FooTest.DoesBlah/0 for "meeny" +// * InstantiationName/FooTest.DoesBlah/1 for "miny" +// * InstantiationName/FooTest.DoesBlah/2 for "moe" +// * InstantiationName/FooTest.HasBlahBlah/0 for "meeny" +// * InstantiationName/FooTest.HasBlahBlah/1 for "miny" +// * InstantiationName/FooTest.HasBlahBlah/2 for "moe" +// +// You can use these names in --gtest_filter. +// +// This statement will instantiate all tests from FooTest again, each +// with parameter values "cat" and "dog": + +const char* pets[] = {"cat", "dog"}; +INSTANTIATE_TEST_CASE_P(AnotherInstantiationName, FooTest, ValuesIn(pets)); + +// The tests from the instantiation above will have these names: +// +// * AnotherInstantiationName/FooTest.DoesBlah/0 for "cat" +// * AnotherInstantiationName/FooTest.DoesBlah/1 for "dog" +// * AnotherInstantiationName/FooTest.HasBlahBlah/0 for "cat" +// * AnotherInstantiationName/FooTest.HasBlahBlah/1 for "dog" +// +// Please note that INSTANTIATE_TEST_CASE_P will instantiate all tests +// in the given test case, whether their definitions come before or +// AFTER the INSTANTIATE_TEST_CASE_P statement. +// +// Please also note that generator expressions (including parameters to the +// generators) are evaluated in InitGoogleTest(), after main() has started. +// This allows the user on one hand, to adjust generator parameters in order +// to dynamically determine a set of tests to run and on the other hand, +// give the user a chance to inspect the generated tests with Google Test +// reflection API before RUN_ALL_TESTS() is executed. +// +// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc +// for more examples. +// +// In the future, we plan to publish the API for defining new parameter +// generators. But for now this interface remains part of the internal +// implementation and is subject to change. +// +// +// A parameterized test fixture must be derived from testing::Test and from +// testing::WithParamInterface, where T is the type of the parameter +// values. Inheriting from TestWithParam satisfies that requirement because +// TestWithParam inherits from both Test and WithParamInterface. In more +// complicated hierarchies, however, it is occasionally useful to inherit +// separately from Test and WithParamInterface. 
For example: + +class BaseTest : public ::testing::Test { + // You can inherit all the usual members for a non-parameterized test + // fixture here. +}; + +class DerivedTest : public BaseTest, public ::testing::WithParamInterface { + // The usual test fixture members go here too. +}; + +TEST_F(BaseTest, HasFoo) { + // This is an ordinary non-parameterized test. +} + +TEST_P(DerivedTest, DoesBlah) { + // GetParam works just the same here as if you inherit from TestWithParam. + EXPECT_TRUE(foo.Blah(GetParam())); +} + +#endif // 0 + + +#if !GTEST_OS_SYMBIAN +# include +#endif + +// scripts/fuse_gtest.py depends on gtest's own header being #included +// *unconditionally*. Therefore these #includes cannot be moved +// inside #if GTEST_HAS_PARAM_TEST. +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: vladl@google.com (Vlad Losev) + +// Type and function utilities for implementing parameterized tests. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_ + +#include +#include +#include + +// scripts/fuse_gtest.py depends on gtest's own header being #included +// *unconditionally*. Therefore these #includes cannot be moved +// inside #if GTEST_HAS_PARAM_TEST. +// Copyright 2003 Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: Dan Egnor (egnor@google.com) +// +// A "smart" pointer type with reference tracking. Every pointer to a +// particular object is kept on a circular linked list. When the last pointer +// to an object is destroyed or reassigned, the object is deleted. +// +// Used properly, this deletes the object when the last reference goes away. +// There are several caveats: +// - Like all reference counting schemes, cycles lead to leaks. +// - Each smart pointer is actually two pointers (8 bytes instead of 4). +// - Every time a pointer is assigned, the entire list of pointers to that +// object is traversed. This class is therefore NOT SUITABLE when there +// will often be more than two or three pointers to a particular object. +// - References are only tracked as long as linked_ptr<> objects are copied. +// If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS +// will happen (double deletion). +// +// A good use of this class is storing object references in STL containers. +// You can safely put linked_ptr<> in a vector<>. +// Other uses may not be as good. +// +// Note: If you use an incomplete type with linked_ptr<>, the class +// *containing* linked_ptr<> must have a constructor and destructor (even +// if they do nothing!). +// +// Bill Gibbons suggested we use something like this. +// +// Thread Safety: +// Unlike other linked_ptr implementations, in this implementation +// a linked_ptr object is thread-safe in the sense that: +// - it's safe to copy linked_ptr objects concurrently, +// - it's safe to copy *from* a linked_ptr and read its underlying +// raw pointer (e.g. via get()) concurrently, and +// - it's safe to write to two linked_ptrs that point to the same +// shared object concurrently. +// TODO(wan@google.com): rename this to safe_linked_ptr to avoid +// confusion with normal linked_ptr. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_ + +#include +#include + + +namespace testing { +namespace internal { + +// Protects copying of all linked_ptr objects. +GTEST_API_ GTEST_DECLARE_STATIC_MUTEX_(g_linked_ptr_mutex); + +// This is used internally by all instances of linked_ptr<>. It needs to be +// a non-template class because different types of linked_ptr<> can refer to +// the same object (linked_ptr(obj) vs linked_ptr(obj)). +// So, it needs to be possible for different types of linked_ptr to participate +// in the same circular linked list, so we need a single class type here. +// +// DO NOT USE THIS CLASS DIRECTLY YOURSELF. Use linked_ptr. +class linked_ptr_internal { + public: + // Create a new circle that includes only this instance. 
+ void join_new() { + next_ = this; + } + + // Many linked_ptr operations may change p.link_ for some linked_ptr + // variable p in the same circle as this object. Therefore we need + // to prevent two such operations from occurring concurrently. + // + // Note that different types of linked_ptr objects can coexist in a + // circle (e.g. linked_ptr, linked_ptr, and + // linked_ptr). Therefore we must use a single mutex to + // protect all linked_ptr objects. This can create serious + // contention in production code, but is acceptable in a testing + // framework. + + // Join an existing circle. + // L < g_linked_ptr_mutex + void join(linked_ptr_internal const* ptr) { + MutexLock lock(&g_linked_ptr_mutex); + + linked_ptr_internal const* p = ptr; + while (p->next_ != ptr) p = p->next_; + p->next_ = this; + next_ = ptr; + } + + // Leave whatever circle we're part of. Returns true if we were the + // last member of the circle. Once this is done, you can join() another. + // L < g_linked_ptr_mutex + bool depart() { + MutexLock lock(&g_linked_ptr_mutex); + + if (next_ == this) return true; + linked_ptr_internal const* p = next_; + while (p->next_ != this) p = p->next_; + p->next_ = next_; + return false; + } + + private: + mutable linked_ptr_internal const* next_; +}; + +template +class linked_ptr { + public: + typedef T element_type; + + // Take over ownership of a raw pointer. This should happen as soon as + // possible after the object is created. + explicit linked_ptr(T* ptr = NULL) { capture(ptr); } + ~linked_ptr() { depart(); } + + // Copy an existing linked_ptr<>, adding ourselves to the list of references. + template linked_ptr(linked_ptr const& ptr) { copy(&ptr); } + linked_ptr(linked_ptr const& ptr) { // NOLINT + assert(&ptr != this); + copy(&ptr); + } + + // Assignment releases the old value and acquires the new. + template linked_ptr& operator=(linked_ptr const& ptr) { + depart(); + copy(&ptr); + return *this; + } + + linked_ptr& operator=(linked_ptr const& ptr) { + if (&ptr != this) { + depart(); + copy(&ptr); + } + return *this; + } + + // Smart pointer members. + void reset(T* ptr = NULL) { + depart(); + capture(ptr); + } + T* get() const { return value_; } + T* operator->() const { return value_; } + T& operator*() const { return *value_; } + + bool operator==(T* p) const { return value_ == p; } + bool operator!=(T* p) const { return value_ != p; } + template + bool operator==(linked_ptr const& ptr) const { + return value_ == ptr.get(); + } + template + bool operator!=(linked_ptr const& ptr) const { + return value_ != ptr.get(); + } + + private: + template + friend class linked_ptr; + + T* value_; + linked_ptr_internal link_; + + void depart() { + if (link_.depart()) delete value_; + } + + void capture(T* ptr) { + value_ = ptr; + link_.join_new(); + } + + template void copy(linked_ptr const* ptr) { + value_ = ptr->get(); + if (value_) + link_.join(&ptr->link_); + else + link_.join_new(); + } +}; + +template inline +bool operator==(T* ptr, const linked_ptr& x) { + return ptr == x.get(); +} + +template inline +bool operator!=(T* ptr, const linked_ptr& x) { + return ptr != x.get(); +} + +// A function to convert T* into linked_ptr +// Doing e.g. make_linked_ptr(new FooBarBaz(arg)) is a shorter notation +// for linked_ptr >(new FooBarBaz(arg)) +template +linked_ptr make_linked_ptr(T* ptr) { + return linked_ptr(ptr); +} + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_ +// Copyright 2007, Google Inc. 
+// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Google Test - The Google C++ Testing Framework +// +// This file implements a universal value printer that can print a +// value of any type T: +// +// void ::testing::internal::UniversalPrinter::Print(value, ostream_ptr); +// +// A user can teach this function how to print a class type T by +// defining either operator<<() or PrintTo() in the namespace that +// defines T. More specifically, the FIRST defined function in the +// following list will be used (assuming T is defined in namespace +// foo): +// +// 1. foo::PrintTo(const T&, ostream*) +// 2. operator<<(ostream&, const T&) defined in either foo or the +// global namespace. +// +// If none of the above is defined, it will print the debug string of +// the value if it is a protocol buffer, or print the raw bytes in the +// value otherwise. +// +// To aid debugging: when T is a reference type, the address of the +// value is also printed; when T is a (const) char pointer, both the +// pointer value and the NUL-terminated string it points to are +// printed. +// +// We also provide some convenient wrappers: +// +// // Prints a value to a string. For a (const or not) char +// // pointer, the NUL-terminated string (but not the pointer) is +// // printed. +// std::string ::testing::PrintToString(const T& value); +// +// // Prints a value tersely: for a reference type, the referenced +// // value (but not the address) is printed; for a (const or not) char +// // pointer, the NUL-terminated string (but not the pointer) is +// // printed. +// void ::testing::internal::UniversalTersePrint(const T& value, ostream*); +// +// // Prints value using the type inferred by the compiler. The difference +// // from UniversalTersePrint() is that this function prints both the +// // pointer and the NUL-terminated string for a (const or not) char pointer. 
+// void ::testing::internal::UniversalPrint(const T& value, ostream*); +// +// // Prints the fields of a tuple tersely to a string vector, one +// // element for each field. Tuple support must be enabled in +// // gtest-port.h. +// std::vector UniversalTersePrintTupleFieldsToStrings( +// const Tuple& value); +// +// Known limitation: +// +// The print primitives print the elements of an STL-style container +// using the compiler-inferred type of *iter where iter is a +// const_iterator of the container. When const_iterator is an input +// iterator but not a forward iterator, this inferred type may not +// match value_type, and the print output may be incorrect. In +// practice, this is rarely a problem as for most containers +// const_iterator is a forward iterator. We'll fix this if there's an +// actual need for it. Note that this fix cannot rely on value_type +// being defined as many user-defined container types don't have +// value_type. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ + +#include // NOLINT +#include +#include +#include +#include + +namespace testing { + +// Definitions in the 'internal' and 'internal2' name spaces are +// subject to change without notice. DO NOT USE THEM IN USER CODE! +namespace internal2 { + +// Prints the given number of bytes in the given object to the given +// ostream. +GTEST_API_ void PrintBytesInObjectTo(const unsigned char* obj_bytes, + size_t count, + ::std::ostream* os); + +// For selecting which printer to use when a given type has neither << +// nor PrintTo(). +enum TypeKind { + kProtobuf, // a protobuf type + kConvertibleToInteger, // a type implicitly convertible to BiggestInt + // (e.g. a named or unnamed enum type) + kOtherType // anything else +}; + +// TypeWithoutFormatter::PrintValue(value, os) is called +// by the universal printer to print a value of type T when neither +// operator<< nor PrintTo() is defined for T, where kTypeKind is the +// "kind" of T as defined by enum TypeKind. +template +class TypeWithoutFormatter { + public: + // This default version is called when kTypeKind is kOtherType. + static void PrintValue(const T& value, ::std::ostream* os) { + PrintBytesInObjectTo(reinterpret_cast(&value), + sizeof(value), os); + } +}; + +// We print a protobuf using its ShortDebugString() when the string +// doesn't exceed this many characters; otherwise we print it using +// DebugString() for better readability. +const size_t kProtobufOneLinerMaxLength = 50; + +template +class TypeWithoutFormatter { + public: + static void PrintValue(const T& value, ::std::ostream* os) { + const ::testing::internal::string short_str = value.ShortDebugString(); + const ::testing::internal::string pretty_str = + short_str.length() <= kProtobufOneLinerMaxLength ? + short_str : ("\n" + value.DebugString()); + *os << ("<" + pretty_str + ">"); + } +}; + +template +class TypeWithoutFormatter { + public: + // Since T has no << operator or PrintTo() but can be implicitly + // converted to BiggestInt, we print it as a BiggestInt. + // + // Most likely T is an enum type (either named or unnamed), in which + // case printing it as an integer is the desired behavior. In case + // T is not an enum, printing it as an integer is the best we can do + // given that it has no user-defined printer. + static void PrintValue(const T& value, ::std::ostream* os) { + const internal::BiggestInt kBigInt = value; + *os << kBigInt; + } +}; + +// Prints the given value to the given ostream. 
If the value is a +// protocol message, its debug string is printed; if it's an enum or +// of a type implicitly convertible to BiggestInt, it's printed as an +// integer; otherwise the bytes in the value are printed. This is +// what UniversalPrinter::Print() does when it knows nothing about +// type T and T has neither << operator nor PrintTo(). +// +// A user can override this behavior for a class type Foo by defining +// a << operator in the namespace where Foo is defined. +// +// We put this operator in namespace 'internal2' instead of 'internal' +// to simplify the implementation, as much code in 'internal' needs to +// use << in STL, which would conflict with our own << were it defined +// in 'internal'. +// +// Note that this operator<< takes a generic std::basic_ostream type instead of the more restricted std::ostream. If +// we define it to take an std::ostream instead, we'll get an +// "ambiguous overloads" compiler error when trying to print a type +// Foo that supports streaming to std::basic_ostream, as the compiler cannot tell whether +// operator<<(std::ostream&, const T&) or +// operator<<(std::basic_stream, const Foo&) is more +// specific. +template +::std::basic_ostream& operator<<( + ::std::basic_ostream& os, const T& x) { + TypeWithoutFormatter::value ? kProtobuf : + internal::ImplicitlyConvertible::value ? + kConvertibleToInteger : kOtherType)>::PrintValue(x, &os); + return os; +} + +} // namespace internal2 +} // namespace testing + +// This namespace MUST NOT BE NESTED IN ::testing, or the name look-up +// magic needed for implementing UniversalPrinter won't work. +namespace testing_internal { + +// Used to print a value that is not an STL-style container when the +// user doesn't define PrintTo() for it. +template +void DefaultPrintNonContainerTo(const T& value, ::std::ostream* os) { + // With the following statement, during unqualified name lookup, + // testing::internal2::operator<< appears as if it was declared in + // the nearest enclosing namespace that contains both + // ::testing_internal and ::testing::internal2, i.e. the global + // namespace. For more details, refer to the C++ Standard section + // 7.3.4-1 [namespace.udir]. This allows us to fall back onto + // testing::internal2::operator<< in case T doesn't come with a << + // operator. + // + // We cannot write 'using ::testing::internal2::operator<<;', which + // gcc 3.3 fails to compile due to a compiler bug. + using namespace ::testing::internal2; // NOLINT + + // Assuming T is defined in namespace foo, in the next statement, + // the compiler will consider all of: + // + // 1. foo::operator<< (thanks to Koenig look-up), + // 2. ::operator<< (as the current namespace is enclosed in ::), + // 3. testing::internal2::operator<< (thanks to the using statement above). + // + // The operator<< whose type matches T best will be picked. + // + // We deliberately allow #2 to be a candidate, as sometimes it's + // impossible to define #1 (e.g. when foo is ::std, defining + // anything in it is undefined behavior unless you are a compiler + // vendor.). + *os << value; +} + +} // namespace testing_internal + +namespace testing { +namespace internal { + +// UniversalPrinter::Print(value, ostream_ptr) prints the given +// value to the given ostream. The caller must ensure that +// 'ostream_ptr' is not NULL, or the behavior is undefined. 
+// +// We define UniversalPrinter as a class template (as opposed to a +// function template), as we need to partially specialize it for +// reference types, which cannot be done with function templates. +template +class UniversalPrinter; + +template +void UniversalPrint(const T& value, ::std::ostream* os); + +// Used to print an STL-style container when the user doesn't define +// a PrintTo() for it. +template +void DefaultPrintTo(IsContainer /* dummy */, + false_type /* is not a pointer */, + const C& container, ::std::ostream* os) { + const size_t kMaxCount = 32; // The maximum number of elements to print. + *os << '{'; + size_t count = 0; + for (typename C::const_iterator it = container.begin(); + it != container.end(); ++it, ++count) { + if (count > 0) { + *os << ','; + if (count == kMaxCount) { // Enough has been printed. + *os << " ..."; + break; + } + } + *os << ' '; + // We cannot call PrintTo(*it, os) here as PrintTo() doesn't + // handle *it being a native array. + internal::UniversalPrint(*it, os); + } + + if (count > 0) { + *os << ' '; + } + *os << '}'; +} + +// Used to print a pointer that is neither a char pointer nor a member +// pointer, when the user doesn't define PrintTo() for it. (A member +// variable pointer or member function pointer doesn't really point to +// a location in the address space. Their representation is +// implementation-defined. Therefore they will be printed as raw +// bytes.) +template +void DefaultPrintTo(IsNotContainer /* dummy */, + true_type /* is a pointer */, + T* p, ::std::ostream* os) { + if (p == NULL) { + *os << "NULL"; + } else { + // C++ doesn't allow casting from a function pointer to any object + // pointer. + // + // IsTrue() silences warnings: "Condition is always true", + // "unreachable code". + if (IsTrue(ImplicitlyConvertible::value)) { + // T is not a function type. We just call << to print p, + // relying on ADL to pick up user-defined << for their pointer + // types, if any. + *os << p; + } else { + // T is a function type, so '*os << p' doesn't do what we want + // (it just prints p as bool). We want to print p as a const + // void*. However, we cannot cast it to const void* directly, + // even using reinterpret_cast, as earlier versions of gcc + // (e.g. 3.4.5) cannot compile the cast when p is a function + // pointer. Casting to UInt64 first solves the problem. + *os << reinterpret_cast( + reinterpret_cast(p)); + } + } +} + +// Used to print a non-container, non-pointer value when the user +// doesn't define PrintTo() for it. +template +void DefaultPrintTo(IsNotContainer /* dummy */, + false_type /* is not a pointer */, + const T& value, ::std::ostream* os) { + ::testing_internal::DefaultPrintNonContainerTo(value, os); +} + +// Prints the given value using the << operator if it has one; +// otherwise prints the bytes in it. This is what +// UniversalPrinter::Print() does when PrintTo() is not specialized +// or overloaded for type T. +// +// A user can override this behavior for a class type Foo by defining +// an overload of PrintTo() in the namespace where Foo is defined. We +// give the user this option as sometimes defining a << operator for +// Foo is not desirable (e.g. the coding style may prevent doing it, +// or there is already a << operator but it doesn't do what the user +// wants). +template +void PrintTo(const T& value, ::std::ostream* os) { + // DefaultPrintTo() is overloaded. The type of its first two + // arguments determine which version will be picked. 
If T is an + // STL-style container, the version for container will be called; if + // T is a pointer, the pointer version will be called; otherwise the + // generic version will be called. + // + // Note that we check for container types here, prior to we check + // for protocol message types in our operator<<. The rationale is: + // + // For protocol messages, we want to give people a chance to + // override Google Mock's format by defining a PrintTo() or + // operator<<. For STL containers, other formats can be + // incompatible with Google Mock's format for the container + // elements; therefore we check for container types here to ensure + // that our format is used. + // + // The second argument of DefaultPrintTo() is needed to bypass a bug + // in Symbian's C++ compiler that prevents it from picking the right + // overload between: + // + // PrintTo(const T& x, ...); + // PrintTo(T* x, ...); + DefaultPrintTo(IsContainerTest(0), is_pointer(), value, os); +} + +// The following list of PrintTo() overloads tells +// UniversalPrinter::Print() how to print standard types (built-in +// types, strings, plain arrays, and pointers). + +// Overloads for various char types. +GTEST_API_ void PrintTo(unsigned char c, ::std::ostream* os); +GTEST_API_ void PrintTo(signed char c, ::std::ostream* os); +inline void PrintTo(char c, ::std::ostream* os) { + // When printing a plain char, we always treat it as unsigned. This + // way, the output won't be affected by whether the compiler thinks + // char is signed or not. + PrintTo(static_cast(c), os); +} + +// Overloads for other simple built-in types. +inline void PrintTo(bool x, ::std::ostream* os) { + *os << (x ? "true" : "false"); +} + +// Overload for wchar_t type. +// Prints a wchar_t as a symbol if it is printable or as its internal +// code otherwise and also as its decimal code (except for L'\0'). +// The L'\0' char is printed as "L'\\0'". The decimal code is printed +// as signed integer when wchar_t is implemented by the compiler +// as a signed type and is printed as an unsigned integer when wchar_t +// is implemented as an unsigned type. +GTEST_API_ void PrintTo(wchar_t wc, ::std::ostream* os); + +// Overloads for C strings. +GTEST_API_ void PrintTo(const char* s, ::std::ostream* os); +inline void PrintTo(char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} + +// signed/unsigned char is often used for representing binary data, so +// we print pointers to it as void* to be safe. +inline void PrintTo(const signed char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +inline void PrintTo(signed char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +inline void PrintTo(const unsigned char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +inline void PrintTo(unsigned char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} + +// MSVC can be configured to define wchar_t as a typedef of unsigned +// short. It defines _NATIVE_WCHAR_T_DEFINED when wchar_t is a native +// type. When wchar_t is a typedef, defining an overload for const +// wchar_t* would cause unsigned short* be printed as a wide string, +// possibly causing invalid memory accesses. +#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED) +// Overloads for wide C strings +GTEST_API_ void PrintTo(const wchar_t* s, ::std::ostream* os); +inline void PrintTo(wchar_t* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +#endif + +// Overload for C arrays. Multi-dimensional arrays are printed +// properly. 
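+
+// NOTE (illustrative sketch, not part of the upstream Google Test header):
+// the PrintTo() dispatch defined above is the customization point that user
+// code overrides.  A minimal example, assuming a hypothetical type bar::Point:
+//
+//   namespace bar {
+//   struct Point { int x; int y; };
+//   // Picked up via argument-dependent lookup from UniversalPrinter<T>::Print().
+//   void PrintTo(const Point& p, ::std::ostream* os) {
+//     *os << "(" << p.x << ", " << p.y << ")";
+//   }
+//   }  // namespace bar
+//
+// With this in place, assertion failures involving bar::Point print text such
+// as "(1, 2)" instead of raw bytes, and ::testing::PrintToString(p) yields the
+// same text.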
+ +// Prints the given number of elements in an array, without printing +// the curly braces. +template +void PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) { + UniversalPrint(a[0], os); + for (size_t i = 1; i != count; i++) { + *os << ", "; + UniversalPrint(a[i], os); + } +} + +// Overloads for ::string and ::std::string. +#if GTEST_HAS_GLOBAL_STRING +GTEST_API_ void PrintStringTo(const ::string&s, ::std::ostream* os); +inline void PrintTo(const ::string& s, ::std::ostream* os) { + PrintStringTo(s, os); +} +#endif // GTEST_HAS_GLOBAL_STRING + +GTEST_API_ void PrintStringTo(const ::std::string&s, ::std::ostream* os); +inline void PrintTo(const ::std::string& s, ::std::ostream* os) { + PrintStringTo(s, os); +} + +// Overloads for ::wstring and ::std::wstring. +#if GTEST_HAS_GLOBAL_WSTRING +GTEST_API_ void PrintWideStringTo(const ::wstring&s, ::std::ostream* os); +inline void PrintTo(const ::wstring& s, ::std::ostream* os) { + PrintWideStringTo(s, os); +} +#endif // GTEST_HAS_GLOBAL_WSTRING + +#if GTEST_HAS_STD_WSTRING +GTEST_API_ void PrintWideStringTo(const ::std::wstring&s, ::std::ostream* os); +inline void PrintTo(const ::std::wstring& s, ::std::ostream* os) { + PrintWideStringTo(s, os); +} +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_TR1_TUPLE +// Overload for ::std::tr1::tuple. Needed for printing function arguments, +// which are packed as tuples. + +// Helper function for printing a tuple. T must be instantiated with +// a tuple type. +template +void PrintTupleTo(const T& t, ::std::ostream* os); + +// Overloaded PrintTo() for tuples of various arities. We support +// tuples of up-to 10 fields. The following implementation works +// regardless of whether tr1::tuple is implemented using the +// non-standard variadic template feature or not. + +inline void PrintTo(const ::std::tr1::tuple<>& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo( + const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} +#endif // GTEST_HAS_TR1_TUPLE + +// Overload for std::pair. +template +void PrintTo(const ::std::pair& value, ::std::ostream* os) { + *os << '('; + // We cannot use UniversalPrint(value.first, os) here, as T1 may be + // a reference type. The same for printing value.second. + UniversalPrinter::Print(value.first, os); + *os << ", "; + UniversalPrinter::Print(value.second, os); + *os << ')'; +} + +// Implements printing a non-reference type T by letting the compiler +// pick the right overload of PrintTo() for T. 
+template +class UniversalPrinter { + public: + // MSVC warns about adding const to a function type, so we want to + // disable the warning. +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4180) // Temporarily disables warning 4180. +#endif // _MSC_VER + + // Note: we deliberately don't call this PrintTo(), as that name + // conflicts with ::testing::internal::PrintTo in the body of the + // function. + static void Print(const T& value, ::std::ostream* os) { + // By default, ::testing::internal::PrintTo() is used for printing + // the value. + // + // Thanks to Koenig look-up, if T is a class and has its own + // PrintTo() function defined in its namespace, that function will + // be visible here. Since it is more specific than the generic ones + // in ::testing::internal, it will be picked by the compiler in the + // following statement - exactly what we want. + PrintTo(value, os); + } + +#ifdef _MSC_VER +# pragma warning(pop) // Restores the warning state. +#endif // _MSC_VER +}; + +// UniversalPrintArray(begin, len, os) prints an array of 'len' +// elements, starting at address 'begin'. +template +void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) { + if (len == 0) { + *os << "{}"; + } else { + *os << "{ "; + const size_t kThreshold = 18; + const size_t kChunkSize = 8; + // If the array has more than kThreshold elements, we'll have to + // omit some details by printing only the first and the last + // kChunkSize elements. + // TODO(wan@google.com): let the user control the threshold using a flag. + if (len <= kThreshold) { + PrintRawArrayTo(begin, len, os); + } else { + PrintRawArrayTo(begin, kChunkSize, os); + *os << ", ..., "; + PrintRawArrayTo(begin + len - kChunkSize, kChunkSize, os); + } + *os << " }"; + } +} +// This overload prints a (const) char array compactly. +GTEST_API_ void UniversalPrintArray(const char* begin, + size_t len, + ::std::ostream* os); + +// Implements printing an array type T[N]. +template +class UniversalPrinter { + public: + // Prints the given array, omitting some elements when there are too + // many. + static void Print(const T (&a)[N], ::std::ostream* os) { + UniversalPrintArray(a, N, os); + } +}; + +// Implements printing a reference type T&. +template +class UniversalPrinter { + public: + // MSVC warns about adding const to a function type, so we want to + // disable the warning. +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4180) // Temporarily disables warning 4180. +#endif // _MSC_VER + + static void Print(const T& value, ::std::ostream* os) { + // Prints the address of the value. We use reinterpret_cast here + // as static_cast doesn't compile when T is a function type. + *os << "@" << reinterpret_cast(&value) << " "; + + // Then prints the value itself. + UniversalPrint(value, os); + } + +#ifdef _MSC_VER +# pragma warning(pop) // Restores the warning state. +#endif // _MSC_VER +}; + +// Prints a value tersely: for a reference type, the referenced value +// (but not the address) is printed; for a (const) char pointer, the +// NUL-terminated string (but not the pointer) is printed. 
+template +void UniversalTersePrint(const T& value, ::std::ostream* os) { + UniversalPrint(value, os); +} +inline void UniversalTersePrint(const char* str, ::std::ostream* os) { + if (str == NULL) { + *os << "NULL"; + } else { + UniversalPrint(string(str), os); + } +} +inline void UniversalTersePrint(char* str, ::std::ostream* os) { + UniversalTersePrint(static_cast(str), os); +} + +// Prints a value using the type inferred by the compiler. The +// difference between this and UniversalTersePrint() is that for a +// (const) char pointer, this prints both the pointer and the +// NUL-terminated string. +template +void UniversalPrint(const T& value, ::std::ostream* os) { + UniversalPrinter::Print(value, os); +} + +#if GTEST_HAS_TR1_TUPLE +typedef ::std::vector Strings; + +// This helper template allows PrintTo() for tuples and +// UniversalTersePrintTupleFieldsToStrings() to be defined by +// induction on the number of tuple fields. The idea is that +// TuplePrefixPrinter::PrintPrefixTo(t, os) prints the first N +// fields in tuple t, and can be defined in terms of +// TuplePrefixPrinter. + +// The inductive case. +template +struct TuplePrefixPrinter { + // Prints the first N fields of a tuple. + template + static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) { + TuplePrefixPrinter::PrintPrefixTo(t, os); + *os << ", "; + UniversalPrinter::type> + ::Print(::std::tr1::get(t), os); + } + + // Tersely prints the first N fields of a tuple to a string vector, + // one element for each field. + template + static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) { + TuplePrefixPrinter::TersePrintPrefixToStrings(t, strings); + ::std::stringstream ss; + UniversalTersePrint(::std::tr1::get(t), &ss); + strings->push_back(ss.str()); + } +}; + +// Base cases. +template <> +struct TuplePrefixPrinter<0> { + template + static void PrintPrefixTo(const Tuple&, ::std::ostream*) {} + + template + static void TersePrintPrefixToStrings(const Tuple&, Strings*) {} +}; +// We have to specialize the entire TuplePrefixPrinter<> class +// template here, even though the definition of +// TersePrintPrefixToStrings() is the same as the generic version, as +// Embarcadero (formerly CodeGear, formerly Borland) C++ doesn't +// support specializing a method template of a class template. +template <> +struct TuplePrefixPrinter<1> { + template + static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) { + UniversalPrinter::type>:: + Print(::std::tr1::get<0>(t), os); + } + + template + static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) { + ::std::stringstream ss; + UniversalTersePrint(::std::tr1::get<0>(t), &ss); + strings->push_back(ss.str()); + } +}; + +// Helper function for printing a tuple. T must be instantiated with +// a tuple type. +template +void PrintTupleTo(const T& t, ::std::ostream* os) { + *os << "("; + TuplePrefixPrinter< ::std::tr1::tuple_size::value>:: + PrintPrefixTo(t, os); + *os << ")"; +} + +// Prints the fields of a tuple tersely to a string vector, one +// element for each field. See the comment before +// UniversalTersePrint() for how we define "tersely". 
+template +Strings UniversalTersePrintTupleFieldsToStrings(const Tuple& value) { + Strings result; + TuplePrefixPrinter< ::std::tr1::tuple_size::value>:: + TersePrintPrefixToStrings(value, &result); + return result; +} +#endif // GTEST_HAS_TR1_TUPLE + +} // namespace internal + +template +::std::string PrintToString(const T& value) { + ::std::stringstream ss; + internal::UniversalTersePrint(value, &ss); + return ss.str(); +} + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ + +#if GTEST_HAS_PARAM_TEST + +namespace testing { +namespace internal { + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Outputs a message explaining invalid registration of different +// fixture class for the same test case. This may happen when +// TEST_P macro is used to define two tests with the same name +// but in different namespaces. +GTEST_API_ void ReportInvalidTestCaseType(const char* test_case_name, + const char* file, int line); + +template class ParamGeneratorInterface; +template class ParamGenerator; + +// Interface for iterating over elements provided by an implementation +// of ParamGeneratorInterface. +template +class ParamIteratorInterface { + public: + virtual ~ParamIteratorInterface() {} + // A pointer to the base generator instance. + // Used only for the purposes of iterator comparison + // to make sure that two iterators belong to the same generator. + virtual const ParamGeneratorInterface* BaseGenerator() const = 0; + // Advances iterator to point to the next element + // provided by the generator. The caller is responsible + // for not calling Advance() on an iterator equal to + // BaseGenerator()->End(). + virtual void Advance() = 0; + // Clones the iterator object. Used for implementing copy semantics + // of ParamIterator. + virtual ParamIteratorInterface* Clone() const = 0; + // Dereferences the current iterator and provides (read-only) access + // to the pointed value. It is the caller's responsibility not to call + // Current() on an iterator equal to BaseGenerator()->End(). + // Used for implementing ParamGenerator::operator*(). + virtual const T* Current() const = 0; + // Determines whether the given iterator and other point to the same + // element in the sequence generated by the generator. + // Used for implementing ParamGenerator::operator==(). + virtual bool Equals(const ParamIteratorInterface& other) const = 0; +}; + +// Class iterating over elements provided by an implementation of +// ParamGeneratorInterface. It wraps ParamIteratorInterface +// and implements the const forward iterator concept. +template +class ParamIterator { + public: + typedef T value_type; + typedef const T& reference; + typedef ptrdiff_t difference_type; + + // ParamIterator assumes ownership of the impl_ pointer. + ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {} + ParamIterator& operator=(const ParamIterator& other) { + if (this != &other) + impl_.reset(other.impl_->Clone()); + return *this; + } + + const T& operator*() const { return *impl_->Current(); } + const T* operator->() const { return impl_->Current(); } + // Prefix version of operator++. + ParamIterator& operator++() { + impl_->Advance(); + return *this; + } + // Postfix version of operator++. 
+ ParamIterator operator++(int /*unused*/) { + ParamIteratorInterface* clone = impl_->Clone(); + impl_->Advance(); + return ParamIterator(clone); + } + bool operator==(const ParamIterator& other) const { + return impl_.get() == other.impl_.get() || impl_->Equals(*other.impl_); + } + bool operator!=(const ParamIterator& other) const { + return !(*this == other); + } + + private: + friend class ParamGenerator; + explicit ParamIterator(ParamIteratorInterface* impl) : impl_(impl) {} + scoped_ptr > impl_; +}; + +// ParamGeneratorInterface is the binary interface to access generators +// defined in other translation units. +template +class ParamGeneratorInterface { + public: + typedef T ParamType; + + virtual ~ParamGeneratorInterface() {} + + // Generator interface definition + virtual ParamIteratorInterface* Begin() const = 0; + virtual ParamIteratorInterface* End() const = 0; +}; + +// Wraps ParamGeneratorInterface and provides general generator syntax +// compatible with the STL Container concept. +// This class implements copy initialization semantics and the contained +// ParamGeneratorInterface instance is shared among all copies +// of the original object. This is possible because that instance is immutable. +template +class ParamGenerator { + public: + typedef ParamIterator iterator; + + explicit ParamGenerator(ParamGeneratorInterface* impl) : impl_(impl) {} + ParamGenerator(const ParamGenerator& other) : impl_(other.impl_) {} + + ParamGenerator& operator=(const ParamGenerator& other) { + impl_ = other.impl_; + return *this; + } + + iterator begin() const { return iterator(impl_->Begin()); } + iterator end() const { return iterator(impl_->End()); } + + private: + linked_ptr > impl_; +}; + +// Generates values from a range of two comparable values. Can be used to +// generate sequences of user-defined types that implement operator+() and +// operator<(). +// This class is used in the Range() function. +template +class RangeGenerator : public ParamGeneratorInterface { + public: + RangeGenerator(T begin, T end, IncrementT step) + : begin_(begin), end_(end), + step_(step), end_index_(CalculateEndIndex(begin, end, step)) {} + virtual ~RangeGenerator() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, begin_, 0, step_); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, end_, end_index_, step_); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, T value, int index, + IncrementT step) + : base_(base), value_(value), index_(index), step_(step) {} + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + virtual void Advance() { + value_ = value_ + step_; + index_++; + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const T* Current() const { return &value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." 
<< std::endl; + const int other_index = + CheckedDowncastToActualType(&other)->index_; + return index_ == other_index; + } + + private: + Iterator(const Iterator& other) + : ParamIteratorInterface(), + base_(other.base_), value_(other.value_), index_(other.index_), + step_(other.step_) {} + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + T value_; + int index_; + const IncrementT step_; + }; // class RangeGenerator::Iterator + + static int CalculateEndIndex(const T& begin, + const T& end, + const IncrementT& step) { + int end_index = 0; + for (T i = begin; i < end; i = i + step) + end_index++; + return end_index; + } + + // No implementation - assignment is unsupported. + void operator=(const RangeGenerator& other); + + const T begin_; + const T end_; + const IncrementT step_; + // The index for the end() iterator. All the elements in the generated + // sequence are indexed (0-based) to aid iterator comparison. + const int end_index_; +}; // class RangeGenerator + + +// Generates values from a pair of STL-style iterators. Used in the +// ValuesIn() function. The elements are copied from the source range +// since the source can be located on the stack, and the generator +// is likely to persist beyond that stack frame. +template +class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface { + public: + template + ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end) + : container_(begin, end) {} + virtual ~ValuesInIteratorRangeGenerator() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, container_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, container_.end()); + } + + private: + typedef typename ::std::vector ContainerType; + + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + typename ContainerType::const_iterator iterator) + : base_(base), iterator_(iterator) {} + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + virtual void Advance() { + ++iterator_; + value_.reset(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + // We need to use cached value referenced by iterator_ because *iterator_ + // can return a temporary object (and of type other then T), so just + // having "return &*iterator_;" doesn't work. + // value_ is updated here and not in Advance() because Advance() + // can advance iterator_ beyond the end of the range, and we cannot + // detect that fact. The client code, on the other hand, is + // responsible for not calling Current() on an out-of-range iterator. + virtual const T* Current() const { + if (value_.get() == NULL) + value_.reset(new T(*iterator_)); + return value_.get(); + } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + return iterator_ == + CheckedDowncastToActualType(&other)->iterator_; + } + + private: + Iterator(const Iterator& other) + // The explicit constructor call suppresses a false warning + // emitted by gcc when supplied with the -Wextra option. 
+ : ParamIteratorInterface(), + base_(other.base_), + iterator_(other.iterator_) {} + + const ParamGeneratorInterface* const base_; + typename ContainerType::const_iterator iterator_; + // A cached value of *iterator_. We keep it here to allow access by + // pointer in the wrapping iterator's operator->(). + // value_ needs to be mutable to be accessed in Current(). + // Use of scoped_ptr helps manage cached value's lifetime, + // which is bound by the lifespan of the iterator itself. + mutable scoped_ptr value_; + }; // class ValuesInIteratorRangeGenerator::Iterator + + // No implementation - assignment is unsupported. + void operator=(const ValuesInIteratorRangeGenerator& other); + + const ContainerType container_; +}; // class ValuesInIteratorRangeGenerator + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Stores a parameter value and later creates tests parameterized with that +// value. +template +class ParameterizedTestFactory : public TestFactoryBase { + public: + typedef typename TestClass::ParamType ParamType; + explicit ParameterizedTestFactory(ParamType parameter) : + parameter_(parameter) {} + virtual Test* CreateTest() { + TestClass::SetParam(¶meter_); + return new TestClass(); + } + + private: + const ParamType parameter_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory); +}; + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// TestMetaFactoryBase is a base class for meta-factories that create +// test factories for passing into MakeAndRegisterTestInfo function. +template +class TestMetaFactoryBase { + public: + virtual ~TestMetaFactoryBase() {} + + virtual TestFactoryBase* CreateTestFactory(ParamType parameter) = 0; +}; + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// TestMetaFactory creates test factories for passing into +// MakeAndRegisterTestInfo function. Since MakeAndRegisterTestInfo receives +// ownership of test factory pointer, same factory object cannot be passed +// into that method twice. But ParameterizedTestCaseInfo is going to call +// it for each Test/Parameter value combination. Thus it needs meta factory +// creator class. +template +class TestMetaFactory + : public TestMetaFactoryBase { + public: + typedef typename TestCase::ParamType ParamType; + + TestMetaFactory() {} + + virtual TestFactoryBase* CreateTestFactory(ParamType parameter) { + return new ParameterizedTestFactory(parameter); + } + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory); +}; + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// ParameterizedTestCaseInfoBase is a generic interface +// to ParameterizedTestCaseInfo classes. ParameterizedTestCaseInfoBase +// accumulates test information provided by TEST_P macro invocations +// and generators provided by INSTANTIATE_TEST_CASE_P macro invocations +// and uses that information to register all resulting test instances +// in RegisterTests method. The ParameterizeTestCaseRegistry class holds +// a collection of pointers to the ParameterizedTestCaseInfo objects +// and calls RegisterTests() on each of them when asked. +class ParameterizedTestCaseInfoBase { + public: + virtual ~ParameterizedTestCaseInfoBase() {} + + // Base part of test case name for display purposes. + virtual const string& GetTestCaseName() const = 0; + // Test case id to verify identity. + virtual TypeId GetTestCaseTypeId() const = 0; + // UnitTest class invokes this method to register tests in this + // test case right before running them in RUN_ALL_TESTS macro. 
+ // This method should not be called more then once on any single + // instance of a ParameterizedTestCaseInfoBase derived class. + virtual void RegisterTests() = 0; + + protected: + ParameterizedTestCaseInfoBase() {} + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfoBase); +}; + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// ParameterizedTestCaseInfo accumulates tests obtained from TEST_P +// macro invocations for a particular test case and generators +// obtained from INSTANTIATE_TEST_CASE_P macro invocations for that +// test case. It registers tests with all values generated by all +// generators when asked. +template +class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase { + public: + // ParamType and GeneratorCreationFunc are private types but are required + // for declarations of public methods AddTestPattern() and + // AddTestCaseInstantiation(). + typedef typename TestCase::ParamType ParamType; + // A function that returns an instance of appropriate generator type. + typedef ParamGenerator(GeneratorCreationFunc)(); + + explicit ParameterizedTestCaseInfo(const char* name) + : test_case_name_(name) {} + + // Test case base name for display purposes. + virtual const string& GetTestCaseName() const { return test_case_name_; } + // Test case id to verify identity. + virtual TypeId GetTestCaseTypeId() const { return GetTypeId(); } + // TEST_P macro uses AddTestPattern() to record information + // about a single test in a LocalTestInfo structure. + // test_case_name is the base name of the test case (without invocation + // prefix). test_base_name is the name of an individual test without + // parameter index. For the test SequenceA/FooTest.DoBar/1 FooTest is + // test case base name and DoBar is test base name. + void AddTestPattern(const char* test_case_name, + const char* test_base_name, + TestMetaFactoryBase* meta_factory) { + tests_.push_back(linked_ptr(new TestInfo(test_case_name, + test_base_name, + meta_factory))); + } + // INSTANTIATE_TEST_CASE_P macro uses AddGenerator() to record information + // about a generator. + int AddTestCaseInstantiation(const string& instantiation_name, + GeneratorCreationFunc* func, + const char* /* file */, + int /* line */) { + instantiations_.push_back(::std::make_pair(instantiation_name, func)); + return 0; // Return value used only to run this method in namespace scope. + } + // UnitTest class invokes this method to register tests in this test case + // test cases right before running tests in RUN_ALL_TESTS macro. + // This method should not be called more then once on any single + // instance of a ParameterizedTestCaseInfoBase derived class. + // UnitTest has a guard to prevent from calling this method more then once. 
+ virtual void RegisterTests() { + for (typename TestInfoContainer::iterator test_it = tests_.begin(); + test_it != tests_.end(); ++test_it) { + linked_ptr test_info = *test_it; + for (typename InstantiationContainer::iterator gen_it = + instantiations_.begin(); gen_it != instantiations_.end(); + ++gen_it) { + const string& instantiation_name = gen_it->first; + ParamGenerator generator((*gen_it->second)()); + + Message test_case_name_stream; + if ( !instantiation_name.empty() ) + test_case_name_stream << instantiation_name << "/"; + test_case_name_stream << test_info->test_case_base_name; + + int i = 0; + for (typename ParamGenerator::iterator param_it = + generator.begin(); + param_it != generator.end(); ++param_it, ++i) { + Message test_name_stream; + test_name_stream << test_info->test_base_name << "/" << i; + MakeAndRegisterTestInfo( + test_case_name_stream.GetString().c_str(), + test_name_stream.GetString().c_str(), + NULL, // No type parameter. + PrintToString(*param_it).c_str(), + GetTestCaseTypeId(), + TestCase::SetUpTestCase, + TestCase::TearDownTestCase, + test_info->test_meta_factory->CreateTestFactory(*param_it)); + } // for param_it + } // for gen_it + } // for test_it + } // RegisterTests + + private: + // LocalTestInfo structure keeps information about a single test registered + // with TEST_P macro. + struct TestInfo { + TestInfo(const char* a_test_case_base_name, + const char* a_test_base_name, + TestMetaFactoryBase* a_test_meta_factory) : + test_case_base_name(a_test_case_base_name), + test_base_name(a_test_base_name), + test_meta_factory(a_test_meta_factory) {} + + const string test_case_base_name; + const string test_base_name; + const scoped_ptr > test_meta_factory; + }; + typedef ::std::vector > TestInfoContainer; + // Keeps pairs of + // received from INSTANTIATE_TEST_CASE_P macros. + typedef ::std::vector > + InstantiationContainer; + + const string test_case_name_; + TestInfoContainer tests_; + InstantiationContainer instantiations_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfo); +}; // class ParameterizedTestCaseInfo + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// ParameterizedTestCaseRegistry contains a map of ParameterizedTestCaseInfoBase +// classes accessed by test case names. TEST_P and INSTANTIATE_TEST_CASE_P +// macros use it to locate their corresponding ParameterizedTestCaseInfo +// descriptors. +class ParameterizedTestCaseRegistry { + public: + ParameterizedTestCaseRegistry() {} + ~ParameterizedTestCaseRegistry() { + for (TestCaseInfoContainer::iterator it = test_case_infos_.begin(); + it != test_case_infos_.end(); ++it) { + delete *it; + } + } + + // Looks up or creates and returns a structure containing information about + // tests and instantiations of a particular test case. + template + ParameterizedTestCaseInfo* GetTestCasePatternHolder( + const char* test_case_name, + const char* file, + int line) { + ParameterizedTestCaseInfo* typed_test_info = NULL; + for (TestCaseInfoContainer::iterator it = test_case_infos_.begin(); + it != test_case_infos_.end(); ++it) { + if ((*it)->GetTestCaseName() == test_case_name) { + if ((*it)->GetTestCaseTypeId() != GetTypeId()) { + // Complain about incorrect usage of Google Test facilities + // and terminate the program since we cannot guaranty correct + // test case setup and tear-down in this case. 
+ ReportInvalidTestCaseType(test_case_name, file, line); + posix::Abort(); + } else { + // At this point we are sure that the object we found is of the same + // type we are looking for, so we downcast it to that type + // without further checks. + typed_test_info = CheckedDowncastToActualType< + ParameterizedTestCaseInfo >(*it); + } + break; + } + } + if (typed_test_info == NULL) { + typed_test_info = new ParameterizedTestCaseInfo(test_case_name); + test_case_infos_.push_back(typed_test_info); + } + return typed_test_info; + } + void RegisterTests() { + for (TestCaseInfoContainer::iterator it = test_case_infos_.begin(); + it != test_case_infos_.end(); ++it) { + (*it)->RegisterTests(); + } + } + + private: + typedef ::std::vector TestCaseInfoContainer; + + TestCaseInfoContainer test_case_infos_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseRegistry); +}; + +} // namespace internal +} // namespace testing + +#endif // GTEST_HAS_PARAM_TEST + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_ +// This file was GENERATED by command: +// pump.py gtest-param-util-generated.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: vladl@google.com (Vlad Losev) + +// Type and function utilities for implementing parameterized tests. +// This file is generated by a SCRIPT. DO NOT EDIT BY HAND! +// +// Currently Google Test supports at most 50 arguments in Values, +// and at most 10 arguments in Combine. Please contact +// googletestframework@googlegroups.com if you need more. +// Please note that the number of arguments to Combine is limited +// by the maximum arity of the implementation of tr1::tuple which is +// currently set at 10. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_ + +// scripts/fuse_gtest.py depends on gtest's own header being #included +// *unconditionally*. Therefore these #includes cannot be moved +// inside #if GTEST_HAS_PARAM_TEST. 
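+
+// NOTE (illustrative sketch, not part of the upstream Google Test header):
+// the ValueArrayN helpers defined below back the Values() generator.  A
+// minimal value-parameterized test built on them, using hypothetical names,
+// looks like:
+//
+//   class FooTest : public ::testing::TestWithParam<int> {};
+//
+//   TEST_P(FooTest, DoBar) {
+//     EXPECT_GE(GetParam(), 0);  // GetParam() returns the current parameter.
+//   }
+//
+//   INSTANTIATE_TEST_CASE_P(SequenceA, FooTest, ::testing::Values(1, 2, 3));
+//
+// This registers SequenceA/FooTest.DoBar/0 through SequenceA/FooTest.DoBar/2,
+// one test per value, following the naming scheme used by
+// ParameterizedTestCaseInfo::RegisterTests() above.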
+ +#if GTEST_HAS_PARAM_TEST + +namespace testing { + +// Forward declarations of ValuesIn(), which is implemented in +// include/gtest/gtest-param-test.h. +template +internal::ParamGenerator< + typename ::testing::internal::IteratorTraits::value_type> +ValuesIn(ForwardIterator begin, ForwardIterator end); + +template +internal::ParamGenerator ValuesIn(const T (&array)[N]); + +template +internal::ParamGenerator ValuesIn( + const Container& container); + +namespace internal { + +// Used in the Values() function to provide polymorphic capabilities. +template +class ValueArray1 { + public: + explicit ValueArray1(T1 v1) : v1_(v1) {} + + template + operator ParamGenerator() const { return ValuesIn(&v1_, &v1_ + 1); } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray1& other); + + const T1 v1_; +}; + +template +class ValueArray2 { + public: + ValueArray2(T1 v1, T2 v2) : v1_(v1), v2_(v2) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray2& other); + + const T1 v1_; + const T2 v2_; +}; + +template +class ValueArray3 { + public: + ValueArray3(T1 v1, T2 v2, T3 v3) : v1_(v1), v2_(v2), v3_(v3) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray3& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; +}; + +template +class ValueArray4 { + public: + ValueArray4(T1 v1, T2 v2, T3 v3, T4 v4) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray4& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; +}; + +template +class ValueArray5 { + public: + ValueArray5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray5& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; +}; + +template +class ValueArray6 { + public: + ValueArray6(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray6& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; +}; + +template +class ValueArray7 { + public: + ValueArray7(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray7& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; +}; + +template +class ValueArray8 { + public: + ValueArray8(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray8& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; +}; + +template +class ValueArray9 { + public: + ValueArray9(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray9& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; +}; + +template +class ValueArray10 { + public: + ValueArray10(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray10& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; +}; + +template +class ValueArray11 { + public: + ValueArray11(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray11& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; +}; + +template +class ValueArray12 { + public: + ValueArray12(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray12& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; +}; + +template +class ValueArray13 { + public: + ValueArray13(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray13& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; +}; + +template +class ValueArray14 { + public: + ValueArray14(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray14& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; +}; + +template +class ValueArray15 { + public: + ValueArray15(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray15& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; +}; + +template +class ValueArray16 { + public: + ValueArray16(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray16& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; +}; + +template +class ValueArray17 { + public: + ValueArray17(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, + T17 v17) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray17& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; +}; + +template +class ValueArray18 { + public: + ValueArray18(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray18& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; +}; + +template +class ValueArray19 { + public: + ValueArray19(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray19& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; +}; + +template +class ValueArray20 { + public: + ValueArray20(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray20& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; +}; + +template +class ValueArray21 { + public: + ValueArray21(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray21& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; +}; + +template +class ValueArray22 { + public: + ValueArray22(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray22& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; +}; + +template +class ValueArray23 { + public: + ValueArray23(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, + v23_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray23& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; +}; + +template +class ValueArray24 { + public: + ValueArray24(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray24& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; +}; + +template +class ValueArray25 { + public: + ValueArray25(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, + T25 v25) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray25& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; +}; + +template +class ValueArray26 { + public: + ValueArray26(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray26& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; +}; + +template +class ValueArray27 { + public: + ValueArray27(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), + v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), + v26_(v26), v27_(v27) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray27& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; +}; + +template +class ValueArray28 { + public: + ValueArray28(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), + v25_(v25), v26_(v26), v27_(v27), v28_(v28) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray28& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; +}; + +template +class ValueArray29 { + public: + ValueArray29(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), + v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray29& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; +}; + +template +class ValueArray30 { + public: + ValueArray30(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray30& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; +}; + +template +class ValueArray31 { + public: + ValueArray31(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray31& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; +}; + +template +class ValueArray32 { + public: + ValueArray32(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), + v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray32& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; +}; + +template +class ValueArray33 { + public: + ValueArray33(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, + T33 v33) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray33& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; +}; + +template +class ValueArray34 { + public: + ValueArray34(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray34& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; +}; + +template +class ValueArray35 { + public: + ValueArray35(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), + v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), + v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), + v32_(v32), v33_(v33), v34_(v34), v35_(v35) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, + v35_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray35& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; +}; + +template +class ValueArray36 { + public: + ValueArray36(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), + v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), + v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment 
is unsupported. + void operator=(const ValueArray36& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; +}; + +template +class ValueArray37 { + public: + ValueArray37(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), + v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), + v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), + v36_(v36), v37_(v37) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray37& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; +}; + +template +class ValueArray38 { + public: + ValueArray38(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray38& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; +}; + +template +class ValueArray39 { + public: + ValueArray39(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray39& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; +}; + +template +class ValueArray40 { + public: + ValueArray40(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), + v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), + v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), + v40_(v40) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray40& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; +}; + +template +class ValueArray41 { + public: + ValueArray41(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, + T41 v41) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray41& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; +}; + +template +class ValueArray42 { + public: + ValueArray42(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41), v42_(v42) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray42& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; +}; + +template +class ValueArray43 { + public: + ValueArray43(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), + v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), + v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), + v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), + v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray43& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; +}; + +template +class ValueArray44 { + public: + ValueArray44(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), + v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), + v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), + v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), + v43_(v43), v44_(v44) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray44& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; +}; + +template +class ValueArray45 { + public: + ValueArray45(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), + v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), + v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), + v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), + v42_(v42), v43_(v43), v44_(v44), v45_(v45) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray45& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; +}; + +template +class ValueArray46 { + public: + ValueArray46(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), + v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray46& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; +}; + +template +class ValueArray47 { + public: + ValueArray47(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), + v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46), + v47_(v47) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, + v47_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray47& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; +}; + +template +class ValueArray48 { + public: + ValueArray48(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), + v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), + v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), + v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), + v46_(v46), v47_(v47), v48_(v48) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_, + v48_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray48& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; + const T48 v48_; +}; + +template +class ValueArray49 { + public: + ValueArray49(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, + T49 v49) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), + v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_, + v48_, v49_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray49& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; + const T48 v48_; + const T49 v49_; +}; + +template +class ValueArray50 { + public: + ValueArray50(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, T49 v49, + T50 v50) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), + v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49), v50_(v50) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_, + v48_, v49_, v50_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray50& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; + const T48 v48_; + const T49 v49_; + const T50 v50_; +}; + +# if GTEST_HAS_COMBINE +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Generates values from the Cartesian product of values produced +// by the argument generators. 
+//
+template <typename T1, typename T2>
+class CartesianProductGenerator2
+    : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2> > {
+ public:
+  typedef ::std::tr1::tuple<T1, T2> ParamType;
+
+  CartesianProductGenerator2(const ParamGenerator<T1>& g1,
+      const ParamGenerator<T2>& g2)
+      : g1_(g1), g2_(g2) {}
+  virtual ~CartesianProductGenerator2() {}
+
+  virtual ParamIteratorInterface<ParamType>* Begin() const {
+    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin());
+  }
+  virtual ParamIteratorInterface<ParamType>* End() const {
+    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end());
+  }
+
+ private:
+  class Iterator : public ParamIteratorInterface<ParamType> {
+   public:
+    Iterator(const ParamGeneratorInterface<ParamType>* base,
+      const ParamGenerator<T1>& g1,
+      const typename ParamGenerator<T1>::iterator& current1,
+      const ParamGenerator<T2>& g2,
+      const typename ParamGenerator<T2>::iterator& current2)
+        : base_(base),
+          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+          begin2_(g2.begin()), end2_(g2.end()), current2_(current2) {
+      ComputeCurrentValue();
+    }
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+      return base_;
+    }
+    // Advance should not be called on beyond-of-range iterators
+    // so no component iterators must be beyond end of range, either.
+    virtual void Advance() {
+      assert(!AtEnd());
+      ++current2_;
+      if (current2_ == end2_) {
+        current2_ = begin2_;
+        ++current1_;
+      }
+      ComputeCurrentValue();
+    }
+    virtual ParamIteratorInterface<ParamType>* Clone() const {
+      return new Iterator(*this);
+    }
+    virtual const ParamType* Current() const { return &current_value_; }
+    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      const Iterator* typed_other =
+          CheckedDowncastToActualType<const Iterator>(&other);
+      // We must report iterators equal if they both point beyond their
+      // respective ranges. That can happen in a variety of fashions,
+      // so we have to consult AtEnd().
+      return (AtEnd() && typed_other->AtEnd()) ||
+         (
+          current1_ == typed_other->current1_ &&
+          current2_ == typed_other->current2_);
+    }
+
+   private:
+    Iterator(const Iterator& other)
+        : base_(other.base_),
+        begin1_(other.begin1_),
+        end1_(other.end1_),
+        current1_(other.current1_),
+        begin2_(other.begin2_),
+        end2_(other.end2_),
+        current2_(other.current2_) {
+      ComputeCurrentValue();
+    }
+
+    void ComputeCurrentValue() {
+      if (!AtEnd())
+        current_value_ = ParamType(*current1_, *current2_);
+    }
+    bool AtEnd() const {
+      // We must report iterator past the end of the range when either of the
+      // component iterators has reached the end of its range.
+      return
+          current1_ == end1_ ||
+          current2_ == end2_;
+    }
+
+    // No implementation - assignment is unsupported.
+    void operator=(const Iterator& other);
+
+    const ParamGeneratorInterface<ParamType>* const base_;
+    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+    // current[i]_ is the actual traversing iterator.
+    const typename ParamGenerator<T1>::iterator begin1_;
+    const typename ParamGenerator<T1>::iterator end1_;
+    typename ParamGenerator<T1>::iterator current1_;
+    const typename ParamGenerator<T2>::iterator begin2_;
+    const typename ParamGenerator<T2>::iterator end2_;
+    typename ParamGenerator<T2>::iterator current2_;
+    ParamType current_value_;
+  };  // class CartesianProductGenerator2::Iterator
+
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductGenerator2& other);
+
+  const ParamGenerator<T1> g1_;
+  const ParamGenerator<T2> g2_;
+};  // class CartesianProductGenerator2
+
+
+template <typename T1, typename T2, typename T3>
+class CartesianProductGenerator3
+    : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3> > {
+ public:
+  typedef ::std::tr1::tuple<T1, T2, T3> ParamType;
+
+  CartesianProductGenerator3(const ParamGenerator<T1>& g1,
+      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3)
+      : g1_(g1), g2_(g2), g3_(g3) {}
+  virtual ~CartesianProductGenerator3() {}
+
+  virtual ParamIteratorInterface<ParamType>* Begin() const {
+    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+        g3_.begin());
+  }
+  virtual ParamIteratorInterface<ParamType>* End() const {
+    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end());
+  }
+
+ private:
+  class Iterator : public ParamIteratorInterface<ParamType> {
+   public:
+    Iterator(const ParamGeneratorInterface<ParamType>* base,
+      const ParamGenerator<T1>& g1,
+      const typename ParamGenerator<T1>::iterator& current1,
+      const ParamGenerator<T2>& g2,
+      const typename ParamGenerator<T2>::iterator& current2,
+      const ParamGenerator<T3>& g3,
+      const typename ParamGenerator<T3>::iterator& current3)
+        : base_(base),
+          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+          begin3_(g3.begin()), end3_(g3.end()), current3_(current3) {
+      ComputeCurrentValue();
+    }
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+      return base_;
+    }
+    // Advance should not be called on beyond-of-range iterators
+    // so no component iterators must be beyond end of range, either.
+    virtual void Advance() {
+      assert(!AtEnd());
+      ++current3_;
+      if (current3_ == end3_) {
+        current3_ = begin3_;
+        ++current2_;
+      }
+      if (current2_ == end2_) {
+        current2_ = begin2_;
+        ++current1_;
+      }
+      ComputeCurrentValue();
+    }
+    virtual ParamIteratorInterface<ParamType>* Clone() const {
+      return new Iterator(*this);
+    }
+    virtual const ParamType* Current() const { return &current_value_; }
+    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      const Iterator* typed_other =
+          CheckedDowncastToActualType<const Iterator>(&other);
+      // We must report iterators equal if they both point beyond their
+      // respective ranges. That can happen in a variety of fashions,
+      // so we have to consult AtEnd().
+      return (AtEnd() && typed_other->AtEnd()) ||
+         (
+          current1_ == typed_other->current1_ &&
+          current2_ == typed_other->current2_ &&
+          current3_ == typed_other->current3_);
+    }
+
+   private:
+    Iterator(const Iterator& other)
+        : base_(other.base_),
+        begin1_(other.begin1_),
+        end1_(other.end1_),
+        current1_(other.current1_),
+        begin2_(other.begin2_),
+        end2_(other.end2_),
+        current2_(other.current2_),
+        begin3_(other.begin3_),
+        end3_(other.end3_),
+        current3_(other.current3_) {
+      ComputeCurrentValue();
+    }
+
+    void ComputeCurrentValue() {
+      if (!AtEnd())
+        current_value_ = ParamType(*current1_, *current2_, *current3_);
+    }
+    bool AtEnd() const {
+      // We must report iterator past the end of the range when either of the
+      // component iterators has reached the end of its range.
+      return
+          current1_ == end1_ ||
+          current2_ == end2_ ||
+          current3_ == end3_;
+    }
+
+    // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + ParamType current_value_; + }; // class CartesianProductGenerator3::Iterator + + // No implementation - assignment is unsupported. + void operator=(const CartesianProductGenerator3& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; +}; // class CartesianProductGenerator3 + + +template +class CartesianProductGenerator4 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator4(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {} + virtual ~CartesianProductGenerator4() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current4_; + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." 
<< std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + ParamType current_value_; + }; // class CartesianProductGenerator4::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator4& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; +}; // class CartesianProductGenerator4 + + +template +class CartesianProductGenerator5 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator5(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {} + virtual ~CartesianProductGenerator5() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current5_; + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + ParamType current_value_; + }; // class CartesianProductGenerator5::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator5& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; +}; // class CartesianProductGenerator5 + + +template +class CartesianProductGenerator6 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator6(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {} + virtual ~CartesianProductGenerator6() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current6_; + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + ParamType current_value_; + }; // class CartesianProductGenerator6::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator6& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; +}; // class CartesianProductGenerator6 + + +template +class CartesianProductGenerator7 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator7(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {} + virtual ~CartesianProductGenerator7() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current7_; + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. 
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + ParamType current_value_; + }; // class CartesianProductGenerator7::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator7& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; +}; // class CartesianProductGenerator7 + + +template +class CartesianProductGenerator8 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator8(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7, + const ParamGenerator& g8) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), + g8_(g8) {} + virtual ~CartesianProductGenerator8() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin(), g8_, g8_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_, + g8_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7, + const ParamGenerator& g8, + const typename ParamGenerator::iterator& current8) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7), + begin8_(g8.begin()), end8_(g8.end()), current8_(current8) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. 
+ virtual void Advance() { + assert(!AtEnd()); + ++current8_; + if (current8_ == end8_) { + current8_ = begin8_; + ++current7_; + } + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_ && + current8_ == typed_other->current8_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_), + begin8_(other.begin8_), + end8_(other.end8_), + current8_(other.current8_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_, *current8_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_ || + current8_ == end8_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. 
+ const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + const typename ParamGenerator::iterator begin8_; + const typename ParamGenerator::iterator end8_; + typename ParamGenerator::iterator current8_; + ParamType current_value_; + }; // class CartesianProductGenerator8::Iterator + + // No implementation - assignment is unsupported. + void operator=(const CartesianProductGenerator8& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; + const ParamGenerator g8_; +}; // class CartesianProductGenerator8 + + +template +class CartesianProductGenerator9 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator9(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7, + const ParamGenerator& g8, const ParamGenerator& g9) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9) {} + virtual ~CartesianProductGenerator9() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_, + g8_.end(), g9_, g9_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7, + const ParamGenerator& g8, + const typename ParamGenerator::iterator& current8, + const ParamGenerator& g9, + const typename ParamGenerator::iterator& current9) + : base_(base), + 
begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7), + begin8_(g8.begin()), end8_(g8.end()), current8_(current8), + begin9_(g9.begin()), end9_(g9.end()), current9_(current9) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current9_; + if (current9_ == end9_) { + current9_ = begin9_; + ++current8_; + } + if (current8_ == end8_) { + current8_ = begin8_; + ++current7_; + } + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_ && + current8_ == typed_other->current8_ && + current9_ == typed_other->current9_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_), + begin8_(other.begin8_), + end8_(other.end8_), + current8_(other.current8_), + begin9_(other.begin9_), + end9_(other.end9_), + current9_(other.current9_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_, *current8_, + *current9_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_ || + current8_ == end8_ || + current9_ == end9_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + const typename ParamGenerator::iterator begin8_; + const typename ParamGenerator::iterator end8_; + typename ParamGenerator::iterator current8_; + const typename ParamGenerator::iterator begin9_; + const typename ParamGenerator::iterator end9_; + typename ParamGenerator::iterator current9_; + ParamType current_value_; + }; // class CartesianProductGenerator9::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator9& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; + const ParamGenerator g8_; + const ParamGenerator g9_; +}; // class CartesianProductGenerator9 + + +template +class CartesianProductGenerator10 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator10(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7, + const ParamGenerator& g8, const ParamGenerator& g9, + const ParamGenerator& g10) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9), g10_(g10) {} + virtual ~CartesianProductGenerator10() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin(), g10_, g10_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_, + g8_.end(), g9_, g9_.end(), g10_, g10_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7, + const ParamGenerator& g8, + const typename ParamGenerator::iterator& current8, + const ParamGenerator& g9, + const typename ParamGenerator::iterator& current9, + const ParamGenerator& g10, + const typename ParamGenerator::iterator& current10) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7), + begin8_(g8.begin()), end8_(g8.end()), current8_(current8), + begin9_(g9.begin()), end9_(g9.end()), current9_(current9), + begin10_(g10.begin()), end10_(g10.end()), current10_(current10) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. 
+ virtual void Advance() { + assert(!AtEnd()); + ++current10_; + if (current10_ == end10_) { + current10_ = begin10_; + ++current9_; + } + if (current9_ == end9_) { + current9_ = begin9_; + ++current8_; + } + if (current8_ == end8_) { + current8_ = begin8_; + ++current7_; + } + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_ && + current8_ == typed_other->current8_ && + current9_ == typed_other->current9_ && + current10_ == typed_other->current10_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_), + begin8_(other.begin8_), + end8_(other.end8_), + current8_(other.current8_), + begin9_(other.begin9_), + end9_(other.end9_), + current9_(other.current9_), + begin10_(other.begin10_), + end10_(other.end10_), + current10_(other.current10_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_, *current8_, + *current9_, *current10_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_ || + current8_ == end8_ || + current9_ == end9_ || + current10_ == end10_; + } + + // No implementation - assignment is unsupported. 
+ void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + const typename ParamGenerator::iterator begin8_; + const typename ParamGenerator::iterator end8_; + typename ParamGenerator::iterator current8_; + const typename ParamGenerator::iterator begin9_; + const typename ParamGenerator::iterator end9_; + typename ParamGenerator::iterator current9_; + const typename ParamGenerator::iterator begin10_; + const typename ParamGenerator::iterator end10_; + typename ParamGenerator::iterator current10_; + ParamType current_value_; + }; // class CartesianProductGenerator10::Iterator + + // No implementation - assignment is unsupported. + void operator=(const CartesianProductGenerator10& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; + const ParamGenerator g8_; + const ParamGenerator g9_; + const ParamGenerator g10_; +}; // class CartesianProductGenerator10 + + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Helper classes providing Combine() with polymorphic features. They allow +// casting CartesianProductGeneratorN to ParamGenerator if T is +// convertible to U. +// +template +class CartesianProductHolder2 { + public: +CartesianProductHolder2(const Generator1& g1, const Generator2& g2) + : g1_(g1), g2_(g2) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator2( + static_cast >(g1_), + static_cast >(g2_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder2& other); + + const Generator1 g1_; + const Generator2 g2_; +}; // class CartesianProductHolder2 + +template +class CartesianProductHolder3 { + public: +CartesianProductHolder3(const Generator1& g1, const Generator2& g2, + const Generator3& g3) + : g1_(g1), g2_(g2), g3_(g3) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator3( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_))); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductHolder3& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; +}; // class CartesianProductHolder3 + +template +class CartesianProductHolder4 { + public: +CartesianProductHolder4(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator4( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder4& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; +}; // class CartesianProductHolder4 + +template +class CartesianProductHolder5 { + public: +CartesianProductHolder5(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator5( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder5& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; +}; // class CartesianProductHolder5 + +template +class CartesianProductHolder6 { + public: +CartesianProductHolder6(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator6( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder6& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; +}; // class CartesianProductHolder6 + +template +class CartesianProductHolder7 { + public: +CartesianProductHolder7(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator7( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_))); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductHolder7& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; +}; // class CartesianProductHolder7 + +template +class CartesianProductHolder8 { + public: +CartesianProductHolder8(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7, const Generator8& g8) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), + g8_(g8) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator8( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_), + static_cast >(g8_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder8& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; + const Generator8 g8_; +}; // class CartesianProductHolder8 + +template +class CartesianProductHolder9 { + public: +CartesianProductHolder9(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7, const Generator8& g8, + const Generator9& g9) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator9( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_), + static_cast >(g8_), + static_cast >(g9_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder9& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; + const Generator8 g8_; + const Generator9 g9_; +}; // class CartesianProductHolder9 + +template +class CartesianProductHolder10 { + public: +CartesianProductHolder10(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7, const Generator8& g8, + const Generator9& g9, const Generator10& g10) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9), g10_(g10) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator10( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_), + static_cast >(g8_), + static_cast >(g9_), + static_cast >(g10_))); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductHolder10& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; + const Generator8 g8_; + const Generator9 g9_; + const Generator10 g10_; +}; // class CartesianProductHolder10 + +# endif // GTEST_HAS_COMBINE + +} // namespace internal +} // namespace testing + +#endif // GTEST_HAS_PARAM_TEST + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_ + +#if GTEST_HAS_PARAM_TEST + +namespace testing { + +// Functions producing parameter generators. +// +// Google Test uses these generators to produce parameters for value- +// parameterized tests. When a parameterized test case is instantiated +// with a particular generator, Google Test creates and runs tests +// for each element in the sequence produced by the generator. +// +// In the following sample, tests from test case FooTest are instantiated +// each three times with parameter values 3, 5, and 8: +// +// class FooTest : public TestWithParam { ... }; +// +// TEST_P(FooTest, TestThis) { +// } +// TEST_P(FooTest, TestThat) { +// } +// INSTANTIATE_TEST_CASE_P(TestSequence, FooTest, Values(3, 5, 8)); +// + +// Range() returns generators providing sequences of values in a range. +// +// Synopsis: +// Range(start, end) +// - returns a generator producing a sequence of values {start, start+1, +// start+2, ..., }. +// Range(start, end, step) +// - returns a generator producing a sequence of values {start, start+step, +// start+step+step, ..., }. +// Notes: +// * The generated sequences never include end. For example, Range(1, 5) +// returns a generator producing a sequence {1, 2, 3, 4}. Range(1, 9, 2) +// returns a generator producing {1, 3, 5, 7}. +// * start and end must have the same type. That type may be any integral or +// floating-point type or a user defined type satisfying these conditions: +// * It must be assignable (have operator=() defined). +// * It must have operator+() (operator+(int-compatible type) for +// two-operand version). +// * It must have operator<() defined. +// Elements in the resulting sequences will also have that type. +// * Condition start < end must be satisfied in order for resulting sequences +// to contain any elements. +// +template +internal::ParamGenerator Range(T start, T end, IncrementT step) { + return internal::ParamGenerator( + new internal::RangeGenerator(start, end, step)); +} + +template +internal::ParamGenerator Range(T start, T end) { + return Range(start, end, 1); +} + +// ValuesIn() function allows generation of tests with parameters coming from +// a container. +// +// Synopsis: +// ValuesIn(const T (&array)[N]) +// - returns a generator producing sequences with elements from +// a C-style array. +// ValuesIn(const Container& container) +// - returns a generator producing sequences with elements from +// an STL-style container. +// ValuesIn(Iterator begin, Iterator end) +// - returns a generator producing sequences with elements from +// a range [begin, end) defined by a pair of STL-style iterators. These +// iterators can also be plain C pointers. +// +// Please note that ValuesIn copies the values from the containers +// passed in and keeps them to generate tests in RUN_ALL_TESTS(). 
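// A minimal sketch, assuming a hypothetical int-parameterized fixture
// CounterTest: Range(0, 10, 3) yields {0, 3, 6, 9} (the end value is
// never included), and every TEST_P of the fixture is run once per
// generated value.
//
//   class CounterTest : public ::testing::TestWithParam<int> {};
//
//   TEST_P(CounterTest, IsNonNegative) {
//     EXPECT_GE(GetParam(), 0);
//   }
//
//   INSTANTIATE_TEST_CASE_P(SmallCounts, CounterTest,
//                           ::testing::Range(0, 10, 3));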
+// +// Examples: +// +// This instantiates tests from test case StringTest +// each with C-string values of "foo", "bar", and "baz": +// +// const char* strings[] = {"foo", "bar", "baz"}; +// INSTANTIATE_TEST_CASE_P(StringSequence, SrtingTest, ValuesIn(strings)); +// +// This instantiates tests from test case StlStringTest +// each with STL strings with values "a" and "b": +// +// ::std::vector< ::std::string> GetParameterStrings() { +// ::std::vector< ::std::string> v; +// v.push_back("a"); +// v.push_back("b"); +// return v; +// } +// +// INSTANTIATE_TEST_CASE_P(CharSequence, +// StlStringTest, +// ValuesIn(GetParameterStrings())); +// +// +// This will also instantiate tests from CharTest +// each with parameter values 'a' and 'b': +// +// ::std::list GetParameterChars() { +// ::std::list list; +// list.push_back('a'); +// list.push_back('b'); +// return list; +// } +// ::std::list l = GetParameterChars(); +// INSTANTIATE_TEST_CASE_P(CharSequence2, +// CharTest, +// ValuesIn(l.begin(), l.end())); +// +template +internal::ParamGenerator< + typename ::testing::internal::IteratorTraits::value_type> +ValuesIn(ForwardIterator begin, ForwardIterator end) { + typedef typename ::testing::internal::IteratorTraits + ::value_type ParamType; + return internal::ParamGenerator( + new internal::ValuesInIteratorRangeGenerator(begin, end)); +} + +template +internal::ParamGenerator ValuesIn(const T (&array)[N]) { + return ValuesIn(array, array + N); +} + +template +internal::ParamGenerator ValuesIn( + const Container& container) { + return ValuesIn(container.begin(), container.end()); +} + +// Values() allows generating tests from explicitly specified list of +// parameters. +// +// Synopsis: +// Values(T v1, T v2, ..., T vN) +// - returns a generator producing sequences with elements v1, v2, ..., vN. +// +// For example, this instantiates tests from test case BarTest each +// with values "one", "two", and "three": +// +// INSTANTIATE_TEST_CASE_P(NumSequence, BarTest, Values("one", "two", "three")); +// +// This instantiates tests from test case BazTest each with values 1, 2, 3.5. +// The exact type of values will depend on the type of parameter in BazTest. +// +// INSTANTIATE_TEST_CASE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5)); +// +// Currently, Values() supports from 1 to 50 parameters. 
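+//
+// As a further illustration (a minimal sketch only; the fixture RangeTest and
+// its test names below are hypothetical and not part of Google Test), a
+// complete value-parameterized test ties Range()/Values() to TEST_P,
+// GetParam(), and INSTANTIATE_TEST_CASE_P:
+//
+//   class RangeTest : public TestWithParam<int> {};
+//
+//   TEST_P(RangeTest, IsNonNegative) {
+//     EXPECT_GE(GetParam(), 0);
+//   }
+//
+//   // Instantiates with {0, 2, 4, 6, 8, 10}; Range() excludes the end value.
+//   INSTANTIATE_TEST_CASE_P(EvenValues, RangeTest, Range(0, 11, 2));
+//   // An explicit list works the same way, e.g. Values(1, 2, 3, 5, 8).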
+// +template +internal::ValueArray1 Values(T1 v1) { + return internal::ValueArray1(v1); +} + +template +internal::ValueArray2 Values(T1 v1, T2 v2) { + return internal::ValueArray2(v1, v2); +} + +template +internal::ValueArray3 Values(T1 v1, T2 v2, T3 v3) { + return internal::ValueArray3(v1, v2, v3); +} + +template +internal::ValueArray4 Values(T1 v1, T2 v2, T3 v3, T4 v4) { + return internal::ValueArray4(v1, v2, v3, v4); +} + +template +internal::ValueArray5 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5) { + return internal::ValueArray5(v1, v2, v3, v4, v5); +} + +template +internal::ValueArray6 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6) { + return internal::ValueArray6(v1, v2, v3, v4, v5, v6); +} + +template +internal::ValueArray7 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6, T7 v7) { + return internal::ValueArray7(v1, v2, v3, v4, v5, + v6, v7); +} + +template +internal::ValueArray8 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8) { + return internal::ValueArray8(v1, v2, v3, v4, + v5, v6, v7, v8); +} + +template +internal::ValueArray9 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9) { + return internal::ValueArray9(v1, v2, v3, + v4, v5, v6, v7, v8, v9); +} + +template +internal::ValueArray10 Values(T1 v1, + T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10) { + return internal::ValueArray10(v1, + v2, v3, v4, v5, v6, v7, v8, v9, v10); +} + +template +internal::ValueArray11 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11) { + return internal::ValueArray11(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11); +} + +template +internal::ValueArray12 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12) { + return internal::ValueArray12(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12); +} + +template +internal::ValueArray13 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13) { + return internal::ValueArray13(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13); +} + +template +internal::ValueArray14 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) { + return internal::ValueArray14(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, + v14); +} + +template +internal::ValueArray15 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) { + return internal::ValueArray15(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, + v13, v14, v15); +} + +template +internal::ValueArray16 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16) { + return internal::ValueArray16(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, + v12, v13, v14, v15, v16); +} + +template +internal::ValueArray17 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17) { + return internal::ValueArray17(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, + v11, v12, v13, v14, v15, v16, v17); +} + +template +internal::ValueArray18 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, + T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18) { + return internal::ValueArray18(v1, v2, v3, v4, v5, v6, v7, v8, v9, + v10, v11, v12, v13, v14, v15, v16, v17, v18); +} + +template +internal::ValueArray19 
Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, + T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, + T15 v15, T16 v16, T17 v17, T18 v18, T19 v19) { + return internal::ValueArray19(v1, v2, v3, v4, v5, v6, v7, v8, + v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19); +} + +template +internal::ValueArray20 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20) { + return internal::ValueArray20(v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20); +} + +template +internal::ValueArray21 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21) { + return internal::ValueArray21(v1, v2, v3, v4, v5, v6, + v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21); +} + +template +internal::ValueArray22 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22) { + return internal::ValueArray22(v1, v2, v3, v4, + v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22); +} + +template +internal::ValueArray23 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23) { + return internal::ValueArray23(v1, v2, v3, + v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23); +} + +template +internal::ValueArray24 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24) { + return internal::ValueArray24(v1, v2, + v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, + v19, v20, v21, v22, v23, v24); +} + +template +internal::ValueArray25 Values(T1 v1, + T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, + T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, + T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25) { + return internal::ValueArray25(v1, + v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, + v18, v19, v20, v21, v22, v23, v24, v25); +} + +template +internal::ValueArray26 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26) { + return internal::ValueArray26(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, + v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26); +} + +template +internal::ValueArray27 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27) { + return internal::ValueArray27(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, + v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27); +} + +template +internal::ValueArray28 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 
v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28) { + return internal::ValueArray28(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, + v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, + v28); +} + +template +internal::ValueArray29 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29) { + return internal::ValueArray29(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, + v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, + v27, v28, v29); +} + +template +internal::ValueArray30 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, + T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, + T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) { + return internal::ValueArray30(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, + v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, + v26, v27, v28, v29, v30); +} + +template +internal::ValueArray31 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) { + return internal::ValueArray31(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, + v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, + v25, v26, v27, v28, v29, v30, v31); +} + +template +internal::ValueArray32 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32) { + return internal::ValueArray32(v1, v2, v3, v4, v5, v6, v7, v8, v9, + v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32); +} + +template +internal::ValueArray33 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, + T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33) { + return internal::ValueArray33(v1, v2, v3, v4, v5, v6, v7, v8, + v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32, v33); +} + +template +internal::ValueArray34 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, + T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, + T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, + T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, + T31 v31, T32 v32, T33 v33, T34 v34) { + return internal::ValueArray34(v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, + v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34); +} + +template +internal::ValueArray35 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, 
T19 v19, T20 v20, T21 v21, + T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, + T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35) { + return internal::ValueArray35(v1, v2, v3, v4, v5, v6, + v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, + v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35); +} + +template +internal::ValueArray36 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, + T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, + T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36) { + return internal::ValueArray36(v1, v2, v3, v4, + v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, + v34, v35, v36); +} + +template +internal::ValueArray37 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, + T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, + T37 v37) { + return internal::ValueArray37(v1, v2, v3, + v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, + v34, v35, v36, v37); +} + +template +internal::ValueArray38 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, + T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, + T37 v37, T38 v38) { + return internal::ValueArray38(v1, v2, + v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, + v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, + v33, v34, v35, v36, v37, v38); +} + +template +internal::ValueArray39 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, + T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, + T37 v37, T38 v38, T39 v39) { + return internal::ValueArray39(v1, + v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, + v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, + v32, v33, v34, v35, v36, v37, v38, v39); +} + +template +internal::ValueArray40 Values(T1 v1, + T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, + T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, + T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, + T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, + T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) { + return internal::ValueArray40(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, + v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, + v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40); +} + +template +internal::ValueArray41 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + 
T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41) { + return internal::ValueArray41(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, + v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, + v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41); +} + +template +internal::ValueArray42 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42) { + return internal::ValueArray42(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, + v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, + v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, + v42); +} + +template +internal::ValueArray43 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43) { + return internal::ValueArray43(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, + v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, + v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, + v41, v42, v43); +} + +template +internal::ValueArray44 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44) { + return internal::ValueArray44(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, + v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, + v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, + v40, v41, v42, v43, v44); +} + +template +internal::ValueArray45 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, + T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, + T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, + T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, + T41 v41, T42 v42, T43 v43, T44 v44, T45 v45) { + return internal::ValueArray45(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, + v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, + v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, + v39, v40, v41, v42, v43, v44, v45); +} + +template +internal::ValueArray46 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 
v39, + T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) { + return internal::ValueArray46(v1, v2, v3, v4, v5, v6, v7, v8, v9, + v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, + v38, v39, v40, v41, v42, v43, v44, v45, v46); +} + +template +internal::ValueArray47 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, + T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) { + return internal::ValueArray47(v1, v2, v3, v4, v5, v6, v7, v8, + v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, + v38, v39, v40, v41, v42, v43, v44, v45, v46, v47); +} + +template +internal::ValueArray48 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, + T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, + T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, + T48 v48) { + return internal::ValueArray48(v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, + v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, + v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48); +} + +template +internal::ValueArray49 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, + T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, + T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, + T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, + T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, + T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, + T47 v47, T48 v48, T49 v49) { + return internal::ValueArray49(v1, v2, v3, v4, v5, v6, + v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, + v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, + v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49); +} + +template +internal::ValueArray50 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, + T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, + T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, + T38 v38, T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, + T46 v46, T47 v47, T48 v48, T49 v49, T50 v50) { + return internal::ValueArray50(v1, v2, v3, v4, + v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, + v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, + v48, v49, v50); +} + +// Bool() allows generating tests with parameters in a set of (false, true). +// +// Synopsis: +// Bool() +// - returns a generator producing sequences with elements {false, true}. 
+// +// It is useful when testing code that depends on Boolean flags. Combinations +// of multiple flags can be tested when several Bool()'s are combined using +// Combine() function. +// +// In the following example all tests in the test case FlagDependentTest +// will be instantiated twice with parameters false and true. +// +// class FlagDependentTest : public testing::TestWithParam { +// virtual void SetUp() { +// external_flag = GetParam(); +// } +// } +// INSTANTIATE_TEST_CASE_P(BoolSequence, FlagDependentTest, Bool()); +// +inline internal::ParamGenerator Bool() { + return Values(false, true); +} + +# if GTEST_HAS_COMBINE +// Combine() allows the user to combine two or more sequences to produce +// values of a Cartesian product of those sequences' elements. +// +// Synopsis: +// Combine(gen1, gen2, ..., genN) +// - returns a generator producing sequences with elements coming from +// the Cartesian product of elements from the sequences generated by +// gen1, gen2, ..., genN. The sequence elements will have a type of +// tuple where T1, T2, ..., TN are the types +// of elements from sequences produces by gen1, gen2, ..., genN. +// +// Combine can have up to 10 arguments. This number is currently limited +// by the maximum number of elements in the tuple implementation used by Google +// Test. +// +// Example: +// +// This will instantiate tests in test case AnimalTest each one with +// the parameter values tuple("cat", BLACK), tuple("cat", WHITE), +// tuple("dog", BLACK), and tuple("dog", WHITE): +// +// enum Color { BLACK, GRAY, WHITE }; +// class AnimalTest +// : public testing::TestWithParam > {...}; +// +// TEST_P(AnimalTest, AnimalLooksNice) {...} +// +// INSTANTIATE_TEST_CASE_P(AnimalVariations, AnimalTest, +// Combine(Values("cat", "dog"), +// Values(BLACK, WHITE))); +// +// This will instantiate tests in FlagDependentTest with all variations of two +// Boolean flags: +// +// class FlagDependentTest +// : public testing::TestWithParam > { +// virtual void SetUp() { +// // Assigns external_flag_1 and external_flag_2 values from the tuple. +// tie(external_flag_1, external_flag_2) = GetParam(); +// } +// }; +// +// TEST_P(FlagDependentTest, TestFeature1) { +// // Test your code using external_flag_1 and external_flag_2 here. 
+// } +// INSTANTIATE_TEST_CASE_P(TwoBoolSequence, FlagDependentTest, +// Combine(Bool(), Bool())); +// +template +internal::CartesianProductHolder2 Combine( + const Generator1& g1, const Generator2& g2) { + return internal::CartesianProductHolder2( + g1, g2); +} + +template +internal::CartesianProductHolder3 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3) { + return internal::CartesianProductHolder3( + g1, g2, g3); +} + +template +internal::CartesianProductHolder4 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4) { + return internal::CartesianProductHolder4( + g1, g2, g3, g4); +} + +template +internal::CartesianProductHolder5 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5) { + return internal::CartesianProductHolder5( + g1, g2, g3, g4, g5); +} + +template +internal::CartesianProductHolder6 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6) { + return internal::CartesianProductHolder6( + g1, g2, g3, g4, g5, g6); +} + +template +internal::CartesianProductHolder7 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6, + const Generator7& g7) { + return internal::CartesianProductHolder7( + g1, g2, g3, g4, g5, g6, g7); +} + +template +internal::CartesianProductHolder8 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6, + const Generator7& g7, const Generator8& g8) { + return internal::CartesianProductHolder8( + g1, g2, g3, g4, g5, g6, g7, g8); +} + +template +internal::CartesianProductHolder9 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6, + const Generator7& g7, const Generator8& g8, const Generator9& g9) { + return internal::CartesianProductHolder9( + g1, g2, g3, g4, g5, g6, g7, g8, g9); +} + +template +internal::CartesianProductHolder10 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6, + const Generator7& g7, const Generator8& g8, const Generator9& g9, + const Generator10& g10) { + return internal::CartesianProductHolder10( + g1, g2, g3, g4, g5, g6, g7, g8, g9, g10); +} +# endif // GTEST_HAS_COMBINE + + + +# define TEST_P(test_case_name, test_name) \ + class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \ + : public test_case_name { \ + public: \ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \ + virtual void TestBody(); \ + private: \ + static int AddToRegistry() { \ + ::testing::UnitTest::GetInstance()->parameterized_test_registry(). 
\ + GetTestCasePatternHolder(\ + #test_case_name, __FILE__, __LINE__)->AddTestPattern(\ + #test_case_name, \ + #test_name, \ + new ::testing::internal::TestMetaFactory< \ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>()); \ + return 0; \ + } \ + static int gtest_registering_dummy_; \ + GTEST_DISALLOW_COPY_AND_ASSIGN_(\ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \ + }; \ + int GTEST_TEST_CLASS_NAME_(test_case_name, \ + test_name)::gtest_registering_dummy_ = \ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \ + void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() + +# define INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator) \ + ::testing::internal::ParamGenerator \ + gtest_##prefix##test_case_name##_EvalGenerator_() { return generator; } \ + int gtest_##prefix##test_case_name##_dummy_ = \ + ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \ + GetTestCasePatternHolder(\ + #test_case_name, __FILE__, __LINE__)->AddTestCaseInstantiation(\ + #prefix, \ + >est_##prefix##test_case_name##_EvalGenerator_, \ + __FILE__, __LINE__) + +} // namespace testing + +#endif // GTEST_HAS_PARAM_TEST + +#endif // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ +// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// Google C++ Testing Framework definitions useful in production code. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_PROD_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PROD_H_ + +// When you need to test the private or protected members of a class, +// use the FRIEND_TEST macro to declare your tests as friends of the +// class. For example: +// +// class MyClass { +// private: +// void MyMethod(); +// FRIEND_TEST(MyClassTest, MyMethod); +// }; +// +// class MyClassTest : public testing::Test { +// // ... +// }; +// +// TEST_F(MyClassTest, MyMethod) { +// // Can call MyClass::MyMethod() here. 
+// } + +#define FRIEND_TEST(test_case_name, test_name)\ +friend class test_case_name##_##test_name##_Test + +#endif // GTEST_INCLUDE_GTEST_GTEST_PROD_H_ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: mheule@google.com (Markus Heule) +// + +#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ +#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ + +#include +#include + +namespace testing { + +// A copyable object representing the result of a test part (i.e. an +// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCESS()). +// +// Don't inherit from TestPartResult as its destructor is not virtual. +class GTEST_API_ TestPartResult { + public: + // The possible outcomes of a test part (i.e. an assertion or an + // explicit SUCCEED(), FAIL(), or ADD_FAILURE()). + enum Type { + kSuccess, // Succeeded. + kNonFatalFailure, // Failed but the test can continue. + kFatalFailure // Failed and the test should be terminated. + }; + + // C'tor. TestPartResult does NOT have a default constructor. + // Always use this constructor (with parameters) to create a + // TestPartResult object. + TestPartResult(Type a_type, + const char* a_file_name, + int a_line_number, + const char* a_message) + : type_(a_type), + file_name_(a_file_name), + line_number_(a_line_number), + summary_(ExtractSummary(a_message)), + message_(a_message) { + } + + // Gets the outcome of the test part. + Type type() const { return type_; } + + // Gets the name of the source file where the test part took place, or + // NULL if it's unknown. + const char* file_name() const { return file_name_.c_str(); } + + // Gets the line in the source file where the test part took place, + // or -1 if it's unknown. + int line_number() const { return line_number_; } + + // Gets the summary of the failure message. + const char* summary() const { return summary_.c_str(); } + + // Gets the message associated with the test part. + const char* message() const { return message_.c_str(); } + + // Returns true iff the test part passed. 
+ bool passed() const { return type_ == kSuccess; } + + // Returns true iff the test part failed. + bool failed() const { return type_ != kSuccess; } + + // Returns true iff the test part non-fatally failed. + bool nonfatally_failed() const { return type_ == kNonFatalFailure; } + + // Returns true iff the test part fatally failed. + bool fatally_failed() const { return type_ == kFatalFailure; } + private: + Type type_; + + // Gets the summary of the failure message by omitting the stack + // trace in it. + static internal::String ExtractSummary(const char* message); + + // The name of the source file where the test part took place, or + // NULL if the source file is unknown. + internal::String file_name_; + // The line in the source file where the test part took place, or -1 + // if the line number is unknown. + int line_number_; + internal::String summary_; // The test failure summary. + internal::String message_; // The test failure message. +}; + +// Prints a TestPartResult object. +std::ostream& operator<<(std::ostream& os, const TestPartResult& result); + +// An array of TestPartResult objects. +// +// Don't inherit from TestPartResultArray as its destructor is not +// virtual. +class GTEST_API_ TestPartResultArray { + public: + TestPartResultArray() {} + + // Appends the given TestPartResult to the array. + void Append(const TestPartResult& result); + + // Returns the TestPartResult at the given index (0-based). + const TestPartResult& GetTestPartResult(int index) const; + + // Returns the number of TestPartResult objects in the array. + int size() const; + + private: + std::vector array_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray); +}; + +// This interface knows how to report a test part result. +class TestPartResultReporterInterface { + public: + virtual ~TestPartResultReporterInterface() {} + + virtual void ReportTestPartResult(const TestPartResult& result) = 0; +}; + +namespace internal { + +// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a +// statement generates new fatal failures. To do so it registers itself as the +// current test part result reporter. Besides checking if fatal failures were +// reported, it only delegates the reporting to the former result reporter. +// The original result reporter is restored in the destructor. +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +class GTEST_API_ HasNewFatalFailureHelper + : public TestPartResultReporterInterface { + public: + HasNewFatalFailureHelper(); + virtual ~HasNewFatalFailureHelper(); + virtual void ReportTestPartResult(const TestPartResult& result); + bool has_new_fatal_failure() const { return has_new_fatal_failure_; } + private: + bool has_new_fatal_failure_; + TestPartResultReporterInterface* original_reporter_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper); +}; + +} // namespace internal + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ +#define GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ + +// This header implements typed tests and type-parameterized tests. + +// Typed (aka type-driven) tests repeat the same test for types in a +// list. You must know which types you want to test with when writing +// typed tests. Here's how you do it: + +#if 0 + +// First, define a fixture class template. It should be parameterized +// by a type. Remember to derive it from testing::Test. +template +class FooTest : public testing::Test { + public: + ... + typedef std::list List; + static T shared_; + T value_; +}; + +// Next, associate a list of types with the test case, which will be +// repeated for each type in the list. The typedef is necessary for +// the macro to parse correctly. +typedef testing::Types MyTypes; +TYPED_TEST_CASE(FooTest, MyTypes); + +// If the type list contains only one type, you can write that type +// directly without Types<...>: +// TYPED_TEST_CASE(FooTest, int); + +// Then, use TYPED_TEST() instead of TEST_F() to define as many typed +// tests for this test case as you want. +TYPED_TEST(FooTest, DoesBlah) { + // Inside a test, refer to TypeParam to get the type parameter. + // Since we are inside a derived class template, C++ requires use to + // visit the members of FooTest via 'this'. + TypeParam n = this->value_; + + // To visit static members of the fixture, add the TestFixture:: + // prefix. + n += TestFixture::shared_; + + // To refer to typedefs in the fixture, add the "typename + // TestFixture::" prefix. + typename TestFixture::List values; + values.push_back(n); + ... +} + +TYPED_TEST(FooTest, HasPropertyA) { ... } + +#endif // 0 + +// Type-parameterized tests are abstract test patterns parameterized +// by a type. Compared with typed tests, type-parameterized tests +// allow you to define the test pattern without knowing what the type +// parameters are. The defined pattern can be instantiated with +// different types any number of times, in any number of translation +// units. +// +// If you are designing an interface or concept, you can define a +// suite of type-parameterized tests to verify properties that any +// valid implementation of the interface/concept should have. Then, +// each implementation can easily instantiate the test suite to verify +// that it conforms to the requirements, without having to write +// similar tests repeatedly. Here's an example: + +#if 0 + +// First, define a fixture class template. 
It should be parameterized +// by a type. Remember to derive it from testing::Test. +template +class FooTest : public testing::Test { + ... +}; + +// Next, declare that you will define a type-parameterized test case +// (the _P suffix is for "parameterized" or "pattern", whichever you +// prefer): +TYPED_TEST_CASE_P(FooTest); + +// Then, use TYPED_TEST_P() to define as many type-parameterized tests +// for this type-parameterized test case as you want. +TYPED_TEST_P(FooTest, DoesBlah) { + // Inside a test, refer to TypeParam to get the type parameter. + TypeParam n = 0; + ... +} + +TYPED_TEST_P(FooTest, HasPropertyA) { ... } + +// Now the tricky part: you need to register all test patterns before +// you can instantiate them. The first argument of the macro is the +// test case name; the rest are the names of the tests in this test +// case. +REGISTER_TYPED_TEST_CASE_P(FooTest, + DoesBlah, HasPropertyA); + +// Finally, you are free to instantiate the pattern with the types you +// want. If you put the above code in a header file, you can #include +// it in multiple C++ source files and instantiate it multiple times. +// +// To distinguish different instances of the pattern, the first +// argument to the INSTANTIATE_* macro is a prefix that will be added +// to the actual test case name. Remember to pick unique prefixes for +// different instances. +typedef testing::Types MyTypes; +INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes); + +// If the type list contains only one type, you can write that type +// directly without Types<...>: +// INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, int); + +#endif // 0 + + +// Implements typed tests. + +#if GTEST_HAS_TYPED_TEST + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Expands to the name of the typedef for the type parameters of the +// given test case. +# define GTEST_TYPE_PARAMS_(TestCaseName) gtest_type_params_##TestCaseName##_ + +// The 'Types' template argument below must have spaces around it +// since some compilers may choke on '>>' when passing a template +// instance (e.g. Types) +# define TYPED_TEST_CASE(CaseName, Types) \ + typedef ::testing::internal::TypeList< Types >::type \ + GTEST_TYPE_PARAMS_(CaseName) + +# define TYPED_TEST(CaseName, TestName) \ + template \ + class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \ + : public CaseName { \ + private: \ + typedef CaseName TestFixture; \ + typedef gtest_TypeParam_ TypeParam; \ + virtual void TestBody(); \ + }; \ + bool gtest_##CaseName##_##TestName##_registered_ GTEST_ATTRIBUTE_UNUSED_ = \ + ::testing::internal::TypeParameterizedTest< \ + CaseName, \ + ::testing::internal::TemplateSel< \ + GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \ + GTEST_TYPE_PARAMS_(CaseName)>::Register(\ + "", #CaseName, #TestName, 0); \ + template \ + void GTEST_TEST_CLASS_NAME_(CaseName, TestName)::TestBody() + +#endif // GTEST_HAS_TYPED_TEST + +// Implements type-parameterized tests. + +#if GTEST_HAS_TYPED_TEST_P + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Expands to the namespace name that the type-parameterized tests for +// the given type-parameterized test case are defined in. The exact +// name of the namespace is subject to change without notice. +# define GTEST_CASE_NAMESPACE_(TestCaseName) \ + gtest_case_##TestCaseName##_ + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Expands to the name of the variable used to remember the names of +// the defined tests in the given test case. 
+# define GTEST_TYPED_TEST_CASE_P_STATE_(TestCaseName) \ + gtest_typed_test_case_p_state_##TestCaseName##_ + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY. +// +// Expands to the name of the variable used to remember the names of +// the registered tests in the given test case. +# define GTEST_REGISTERED_TEST_NAMES_(TestCaseName) \ + gtest_registered_test_names_##TestCaseName##_ + +// The variables defined in the type-parameterized test macros are +// static as typically these macros are used in a .h file that can be +// #included in multiple translation units linked together. +# define TYPED_TEST_CASE_P(CaseName) \ + static ::testing::internal::TypedTestCasePState \ + GTEST_TYPED_TEST_CASE_P_STATE_(CaseName) + +# define TYPED_TEST_P(CaseName, TestName) \ + namespace GTEST_CASE_NAMESPACE_(CaseName) { \ + template \ + class TestName : public CaseName { \ + private: \ + typedef CaseName TestFixture; \ + typedef gtest_TypeParam_ TypeParam; \ + virtual void TestBody(); \ + }; \ + static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \ + GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).AddTestName(\ + __FILE__, __LINE__, #CaseName, #TestName); \ + } \ + template \ + void GTEST_CASE_NAMESPACE_(CaseName)::TestName::TestBody() + +# define REGISTER_TYPED_TEST_CASE_P(CaseName, ...) \ + namespace GTEST_CASE_NAMESPACE_(CaseName) { \ + typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \ + } \ + static const char* const GTEST_REGISTERED_TEST_NAMES_(CaseName) = \ + GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).VerifyRegisteredTestNames(\ + __FILE__, __LINE__, #__VA_ARGS__) + +// The 'Types' template argument below must have spaces around it +// since some compilers may choke on '>>' when passing a template +// instance (e.g. Types) +# define INSTANTIATE_TYPED_TEST_CASE_P(Prefix, CaseName, Types) \ + bool gtest_##Prefix##_##CaseName GTEST_ATTRIBUTE_UNUSED_ = \ + ::testing::internal::TypeParameterizedTestCase::type>::Register(\ + #Prefix, #CaseName, GTEST_REGISTERED_TEST_NAMES_(CaseName)) + +#endif // GTEST_HAS_TYPED_TEST_P + +#endif // GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ + +// Depending on the platform, different string classes are available. +// On Linux, in addition to ::std::string, Google also makes use of +// class ::string, which has the same interface as ::std::string, but +// has a different implementation. +// +// The user can define GTEST_HAS_GLOBAL_STRING to 1 to indicate that +// ::string is available AND is a distinct type to ::std::string, or +// define it to 0 to indicate otherwise. +// +// If the user's ::std::string and ::string are the same class due to +// aliasing, he should define GTEST_HAS_GLOBAL_STRING to 0. +// +// If the user doesn't define GTEST_HAS_GLOBAL_STRING, it is defined +// heuristically. + +namespace testing { + +// Declares the flags. + +// This flag temporary enables the disabled tests. +GTEST_DECLARE_bool_(also_run_disabled_tests); + +// This flag brings the debugger on an assertion failure. +GTEST_DECLARE_bool_(break_on_failure); + +// This flag controls whether Google Test catches all test-thrown exceptions +// and logs them as failures. +GTEST_DECLARE_bool_(catch_exceptions); + +// This flag enables using colors in terminal output. Available values are +// "yes" to enable colors, "no" (disable colors), or "auto" (the default) +// to let Google Test decide. +GTEST_DECLARE_string_(color); + +// This flag sets up the filter to select by name using a glob pattern +// the tests to run. 
If the filter is not given all tests are executed. +GTEST_DECLARE_string_(filter); + +// This flag causes the Google Test to list tests. None of the tests listed +// are actually run if the flag is provided. +GTEST_DECLARE_bool_(list_tests); + +// This flag controls whether Google Test emits a detailed XML report to a file +// in addition to its normal textual output. +GTEST_DECLARE_string_(output); + +// This flags control whether Google Test prints the elapsed time for each +// test. +GTEST_DECLARE_bool_(print_time); + +// This flag specifies the random number seed. +GTEST_DECLARE_int32_(random_seed); + +// This flag sets how many times the tests are repeated. The default value +// is 1. If the value is -1 the tests are repeating forever. +GTEST_DECLARE_int32_(repeat); + +// This flag controls whether Google Test includes Google Test internal +// stack frames in failure stack traces. +GTEST_DECLARE_bool_(show_internal_stack_frames); + +// When this flag is specified, tests' order is randomized on every iteration. +GTEST_DECLARE_bool_(shuffle); + +// This flag specifies the maximum number of stack frames to be +// printed in a failure message. +GTEST_DECLARE_int32_(stack_trace_depth); + +// When this flag is specified, a failed assertion will throw an +// exception if exceptions are enabled, or exit the program with a +// non-zero code otherwise. +GTEST_DECLARE_bool_(throw_on_failure); + +// When this flag is set with a "host:port" string, on supported +// platforms test results are streamed to the specified port on +// the specified host machine. +GTEST_DECLARE_string_(stream_result_to); + +// The upper limit for valid stack trace depths. +const int kMaxStackTraceDepth = 100; + +namespace internal { + +class AssertHelper; +class DefaultGlobalTestPartResultReporter; +class ExecDeathTest; +class NoExecDeathTest; +class FinalSuccessChecker; +class GTestFlagSaver; +class TestResultAccessor; +class TestEventListenersAccessor; +class TestEventRepeater; +class WindowsDeathTest; +class UnitTestImpl* GetUnitTestImpl(); +void ReportFailureInUnknownLocation(TestPartResult::Type result_type, + const String& message); + +// Converts a streamable value to a String. A NULL pointer is +// converted to "(null)". When the input value is a ::string, +// ::std::string, ::wstring, or ::std::wstring object, each NUL +// character in it is replaced with "\\0". +// Declared in gtest-internal.h but defined here, so that it has access +// to the definition of the Message class, required by the ARM +// compiler. +template +String StreamableToString(const T& streamable) { + return (Message() << streamable).GetString(); +} + +} // namespace internal + +// The friend relationship of some of these classes is cyclic. +// If we don't forward declare them the compiler might confuse the classes +// in friendship clauses with same named classes on the scope. +class Test; +class TestCase; +class TestInfo; +class UnitTest; + +// A class for indicating whether an assertion was successful. When +// the assertion wasn't successful, the AssertionResult object +// remembers a non-empty message that describes how it failed. +// +// To create an instance of this class, use one of the factory functions +// (AssertionSuccess() and AssertionFailure()). +// +// This class is useful for two purposes: +// 1. Defining predicate functions to be used with Boolean test assertions +// EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts +// 2. 
Defining predicate-format functions to be +// used with predicate assertions (ASSERT_PRED_FORMAT*, etc). +// +// For example, if you define IsEven predicate: +// +// testing::AssertionResult IsEven(int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess(); +// else +// return testing::AssertionFailure() << n << " is odd"; +// } +// +// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5))) +// will print the message +// +// Value of: IsEven(Fib(5)) +// Actual: false (5 is odd) +// Expected: true +// +// instead of a more opaque +// +// Value of: IsEven(Fib(5)) +// Actual: false +// Expected: true +// +// in case IsEven is a simple Boolean predicate. +// +// If you expect your predicate to be reused and want to support informative +// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up +// about half as often as positive ones in our tests), supply messages for +// both success and failure cases: +// +// testing::AssertionResult IsEven(int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess() << n << " is even"; +// else +// return testing::AssertionFailure() << n << " is odd"; +// } +// +// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print +// +// Value of: IsEven(Fib(6)) +// Actual: true (8 is even) +// Expected: false +// +// NB: Predicates that support negative Boolean assertions have reduced +// performance in positive ones so be careful not to use them in tests +// that have lots (tens of thousands) of positive Boolean assertions. +// +// To use this class with EXPECT_PRED_FORMAT assertions such as: +// +// // Verifies that Foo() returns an even number. +// EXPECT_PRED_FORMAT1(IsEven, Foo()); +// +// you need to define: +// +// testing::AssertionResult IsEven(const char* expr, int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess(); +// else +// return testing::AssertionFailure() +// << "Expected: " << expr << " is even\n Actual: it's " << n; +// } +// +// If Foo() returns 5, you will see the following message: +// +// Expected: Foo() is even +// Actual: it's 5 +// +class GTEST_API_ AssertionResult { + public: + // Copy constructor. + // Used in EXPECT_TRUE/FALSE(assertion_result). + AssertionResult(const AssertionResult& other); + // Used in the EXPECT_TRUE/FALSE(bool_expression). + explicit AssertionResult(bool success) : success_(success) {} + + // Returns true iff the assertion succeeded. + operator bool() const { return success_; } // NOLINT + + // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. + AssertionResult operator!() const; + + // Returns the text streamed into this AssertionResult. Test assertions + // use it when they fail (i.e., the predicate's outcome doesn't match the + // assertion's expectation). When nothing has been streamed into the + // object, returns an empty string. + const char* message() const { + return message_.get() != NULL ? message_->c_str() : ""; + } + // TODO(vladl@google.com): Remove this after making sure no clients use it. + // Deprecated; please use message() instead. + const char* failure_message() const { return message(); } + + // Streams a custom failure message into this object. + template AssertionResult& operator<<(const T& value) { + AppendMessage(Message() << value); + return *this; + } + + // Allows streaming basic output manipulators such as endl or flush into + // this object. 
+ AssertionResult& operator<<( + ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) { + AppendMessage(Message() << basic_manipulator); + return *this; + } + + private: + // Appends the contents of message to message_. + void AppendMessage(const Message& a_message) { + if (message_.get() == NULL) + message_.reset(new ::std::string); + message_->append(a_message.GetString().c_str()); + } + + // Stores result of the assertion predicate. + bool success_; + // Stores the message describing the condition in case the expectation + // construct is not satisfied with the predicate's outcome. + // Referenced via a pointer to avoid taking too much stack frame space + // with test assertions. + internal::scoped_ptr< ::std::string> message_; + + GTEST_DISALLOW_ASSIGN_(AssertionResult); +}; + +// Makes a successful assertion result. +GTEST_API_ AssertionResult AssertionSuccess(); + +// Makes a failed assertion result. +GTEST_API_ AssertionResult AssertionFailure(); + +// Makes a failed assertion result with the given failure message. +// Deprecated; use AssertionFailure() << msg. +GTEST_API_ AssertionResult AssertionFailure(const Message& msg); + +// The abstract class that all tests inherit from. +// +// In Google Test, a unit test program contains one or many TestCases, and +// each TestCase contains one or many Tests. +// +// When you define a test using the TEST macro, you don't need to +// explicitly derive from Test - the TEST macro automatically does +// this for you. +// +// The only time you derive from Test is when defining a test fixture +// to be used a TEST_F. For example: +// +// class FooTest : public testing::Test { +// protected: +// virtual void SetUp() { ... } +// virtual void TearDown() { ... } +// ... +// }; +// +// TEST_F(FooTest, Bar) { ... } +// TEST_F(FooTest, Baz) { ... } +// +// Test is not copyable. +class GTEST_API_ Test { + public: + friend class TestInfo; + + // Defines types for pointers to functions that set up and tear down + // a test case. + typedef internal::SetUpTestCaseFunc SetUpTestCaseFunc; + typedef internal::TearDownTestCaseFunc TearDownTestCaseFunc; + + // The d'tor is virtual as we intend to inherit from Test. + virtual ~Test(); + + // Sets up the stuff shared by all tests in this test case. + // + // Google Test will call Foo::SetUpTestCase() before running the first + // test in test case Foo. Hence a sub-class can define its own + // SetUpTestCase() method to shadow the one defined in the super + // class. + static void SetUpTestCase() {} + + // Tears down the stuff shared by all tests in this test case. + // + // Google Test will call Foo::TearDownTestCase() after running the last + // test in test case Foo. Hence a sub-class can define its own + // TearDownTestCase() method to shadow the one defined in the super + // class. + static void TearDownTestCase() {} + + // Returns true iff the current test has a fatal failure. + static bool HasFatalFailure(); + + // Returns true iff the current test has a non-fatal failure. + static bool HasNonfatalFailure(); + + // Returns true iff the current test has a (either fatal or + // non-fatal) failure. + static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); } + + // Logs a property for the current test. Only the last value for a given + // key is remembered. + // These are public static so they can be called from utility functions + // that are not members of the test fixture. 
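+  // For example (an illustrative sketch only; FooTest and the recorded keys
+  // are hypothetical):
+  //
+  //   TEST_F(FooTest, ProcessesInput) {
+  //     RecordProperty("input_size", 42);        // int overload
+  //     RecordProperty("input_label", "small");  // const char* overload
+  //     ...
+  //   }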
+ // The arguments are const char* instead strings, as Google Test is used + // on platforms where string doesn't compile. + // + // Note that a driving consideration for these RecordProperty methods + // was to produce xml output suited to the Greenspan charting utility, + // which at present will only chart values that fit in a 32-bit int. It + // is the user's responsibility to restrict their values to 32-bit ints + // if they intend them to be used with Greenspan. + static void RecordProperty(const char* key, const char* value); + static void RecordProperty(const char* key, int value); + + protected: + // Creates a Test object. + Test(); + + // Sets up the test fixture. + virtual void SetUp(); + + // Tears down the test fixture. + virtual void TearDown(); + + private: + // Returns true iff the current test has the same fixture class as + // the first test in the current test case. + static bool HasSameFixtureClass(); + + // Runs the test after the test fixture has been set up. + // + // A sub-class must implement this to define the test logic. + // + // DO NOT OVERRIDE THIS FUNCTION DIRECTLY IN A USER PROGRAM. + // Instead, use the TEST or TEST_F macro. + virtual void TestBody() = 0; + + // Sets up, executes, and tears down the test. + void Run(); + + // Deletes self. We deliberately pick an unusual name for this + // internal method to avoid clashing with names used in user TESTs. + void DeleteSelf_() { delete this; } + + // Uses a GTestFlagSaver to save and restore all Google Test flags. + const internal::GTestFlagSaver* const gtest_flag_saver_; + + // Often a user mis-spells SetUp() as Setup() and spends a long time + // wondering why it is never called by Google Test. The declaration of + // the following method is solely for catching such an error at + // compile time: + // + // - The return type is deliberately chosen to be not void, so it + // will be a conflict if a user declares void Setup() in his test + // fixture. + // + // - This method is private, so it will be another compiler error + // if a user calls it from his test fixture. + // + // DO NOT OVERRIDE THIS FUNCTION. + // + // If you see an error about overriding the following function or + // about it being private, you have mis-spelled SetUp() as Setup(). + struct Setup_should_be_spelled_SetUp {}; + virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; } + + // We disallow copying Tests. + GTEST_DISALLOW_COPY_AND_ASSIGN_(Test); +}; + +typedef internal::TimeInMillis TimeInMillis; + +// A copyable object representing a user specified test property which can be +// output as a key/value string pair. +// +// Don't inherit from TestProperty as its destructor is not virtual. +class TestProperty { + public: + // C'tor. TestProperty does NOT have a default constructor. + // Always use this constructor (with parameters) to create a + // TestProperty object. + TestProperty(const char* a_key, const char* a_value) : + key_(a_key), value_(a_value) { + } + + // Gets the user supplied key. + const char* key() const { + return key_.c_str(); + } + + // Gets the user supplied value. + const char* value() const { + return value_.c_str(); + } + + // Sets a new value, overriding the one supplied in the constructor. + void SetValue(const char* new_value) { + value_ = new_value; + } + + private: + // The key supplied by the user. + internal::String key_; + // The value supplied by the user. + internal::String value_; +}; + +// The result of a single Test. 
This includes a list of +// TestPartResults, a list of TestProperties, a count of how many +// death tests there are in the Test, and how much time it took to run +// the Test. +// +// TestResult is not copyable. +class GTEST_API_ TestResult { + public: + // Creates an empty TestResult. + TestResult(); + + // D'tor. Do not inherit from TestResult. + ~TestResult(); + + // Gets the number of all test parts. This is the sum of the number + // of successful test parts and the number of failed test parts. + int total_part_count() const; + + // Returns the number of the test properties. + int test_property_count() const; + + // Returns true iff the test passed (i.e. no test part failed). + bool Passed() const { return !Failed(); } + + // Returns true iff the test failed. + bool Failed() const; + + // Returns true iff the test fatally failed. + bool HasFatalFailure() const; + + // Returns true iff the test has a non-fatal failure. + bool HasNonfatalFailure() const; + + // Returns the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Returns the i-th test part result among all the results. i can range + // from 0 to test_property_count() - 1. If i is not in that range, aborts + // the program. + const TestPartResult& GetTestPartResult(int i) const; + + // Returns the i-th test property. i can range from 0 to + // test_property_count() - 1. If i is not in that range, aborts the + // program. + const TestProperty& GetTestProperty(int i) const; + + private: + friend class TestInfo; + friend class UnitTest; + friend class internal::DefaultGlobalTestPartResultReporter; + friend class internal::ExecDeathTest; + friend class internal::TestResultAccessor; + friend class internal::UnitTestImpl; + friend class internal::WindowsDeathTest; + + // Gets the vector of TestPartResults. + const std::vector& test_part_results() const { + return test_part_results_; + } + + // Gets the vector of TestProperties. + const std::vector& test_properties() const { + return test_properties_; + } + + // Sets the elapsed time. + void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; } + + // Adds a test property to the list. The property is validated and may add + // a non-fatal failure if invalid (e.g., if it conflicts with reserved + // key names). If a property is already recorded for the same key, the + // value will be updated, rather than storing multiple values for the same + // key. + void RecordProperty(const TestProperty& test_property); + + // Adds a failure if the key is a reserved attribute of Google Test + // testcase tags. Returns true if the property is valid. + // TODO(russr): Validate attribute names are legal and human readable. + static bool ValidateTestProperty(const TestProperty& test_property); + + // Adds a test part result to the list. + void AddTestPartResult(const TestPartResult& test_part_result); + + // Returns the death test count. + int death_test_count() const { return death_test_count_; } + + // Increments the death test count, returning the new count. + int increment_death_test_count() { return ++death_test_count_; } + + // Clears the test part results. + void ClearTestPartResults(); + + // Clears the object. + void Clear(); + + // Protects mutable state of the property vector and of owned + // properties, whose values may be updated. 
+ internal::Mutex test_properites_mutex_; + + // The vector of TestPartResults + std::vector test_part_results_; + // The vector of TestProperties + std::vector test_properties_; + // Running count of death tests. + int death_test_count_; + // The elapsed time, in milliseconds. + TimeInMillis elapsed_time_; + + // We disallow copying TestResult. + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult); +}; // class TestResult + +// A TestInfo object stores the following information about a test: +// +// Test case name +// Test name +// Whether the test should be run +// A function pointer that creates the test object when invoked +// Test result +// +// The constructor of TestInfo registers itself with the UnitTest +// singleton such that the RUN_ALL_TESTS() macro knows which tests to +// run. +class GTEST_API_ TestInfo { + public: + // Destructs a TestInfo object. This function is not virtual, so + // don't inherit from TestInfo. + ~TestInfo(); + + // Returns the test case name. + const char* test_case_name() const { return test_case_name_.c_str(); } + + // Returns the test name. + const char* name() const { return name_.c_str(); } + + // Returns the name of the parameter type, or NULL if this is not a typed + // or a type-parameterized test. + const char* type_param() const { + if (type_param_.get() != NULL) + return type_param_->c_str(); + return NULL; + } + + // Returns the text representation of the value parameter, or NULL if this + // is not a value-parameterized test. + const char* value_param() const { + if (value_param_.get() != NULL) + return value_param_->c_str(); + return NULL; + } + + // Returns true if this test should run, that is if the test is not disabled + // (or it is disabled but the also_run_disabled_tests flag has been specified) + // and its full name matches the user-specified filter. + // + // Google Test allows the user to filter the tests by their full names. + // The full name of a test Bar in test case Foo is defined as + // "Foo.Bar". Only the tests that match the filter will run. + // + // A filter is a colon-separated list of glob (not regex) patterns, + // optionally followed by a '-' and a colon-separated list of + // negative patterns (tests to exclude). A test is run if it + // matches one of the positive patterns and does not match any of + // the negative patterns. + // + // For example, *A*:Foo.* is a filter that matches any string that + // contains the character 'A' or starts with "Foo.". + bool should_run() const { return should_run_; } + + // Returns the result of the test. + const TestResult* result() const { return &result_; } + + private: + +#if GTEST_HAS_DEATH_TEST + friend class internal::DefaultDeathTestFactory; +#endif // GTEST_HAS_DEATH_TEST + friend class Test; + friend class TestCase; + friend class internal::UnitTestImpl; + friend TestInfo* internal::MakeAndRegisterTestInfo( + const char* test_case_name, const char* name, + const char* type_param, + const char* value_param, + internal::TypeId fixture_class_id, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc, + internal::TestFactoryBase* factory); + + // Constructs a TestInfo object. The newly constructed instance assumes + // ownership of the factory object. + TestInfo(const char* test_case_name, const char* name, + const char* a_type_param, + const char* a_value_param, + internal::TypeId fixture_class_id, + internal::TestFactoryBase* factory); + + // Increments the number of death tests encountered in this test so + // far. 
+ int increment_death_test_count() { + return result_.increment_death_test_count(); + } + + // Creates the test object, runs it, records its result, and then + // deletes it. + void Run(); + + static void ClearTestResult(TestInfo* test_info) { + test_info->result_.Clear(); + } + + // These fields are immutable properties of the test. + const std::string test_case_name_; // Test case name + const std::string name_; // Test name + // Name of the parameter type, or NULL if this is not a typed or a + // type-parameterized test. + const internal::scoped_ptr type_param_; + // Text representation of the value parameter, or NULL if this is not a + // value-parameterized test. + const internal::scoped_ptr value_param_; + const internal::TypeId fixture_class_id_; // ID of the test fixture class + bool should_run_; // True iff this test should run + bool is_disabled_; // True iff this test is disabled + bool matches_filter_; // True if this test matches the + // user-specified filter. + internal::TestFactoryBase* const factory_; // The factory that creates + // the test object + + // This field is mutable and needs to be reset before running the + // test for the second time. + TestResult result_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo); +}; + +// A test case, which consists of a vector of TestInfos. +// +// TestCase is not copyable. +class GTEST_API_ TestCase { + public: + // Creates a TestCase with the given name. + // + // TestCase does NOT have a default constructor. Always use this + // constructor to create a TestCase object. + // + // Arguments: + // + // name: name of the test case + // a_type_param: the name of the test's type parameter, or NULL if + // this is not a type-parameterized test. + // set_up_tc: pointer to the function that sets up the test case + // tear_down_tc: pointer to the function that tears down the test case + TestCase(const char* name, const char* a_type_param, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc); + + // Destructor of TestCase. + virtual ~TestCase(); + + // Gets the name of the TestCase. + const char* name() const { return name_.c_str(); } + + // Returns the name of the parameter type, or NULL if this is not a + // type-parameterized test case. + const char* type_param() const { + if (type_param_.get() != NULL) + return type_param_->c_str(); + return NULL; + } + + // Returns true if any test in this test case should run. + bool should_run() const { return should_run_; } + + // Gets the number of successful tests in this test case. + int successful_test_count() const; + + // Gets the number of failed tests in this test case. + int failed_test_count() const; + + // Gets the number of disabled tests in this test case. + int disabled_test_count() const; + + // Get the number of tests in this test case that should run. + int test_to_run_count() const; + + // Gets the number of all tests in this test case. + int total_test_count() const; + + // Returns true iff the test case passed. + bool Passed() const { return !Failed(); } + + // Returns true iff the test case failed. + bool Failed() const { return failed_test_count() > 0; } + + // Returns the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Returns the i-th test among all the tests. i can range from 0 to + // total_test_count() - 1. If i is not in that range, returns NULL. 
+  const TestInfo* GetTestInfo(int i) const;
+
+ private:
+  friend class Test;
+  friend class internal::UnitTestImpl;
+
+  // Gets the (mutable) vector of TestInfos in this TestCase.
+  std::vector<TestInfo*>& test_info_list() { return test_info_list_; }
+
+  // Gets the (immutable) vector of TestInfos in this TestCase.
+  const std::vector<TestInfo*>& test_info_list() const {
+    return test_info_list_;
+  }
+
+  // Returns the i-th test among all the tests. i can range from 0 to
+  // total_test_count() - 1. If i is not in that range, returns NULL.
+  TestInfo* GetMutableTestInfo(int i);
+
+  // Sets the should_run member.
+  void set_should_run(bool should) { should_run_ = should; }
+
+  // Adds a TestInfo to this test case. Will delete the TestInfo upon
+  // destruction of the TestCase object.
+  void AddTestInfo(TestInfo * test_info);
+
+  // Clears the results of all tests in this test case.
+  void ClearResult();
+
+  // Clears the results of all tests in the given test case.
+  static void ClearTestCaseResult(TestCase* test_case) {
+    test_case->ClearResult();
+  }
+
+  // Runs every test in this TestCase.
+  void Run();
+
+  // Runs SetUpTestCase() for this TestCase. This wrapper is needed
+  // for catching exceptions thrown from SetUpTestCase().
+  void RunSetUpTestCase() { (*set_up_tc_)(); }
+
+  // Runs TearDownTestCase() for this TestCase. This wrapper is
+  // needed for catching exceptions thrown from TearDownTestCase().
+  void RunTearDownTestCase() { (*tear_down_tc_)(); }
+
+  // Returns true iff test passed.
+  static bool TestPassed(const TestInfo* test_info) {
+    return test_info->should_run() && test_info->result()->Passed();
+  }
+
+  // Returns true iff test failed.
+  static bool TestFailed(const TestInfo* test_info) {
+    return test_info->should_run() && test_info->result()->Failed();
+  }
+
+  // Returns true iff test is disabled.
+  static bool TestDisabled(const TestInfo* test_info) {
+    return test_info->is_disabled_;
+  }
+
+  // Returns true if the given test should run.
+  static bool ShouldRunTest(const TestInfo* test_info) {
+    return test_info->should_run();
+  }
+
+  // Shuffles the tests in this test case.
+  void ShuffleTests(internal::Random* random);
+
+  // Restores the test order to before the first shuffle.
+  void UnshuffleTests();
+
+  // Name of the test case.
+  internal::String name_;
+  // Name of the parameter type, or NULL if this is not a typed or a
+  // type-parameterized test.
+  const internal::scoped_ptr<const ::std::string> type_param_;
+  // The vector of TestInfos in their original order. It owns the
+  // elements in the vector.
+  std::vector<TestInfo*> test_info_list_;
+  // Provides a level of indirection for the test list to allow easy
+  // shuffling and restoring the test order. The i-th element in this
+  // vector is the index of the i-th test in the shuffled test list.
+  std::vector<int> test_indices_;
+  // Pointer to the function that sets up the test case.
+  Test::SetUpTestCaseFunc set_up_tc_;
+  // Pointer to the function that tears down the test case.
+  Test::TearDownTestCaseFunc tear_down_tc_;
+  // True iff any test in this test case should run.
+  bool should_run_;
+  // Elapsed time, in milliseconds.
+  TimeInMillis elapsed_time_;
+
+  // We disallow copying TestCases.
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestCase);
+};
+
+// An Environment object is capable of setting up and tearing down an
+// environment. The user should subclass this to define his own
+// environment(s).
+// +// An Environment object does the set-up and tear-down in virtual +// methods SetUp() and TearDown() instead of the constructor and the +// destructor, as: +// +// 1. You cannot safely throw from a destructor. This is a problem +// as in some cases Google Test is used where exceptions are enabled, and +// we may want to implement ASSERT_* using exceptions where they are +// available. +// 2. You cannot use ASSERT_* directly in a constructor or +// destructor. +class Environment { + public: + // The d'tor is virtual as we need to subclass Environment. + virtual ~Environment() {} + + // Override this to define how to set up the environment. + virtual void SetUp() {} + + // Override this to define how to tear down the environment. + virtual void TearDown() {} + private: + // If you see an error about overriding the following function or + // about it being private, you have mis-spelled SetUp() as Setup(). + struct Setup_should_be_spelled_SetUp {}; + virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; } +}; + +// The interface for tracing execution of tests. The methods are organized in +// the order the corresponding events are fired. +class TestEventListener { + public: + virtual ~TestEventListener() {} + + // Fired before any test activity starts. + virtual void OnTestProgramStart(const UnitTest& unit_test) = 0; + + // Fired before each iteration of tests starts. There may be more than + // one iteration if GTEST_FLAG(repeat) is set. iteration is the iteration + // index, starting from 0. + virtual void OnTestIterationStart(const UnitTest& unit_test, + int iteration) = 0; + + // Fired before environment set-up for each iteration of tests starts. + virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) = 0; + + // Fired after environment set-up for each iteration of tests ends. + virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) = 0; + + // Fired before the test case starts. + virtual void OnTestCaseStart(const TestCase& test_case) = 0; + + // Fired before the test starts. + virtual void OnTestStart(const TestInfo& test_info) = 0; + + // Fired after a failed assertion or a SUCCEED() invocation. + virtual void OnTestPartResult(const TestPartResult& test_part_result) = 0; + + // Fired after the test ends. + virtual void OnTestEnd(const TestInfo& test_info) = 0; + + // Fired after the test case ends. + virtual void OnTestCaseEnd(const TestCase& test_case) = 0; + + // Fired before environment tear-down for each iteration of tests starts. + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) = 0; + + // Fired after environment tear-down for each iteration of tests ends. + virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0; + + // Fired after each iteration of tests finishes. + virtual void OnTestIterationEnd(const UnitTest& unit_test, + int iteration) = 0; + + // Fired after all test activities have ended. + virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0; +}; + +// The convenience class for users who need to override just one or two +// methods and are not concerned that a possible change to a signature of +// the methods they override will not be caught during the build. For +// comments about each method please see the definition of TestEventListener +// above. 
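+//
+// A minimal usage sketch (an editorial illustration, not part of the
+// upstream gtest header; the FailurePrinter name is invented for the
+// example): to report only failed tests, one could subclass the
+// EmptyTestEventListener defined below, override a single method, and
+// register the listener through UnitTest::GetInstance()->listeners():
+//
+//   class FailurePrinter : public testing::EmptyTestEventListener {
+//     virtual void OnTestEnd(const testing::TestInfo& test_info) {
+//       if (test_info.result()->Failed())
+//         printf("FAILED: %s.%s\n",
+//                test_info.test_case_name(), test_info.name());
+//     }
+//   };
+//
+//   testing::UnitTest::GetInstance()->listeners().Append(
+//       new FailurePrinter);  // Google Test takes ownership of the listener.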
+class EmptyTestEventListener : public TestEventListener { + public: + virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationStart(const UnitTest& /*unit_test*/, + int /*iteration*/) {} + virtual void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) {} + virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestCaseStart(const TestCase& /*test_case*/) {} + virtual void OnTestStart(const TestInfo& /*test_info*/) {} + virtual void OnTestPartResult(const TestPartResult& /*test_part_result*/) {} + virtual void OnTestEnd(const TestInfo& /*test_info*/) {} + virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {} + virtual void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) {} + virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/, + int /*iteration*/) {} + virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {} +}; + +// TestEventListeners lets users add listeners to track events in Google Test. +class GTEST_API_ TestEventListeners { + public: + TestEventListeners(); + ~TestEventListeners(); + + // Appends an event listener to the end of the list. Google Test assumes + // the ownership of the listener (i.e. it will delete the listener when + // the test program finishes). + void Append(TestEventListener* listener); + + // Removes the given event listener from the list and returns it. It then + // becomes the caller's responsibility to delete the listener. Returns + // NULL if the listener is not found in the list. + TestEventListener* Release(TestEventListener* listener); + + // Returns the standard listener responsible for the default console + // output. Can be removed from the listeners list to shut down default + // console output. Note that removing this object from the listener list + // with Release transfers its ownership to the caller and makes this + // function return NULL the next time. + TestEventListener* default_result_printer() const { + return default_result_printer_; + } + + // Returns the standard listener responsible for the default XML output + // controlled by the --gtest_output=xml flag. Can be removed from the + // listeners list by users who want to shut down the default XML output + // controlled by this flag and substitute it with custom one. Note that + // removing this object from the listener list with Release transfers its + // ownership to the caller and makes this function return NULL the next + // time. + TestEventListener* default_xml_generator() const { + return default_xml_generator_; + } + + private: + friend class TestCase; + friend class TestInfo; + friend class internal::DefaultGlobalTestPartResultReporter; + friend class internal::NoExecDeathTest; + friend class internal::TestEventListenersAccessor; + friend class internal::UnitTestImpl; + + // Returns repeater that broadcasts the TestEventListener events to all + // subscribers. + TestEventListener* repeater(); + + // Sets the default_result_printer attribute to the provided listener. + // The listener is also added to the listener list and previous + // default_result_printer is removed from it and deleted. The listener can + // also be NULL in which case it will not be added to the list. Does + // nothing if the previous and the current listener objects are the same. + void SetDefaultResultPrinter(TestEventListener* listener); + + // Sets the default_xml_generator attribute to the provided listener. 
The + // listener is also added to the listener list and previous + // default_xml_generator is removed from it and deleted. The listener can + // also be NULL in which case it will not be added to the list. Does + // nothing if the previous and the current listener objects are the same. + void SetDefaultXmlGenerator(TestEventListener* listener); + + // Controls whether events will be forwarded by the repeater to the + // listeners in the list. + bool EventForwardingEnabled() const; + void SuppressEventForwarding(); + + // The actual list of listeners. + internal::TestEventRepeater* repeater_; + // Listener responsible for the standard result output. + TestEventListener* default_result_printer_; + // Listener responsible for the creation of the XML output file. + TestEventListener* default_xml_generator_; + + // We disallow copying TestEventListeners. + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners); +}; + +// A UnitTest consists of a vector of TestCases. +// +// This is a singleton class. The only instance of UnitTest is +// created when UnitTest::GetInstance() is first called. This +// instance is never deleted. +// +// UnitTest is not copyable. +// +// This class is thread-safe as long as the methods are called +// according to their specification. +class GTEST_API_ UnitTest { + public: + // Gets the singleton UnitTest object. The first time this method + // is called, a UnitTest object is constructed and returned. + // Consecutive calls will return the same object. + static UnitTest* GetInstance(); + + // Runs all tests in this UnitTest object and prints the result. + // Returns 0 if successful, or 1 otherwise. + // + // This method can only be called from the main thread. + // + // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + int Run() GTEST_MUST_USE_RESULT_; + + // Returns the working directory when the first TEST() or TEST_F() + // was executed. The UnitTest object owns the string. + const char* original_working_dir() const; + + // Returns the TestCase object for the test that's currently running, + // or NULL if no test is running. + const TestCase* current_test_case() const; + + // Returns the TestInfo object for the test that's currently running, + // or NULL if no test is running. + const TestInfo* current_test_info() const; + + // Returns the random seed used at the start of the current test run. + int random_seed() const; + +#if GTEST_HAS_PARAM_TEST + // Returns the ParameterizedTestCaseRegistry object used to keep track of + // value-parameterized tests and instantiate and register them. + // + // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + internal::ParameterizedTestCaseRegistry& parameterized_test_registry(); +#endif // GTEST_HAS_PARAM_TEST + + // Gets the number of successful test cases. + int successful_test_case_count() const; + + // Gets the number of failed test cases. + int failed_test_case_count() const; + + // Gets the number of all test cases. + int total_test_case_count() const; + + // Gets the number of all test cases that contain at least one test + // that should run. + int test_case_to_run_count() const; + + // Gets the number of successful tests. + int successful_test_count() const; + + // Gets the number of failed tests. + int failed_test_count() const; + + // Gets the number of disabled tests. + int disabled_test_count() const; + + // Gets the number of all tests. + int total_test_count() const; + + // Gets the number of tests that should run. + int test_to_run_count() const; + + // Gets the elapsed time, in milliseconds. 
+ TimeInMillis elapsed_time() const; + + // Returns true iff the unit test passed (i.e. all test cases passed). + bool Passed() const; + + // Returns true iff the unit test failed (i.e. some test case failed + // or something outside of all tests failed). + bool Failed() const; + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + const TestCase* GetTestCase(int i) const; + + // Returns the list of event listeners that can be used to track events + // inside Google Test. + TestEventListeners& listeners(); + + private: + // Registers and returns a global test environment. When a test + // program is run, all global test environments will be set-up in + // the order they were registered. After all tests in the program + // have finished, all global test environments will be torn-down in + // the *reverse* order they were registered. + // + // The UnitTest object takes ownership of the given environment. + // + // This method can only be called from the main thread. + Environment* AddEnvironment(Environment* env); + + // Adds a TestPartResult to the current TestResult object. All + // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) + // eventually call this to report their results. The user code + // should use the assertion macros instead of calling this directly. + void AddTestPartResult(TestPartResult::Type result_type, + const char* file_name, + int line_number, + const internal::String& message, + const internal::String& os_stack_trace); + + // Adds a TestProperty to the current TestResult object. If the result already + // contains a property with the same key, the value will be updated. + void RecordPropertyForCurrentTest(const char* key, const char* value); + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + TestCase* GetMutableTestCase(int i); + + // Accessors for the implementation object. + internal::UnitTestImpl* impl() { return impl_; } + const internal::UnitTestImpl* impl() const { return impl_; } + + // These classes and funcions are friends as they need to access private + // members of UnitTest. + friend class Test; + friend class internal::AssertHelper; + friend class internal::ScopedTrace; + friend Environment* AddGlobalTestEnvironment(Environment* env); + friend internal::UnitTestImpl* internal::GetUnitTestImpl(); + friend void internal::ReportFailureInUnknownLocation( + TestPartResult::Type result_type, + const internal::String& message); + + // Creates an empty UnitTest. + UnitTest(); + + // D'tor + virtual ~UnitTest(); + + // Pushes a trace defined by SCOPED_TRACE() on to the per-thread + // Google Test trace stack. + void PushGTestTrace(const internal::TraceInfo& trace); + + // Pops a trace from the per-thread Google Test trace stack. + void PopGTestTrace(); + + // Protects mutable state in *impl_. This is mutable as some const + // methods need to lock it too. + mutable internal::Mutex mutex_; + + // Opaque implementation object. This field is never changed once + // the object is constructed. We don't mark it as const here, as + // doing so will cause a warning in the constructor of UnitTest. + // Mutable state in *impl_ is protected by mutex_. + internal::UnitTestImpl* impl_; + + // We disallow copying UnitTest. + GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest); +}; + +// A convenient wrapper for adding an environment for the test +// program. 
+// +// You should call this before RUN_ALL_TESTS() is called, probably in +// main(). If you use gtest_main, you need to call this before main() +// starts for it to take effect. For example, you can define a global +// variable like this: +// +// testing::Environment* const foo_env = +// testing::AddGlobalTestEnvironment(new FooEnvironment); +// +// However, we strongly recommend you to write your own main() and +// call AddGlobalTestEnvironment() there, as relying on initialization +// of global variables makes the code harder to read and may cause +// problems when you register multiple environments from different +// translation units and the environments have dependencies among them +// (remember that the compiler doesn't guarantee the order in which +// global variables from different translation units are initialized). +inline Environment* AddGlobalTestEnvironment(Environment* env) { + return UnitTest::GetInstance()->AddEnvironment(env); +} + +// Initializes Google Test. This must be called before calling +// RUN_ALL_TESTS(). In particular, it parses a command line for the +// flags that Google Test recognizes. Whenever a Google Test flag is +// seen, it is removed from argv, and *argc is decremented. +// +// No value is returned. Instead, the Google Test flag variables are +// updated. +// +// Calling the function for the second time has no user-visible effect. +GTEST_API_ void InitGoogleTest(int* argc, char** argv); + +// This overloaded version can be used in Windows programs compiled in +// UNICODE mode. +GTEST_API_ void InitGoogleTest(int* argc, wchar_t** argv); + +namespace internal { + +// Formats a comparison assertion (e.g. ASSERT_EQ, EXPECT_LT, and etc) +// operand to be used in a failure message. The type (but not value) +// of the other operand may affect the format. This allows us to +// print a char* as a raw pointer when it is compared against another +// char*, and print it as a C string when it is compared against an +// std::string object, for example. +// +// The default implementation ignores the type of the other operand. +// Some specialized versions are used to handle formatting wide or +// narrow C strings. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +template +String FormatForComparisonFailureMessage(const T1& value, + const T2& /* other_operand */) { + // C++Builder compiles this incorrectly if the namespace isn't explicitly + // given. + return ::testing::PrintToString(value); +} + +// The helper function for {ASSERT|EXPECT}_EQ. +template +AssertionResult CmpHelperEQ(const char* expected_expression, + const char* actual_expression, + const T1& expected, + const T2& actual) { +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4389) // Temporarily disables warning on + // signed/unsigned mismatch. +#endif + + if (expected == actual) { + return AssertionSuccess(); + } + +#ifdef _MSC_VER +# pragma warning(pop) // Restores the warning state. +#endif + + return EqFailure(expected_expression, + actual_expression, + FormatForComparisonFailureMessage(expected, actual), + FormatForComparisonFailureMessage(actual, expected), + false); +} + +// With this overloaded version, we allow anonymous enums to be used +// in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous enums +// can be implicitly cast to BiggestInt. 
+GTEST_API_ AssertionResult CmpHelperEQ(const char* expected_expression,
+                                       const char* actual_expression,
+                                       BiggestInt expected,
+                                       BiggestInt actual);
+
+// The helper class for {ASSERT|EXPECT}_EQ. The template argument
+// lhs_is_null_literal is true iff the first argument to ASSERT_EQ()
+// is a null pointer literal. The following default implementation is
+// for lhs_is_null_literal being false.
+template <bool lhs_is_null_literal>
+class EqHelper {
+ public:
+  // This templatized version is for the general case.
+  template <typename T1, typename T2>
+  static AssertionResult Compare(const char* expected_expression,
+                                 const char* actual_expression,
+                                 const T1& expected,
+                                 const T2& actual) {
+    return CmpHelperEQ(expected_expression, actual_expression, expected,
+                       actual);
+  }
+
+  // With this overloaded version, we allow anonymous enums to be used
+  // in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous
+  // enums can be implicitly cast to BiggestInt.
+  //
+  // Even though its body looks the same as the above version, we
+  // cannot merge the two, as it will make anonymous enums unhappy.
+  static AssertionResult Compare(const char* expected_expression,
+                                 const char* actual_expression,
+                                 BiggestInt expected,
+                                 BiggestInt actual) {
+    return CmpHelperEQ(expected_expression, actual_expression, expected,
+                       actual);
+  }
+};
+
+// This specialization is used when the first argument to ASSERT_EQ()
+// is a null pointer literal, like NULL, false, or 0.
+template <>
+class EqHelper<true> {
+ public:
+  // We define two overloaded versions of Compare(). The first
+  // version will be picked when the second argument to ASSERT_EQ() is
+  // NOT a pointer, e.g. ASSERT_EQ(0, AnIntFunction()) or
+  // EXPECT_EQ(false, a_bool).
+  template <typename T1, typename T2>
+  static AssertionResult Compare(
+      const char* expected_expression,
+      const char* actual_expression,
+      const T1& expected,
+      const T2& actual,
+      // The following line prevents this overload from being considered if T2
+      // is not a pointer type. We need this because ASSERT_EQ(NULL, my_ptr)
+      // expands to Compare("", "", NULL, my_ptr), which requires a conversion
+      // to match the Secret* in the other overload, which would otherwise make
+      // this template match better.
+      typename EnableIf<!is_pointer<T2>::value>::type* = 0) {
+    return CmpHelperEQ(expected_expression, actual_expression, expected,
+                       actual);
+  }
+
+  // This version will be picked when the second argument to ASSERT_EQ() is a
+  // pointer, e.g. ASSERT_EQ(NULL, a_pointer).
+  template <typename T>
+  static AssertionResult Compare(
+      const char* expected_expression,
+      const char* actual_expression,
+      // We used to have a second template parameter instead of Secret*. That
+      // template parameter would deduce to 'long', making this a better match
+      // than the first overload even without the first overload's EnableIf.
+      // Unfortunately, gcc with -Wconversion-null warns when "passing NULL to
+      // non-pointer argument" (even a deduced integral argument), so the old
+      // implementation caused warnings in user code.
+      Secret* /* expected (NULL) */,
+      T* actual) {
+    // We already know that 'expected' is a null pointer.
+    return CmpHelperEQ(expected_expression, actual_expression,
+                       static_cast<T*>(NULL), actual);
+  }
+};
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_??. It is here just to avoid copy-and-paste
+// of similar code.
+//
+// For each templatized helper function, we also define an overloaded
+// version for BiggestInt in order to reduce code bloat and allow
+// anonymous enums to be used with {ASSERT|EXPECT}_??
when compiled +// with gcc 4. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +#define GTEST_IMPL_CMP_HELPER_(op_name, op)\ +template \ +AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \ + const T1& val1, const T2& val2) {\ + if (val1 op val2) {\ + return AssertionSuccess();\ + } else {\ + return AssertionFailure() \ + << "Expected: (" << expr1 << ") " #op " (" << expr2\ + << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\ + << " vs " << FormatForComparisonFailureMessage(val2, val1);\ + }\ +}\ +GTEST_API_ AssertionResult CmpHelper##op_name(\ + const char* expr1, const char* expr2, BiggestInt val1, BiggestInt val2) + +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + +// Implements the helper function for {ASSERT|EXPECT}_NE +GTEST_IMPL_CMP_HELPER_(NE, !=); +// Implements the helper function for {ASSERT|EXPECT}_LE +GTEST_IMPL_CMP_HELPER_(LE, <=); +// Implements the helper function for {ASSERT|EXPECT}_LT +GTEST_IMPL_CMP_HELPER_(LT, < ); +// Implements the helper function for {ASSERT|EXPECT}_GE +GTEST_IMPL_CMP_HELPER_(GE, >=); +// Implements the helper function for {ASSERT|EXPECT}_GT +GTEST_IMPL_CMP_HELPER_(GT, > ); + +#undef GTEST_IMPL_CMP_HELPER_ + +// The helper function for {ASSERT|EXPECT}_STREQ. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const char* expected, + const char* actual); + +// The helper function for {ASSERT|EXPECT}_STRCASEEQ. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression, + const char* actual_expression, + const char* expected, + const char* actual); + +// The helper function for {ASSERT|EXPECT}_STRNE. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2); + +// The helper function for {ASSERT|EXPECT}_STRCASENE. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2); + + +// Helper function for *_STREQ on wide strings. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const wchar_t* expected, + const wchar_t* actual); + +// Helper function for *_STRNE on wide strings. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const wchar_t* s1, + const wchar_t* s2); + +} // namespace internal + +// IsSubstring() and IsNotSubstring() are intended to be used as the +// first argument to {EXPECT,ASSERT}_PRED_FORMAT2(), not by +// themselves. They check whether needle is a substring of haystack +// (NULL is considered a substring of itself only), and return an +// appropriate error message when they fail. +// +// The {needle,haystack}_expr arguments are the stringified +// expressions that generated the two real arguments. 
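+//
+// For example (an editorial sketch, not part of the upstream header; the
+// error_message variable is assumed to exist in the test):
+//
+//   EXPECT_PRED_FORMAT2(::testing::IsSubstring, "needle", error_message);
+//
+// On failure, the stringified needle/haystack expressions and the values
+// involved are reported in the assertion message.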
+GTEST_API_ AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::string& needle, const ::std::string& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::string& needle, const ::std::string& haystack);
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_API_ AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::wstring& needle, const ::std::wstring& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::wstring& needle, const ::std::wstring& haystack);
+#endif  // GTEST_HAS_STD_WSTRING
+
+namespace internal {
+
+// Helper template function for comparing floating-points.
+//
+// Template parameter:
+//
+//   RawType: the raw floating-point type (either float or double)
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename RawType>
+AssertionResult CmpHelperFloatingPointEQ(const char* expected_expression,
+                                         const char* actual_expression,
+                                         RawType expected,
+                                         RawType actual) {
+  const FloatingPoint<RawType> lhs(expected), rhs(actual);
+
+  if (lhs.AlmostEquals(rhs)) {
+    return AssertionSuccess();
+  }
+
+  ::std::stringstream expected_ss;
+  expected_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+              << expected;
+
+  ::std::stringstream actual_ss;
+  actual_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+            << actual;
+
+  return EqFailure(expected_expression,
+                   actual_expression,
+                   StringStreamToString(&expected_ss),
+                   StringStreamToString(&actual_ss),
+                   false);
+}
+
+// Helper function for implementing ASSERT_NEAR.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1,
+                                                const char* expr2,
+                                                const char* abs_error_expr,
+                                                double val1,
+                                                double val2,
+                                                double abs_error);
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+// A class that enables one to stream messages to assertion macros
+class GTEST_API_ AssertHelper {
+ public:
+  // Constructor.
+  AssertHelper(TestPartResult::Type type,
+               const char* file,
+               int line,
+               const char* message);
+  ~AssertHelper();
+
+  // Message assignment is a semantic trick to enable assertion
+  // streaming; see the GTEST_MESSAGE_ macro below.
+  void operator=(const Message& message) const;
+
+ private:
+  // We put our data in a struct so that the size of the AssertHelper class can
+  // be as small as possible. This is important because gcc is incapable of
+  // re-using stack space even for temporary variables, so every EXPECT_EQ
+  // reserves stack space for another AssertHelper.
+  struct AssertHelperData {
+    AssertHelperData(TestPartResult::Type t,
+                     const char* srcfile,
+                     int line_num,
+                     const char* msg)
+        : type(t), file(srcfile), line(line_num), message(msg) { }
+
+    TestPartResult::Type const type;
+    const char* const file;
+    int const line;
+    String const message;
+
+   private:
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData);
+  };
+
+  AssertHelperData* const data_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper);
+};
+
+}  // namespace internal
+
+#if GTEST_HAS_PARAM_TEST
+// The pure interface class that all value-parameterized tests inherit from.
+// A value-parameterized class must inherit from both ::testing::Test and
+// ::testing::WithParamInterface<T>. In most cases that just means inheriting
+// from ::testing::TestWithParam<T>, but more complicated test hierarchies
+// may need to inherit from Test and WithParamInterface<T> at different levels.
+//
+// This interface has support for accessing the test parameter value via
+// the GetParam() method.
+//
+// Use it with one of the parameter generator defining functions, like Range(),
+// Values(), ValuesIn(), Bool(), and Combine().
+//
+// class FooTest : public ::testing::TestWithParam<int> {
+//  protected:
+//   FooTest() {
+//     // Can use GetParam() here.
+//   }
+//   virtual ~FooTest() {
+//     // Can use GetParam() here.
+//   }
+//   virtual void SetUp() {
+//     // Can use GetParam() here.
+//   }
+//   virtual void TearDown() {
+//     // Can use GetParam() here.
+//   }
+// };
+// TEST_P(FooTest, DoesBar) {
+//   // Can use GetParam() method here.
+//   Foo foo;
+//   ASSERT_TRUE(foo.DoesBar(GetParam()));
+// }
+// INSTANTIATE_TEST_CASE_P(OneToTenRange, FooTest, ::testing::Range(1, 10));
+
+template <typename T>
+class WithParamInterface {
+ public:
+  typedef T ParamType;
+  virtual ~WithParamInterface() {}
+
+  // The current parameter value. Is also available in the test fixture's
+  // constructor. This member function is non-static, even though it only
+  // references static data, to reduce the opportunity for incorrect uses
+  // like writing 'WithParamInterface<bool>::GetParam()' for a test that
+  // uses a fixture whose parameter type is int.
+  const ParamType& GetParam() const { return *parameter_; }
+
+ private:
+  // Sets parameter value. The caller is responsible for making sure the value
+  // remains alive and unchanged throughout the current test.
+  static void SetParam(const ParamType* parameter) {
+    parameter_ = parameter;
+  }
+
+  // Static value used for accessing parameter during a test lifetime.
+  static const ParamType* parameter_;
+
+  // TestClass must be a subclass of WithParamInterface<T> and Test.
+  template <class TestClass> friend class internal::ParameterizedTestFactory;
+};
+
+template <typename T>
+const T* WithParamInterface<T>::parameter_ = NULL;
+
+// Most value-parameterized classes can ignore the existence of
+// WithParamInterface, and can just inherit from ::testing::TestWithParam<T>.
+
+template <typename T>
+class TestWithParam : public Test, public WithParamInterface<T> {
+};
+
+#endif  // GTEST_HAS_PARAM_TEST
+
+// Macros for indicating success/failure in test code.
+
+// ADD_FAILURE unconditionally adds a failure to the current test.
+// SUCCEED generates a success - it doesn't automatically make the
+// current test successful, as a test is only successful when it has
+// no failure.
+//
+// EXPECT_* verifies that a certain condition is satisfied. If not,
+// it behaves like ADD_FAILURE. In particular:
+//
+//   EXPECT_TRUE verifies that a Boolean condition is true.
+//   EXPECT_FALSE verifies that a Boolean condition is false.
+// +// FAIL and ASSERT_* are similar to ADD_FAILURE and EXPECT_*, except +// that they will also abort the current function on failure. People +// usually want the fail-fast behavior of FAIL and ASSERT_*, but those +// writing data-driven tests often find themselves using ADD_FAILURE +// and EXPECT_* more. +// +// Examples: +// +// EXPECT_TRUE(server.StatusIsOK()); +// ASSERT_FALSE(server.HasPendingRequest(port)) +// << "There are still pending requests " << "on port " << port; + +// Generates a nonfatal failure with a generic message. +#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_("Failed") + +// Generates a nonfatal failure at the given source file location with +// a generic message. +#define ADD_FAILURE_AT(file, line) \ + GTEST_MESSAGE_AT_(file, line, "Failed", \ + ::testing::TestPartResult::kNonFatalFailure) + +// Generates a fatal failure with a generic message. +#define GTEST_FAIL() GTEST_FATAL_FAILURE_("Failed") + +// Define this macro to 1 to omit the definition of FAIL(), which is a +// generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_FAIL +# define FAIL() GTEST_FAIL() +#endif + +// Generates a success with a generic message. +#define GTEST_SUCCEED() GTEST_SUCCESS_("Succeeded") + +// Define this macro to 1 to omit the definition of SUCCEED(), which +// is a generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_SUCCEED +# define SUCCEED() GTEST_SUCCEED() +#endif + +// Macros for testing exceptions. +// +// * {ASSERT|EXPECT}_THROW(statement, expected_exception): +// Tests that the statement throws the expected exception. +// * {ASSERT|EXPECT}_NO_THROW(statement): +// Tests that the statement doesn't throw any exception. +// * {ASSERT|EXPECT}_ANY_THROW(statement): +// Tests that the statement throws an exception. + +#define EXPECT_THROW(statement, expected_exception) \ + GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_) +#define EXPECT_NO_THROW(statement) \ + GTEST_TEST_NO_THROW_(statement, GTEST_NONFATAL_FAILURE_) +#define EXPECT_ANY_THROW(statement) \ + GTEST_TEST_ANY_THROW_(statement, GTEST_NONFATAL_FAILURE_) +#define ASSERT_THROW(statement, expected_exception) \ + GTEST_TEST_THROW_(statement, expected_exception, GTEST_FATAL_FAILURE_) +#define ASSERT_NO_THROW(statement) \ + GTEST_TEST_NO_THROW_(statement, GTEST_FATAL_FAILURE_) +#define ASSERT_ANY_THROW(statement) \ + GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_) + +// Boolean assertions. Condition can be either a Boolean expression or an +// AssertionResult. For more information on how to use AssertionResult with +// these macros see comments on that class. +#define EXPECT_TRUE(condition) \ + GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \ + GTEST_NONFATAL_FAILURE_) +#define EXPECT_FALSE(condition) \ + GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \ + GTEST_NONFATAL_FAILURE_) +#define ASSERT_TRUE(condition) \ + GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \ + GTEST_FATAL_FAILURE_) +#define ASSERT_FALSE(condition) \ + GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \ + GTEST_FATAL_FAILURE_) + +// Includes the auto-generated header that implements a family of +// generic predicate assertion macros. +// Copyright 2006, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// This file is AUTOMATICALLY GENERATED on 09/24/2010 by command +// 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND! +// +// Implements a family of generic predicate assertion macros. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ + +// Makes sure this header is not included before gtest.h. +#ifndef GTEST_INCLUDE_GTEST_GTEST_H_ +# error Do not include gtest_pred_impl.h directly. Include gtest.h instead. +#endif // GTEST_INCLUDE_GTEST_GTEST_H_ + +// This header implements a family of generic predicate assertion +// macros: +// +// ASSERT_PRED_FORMAT1(pred_format, v1) +// ASSERT_PRED_FORMAT2(pred_format, v1, v2) +// ... +// +// where pred_format is a function or functor that takes n (in the +// case of ASSERT_PRED_FORMATn) values and their source expression +// text, and returns a testing::AssertionResult. See the definition +// of ASSERT_EQ in gtest.h for an example. +// +// If you don't care about formatting, you can use the more +// restrictive version: +// +// ASSERT_PRED1(pred, v1) +// ASSERT_PRED2(pred, v1, v2) +// ... +// +// where pred is an n-ary function or functor that returns bool, +// and the values v1, v2, ..., must support the << operator for +// streaming to std::ostream. +// +// We also define the EXPECT_* variations. +// +// For now we only support predicates whose arity is at most 5. +// Please email googletestframework@googlegroups.com if you need +// support for higher arities. + +// GTEST_ASSERT_ is the basic statement to which all of the assertions +// in this file reduce. Don't use this in your code. + +#define GTEST_ASSERT_(expression, on_failure) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (const ::testing::AssertionResult gtest_ar = (expression)) \ + ; \ + else \ + on_failure(gtest_ar.failure_message()) + + +// Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use +// this in your code. 
+template +AssertionResult AssertPred1Helper(const char* pred_text, + const char* e1, + Pred pred, + const T1& v1) { + if (pred(v1)) return AssertionSuccess(); + + return AssertionFailure() << pred_text << "(" + << e1 << ") evaluates to false, where" + << "\n" << e1 << " evaluates to " << v1; +} + +// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1. +// Don't use this in your code. +#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\ + GTEST_ASSERT_(pred_format(#v1, v1),\ + on_failure) + +// Internal macro for implementing {EXPECT|ASSERT}_PRED1. Don't use +// this in your code. +#define GTEST_PRED1_(pred, v1, on_failure)\ + GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \ + #v1, \ + pred, \ + v1), on_failure) + +// Unary predicate assertion macros. +#define EXPECT_PRED_FORMAT1(pred_format, v1) \ + GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED1(pred, v1) \ + GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT1(pred_format, v1) \ + GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED1(pred, v1) \ + GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_) + + + +// Helper function for implementing {EXPECT|ASSERT}_PRED2. Don't use +// this in your code. +template +AssertionResult AssertPred2Helper(const char* pred_text, + const char* e1, + const char* e2, + Pred pred, + const T1& v1, + const T2& v2) { + if (pred(v1, v2)) return AssertionSuccess(); + + return AssertionFailure() << pred_text << "(" + << e1 << ", " + << e2 << ") evaluates to false, where" + << "\n" << e1 << " evaluates to " << v1 + << "\n" << e2 << " evaluates to " << v2; +} + +// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2. +// Don't use this in your code. +#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\ + GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2),\ + on_failure) + +// Internal macro for implementing {EXPECT|ASSERT}_PRED2. Don't use +// this in your code. +#define GTEST_PRED2_(pred, v1, v2, on_failure)\ + GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \ + #v1, \ + #v2, \ + pred, \ + v1, \ + v2), on_failure) + +// Binary predicate assertion macros. +#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \ + GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED2(pred, v1, v2) \ + GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \ + GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED2(pred, v1, v2) \ + GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_) + + + +// Helper function for implementing {EXPECT|ASSERT}_PRED3. Don't use +// this in your code. +template +AssertionResult AssertPred3Helper(const char* pred_text, + const char* e1, + const char* e2, + const char* e3, + Pred pred, + const T1& v1, + const T2& v2, + const T3& v3) { + if (pred(v1, v2, v3)) return AssertionSuccess(); + + return AssertionFailure() << pred_text << "(" + << e1 << ", " + << e2 << ", " + << e3 << ") evaluates to false, where" + << "\n" << e1 << " evaluates to " << v1 + << "\n" << e2 << " evaluates to " << v2 + << "\n" << e3 << " evaluates to " << v3; +} + +// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3. +// Don't use this in your code. +#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\ + GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3),\ + on_failure) + +// Internal macro for implementing {EXPECT|ASSERT}_PRED3. Don't use +// this in your code. 
+#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \
+                                             #v1, \
+                                             #v2, \
+                                             #v3, \
+                                             pred, \
+                                             v1, \
+                                             v2, \
+                                             v3), on_failure)
+
+// Ternary predicate assertion macros.
+#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+  GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED3(pred, v1, v2, v3) \
+  GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+  GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED3(pred, v1, v2, v3) \
+  GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED4.  Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2,
+          typename T3,
+          typename T4>
+AssertionResult AssertPred4Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  const char* e3,
+                                  const char* e4,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2,
+                                  const T3& v3,
+                                  const T4& v4) {
+  if (pred(v1, v2, v3, v4)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ", "
+                            << e2 << ", "
+                            << e3 << ", "
+                            << e4 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1
+                            << "\n" << e2 << " evaluates to " << v2
+                            << "\n" << e3 << " evaluates to " << v3
+                            << "\n" << e4 << " evaluates to " << v4;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4),\
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED4.  Don't use
+// this in your code.
+#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \
+                                             #v1, \
+                                             #v2, \
+                                             #v3, \
+                                             #v4, \
+                                             pred, \
+                                             v1, \
+                                             v2, \
+                                             v3, \
+                                             v4), on_failure)
+
+// 4-ary predicate assertion macros.
+#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+  GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED4(pred, v1, v2, v3, v4) \
+  GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+  GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED4(pred, v1, v2, v3, v4) \
+  GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED5.  Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2,
+          typename T3,
+          typename T4,
+          typename T5>
+AssertionResult AssertPred5Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  const char* e3,
+                                  const char* e4,
+                                  const char* e5,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2,
+                                  const T3& v3,
+                                  const T4& v4,
+                                  const T5& v5) {
+  if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ", "
+                            << e2 << ", "
+                            << e3 << ", "
+                            << e4 << ", "
+                            << e5 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1
+                            << "\n" << e2 << " evaluates to " << v2
+                            << "\n" << e3 << " evaluates to " << v3
+                            << "\n" << e4 << " evaluates to " << v4
+                            << "\n" << e5 << " evaluates to " << v5;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5),\
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED5.
Don't use +// this in your code. +#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\ + GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \ + #v1, \ + #v2, \ + #v3, \ + #v4, \ + #v5, \ + pred, \ + v1, \ + v2, \ + v3, \ + v4, \ + v5), on_failure) + +// 5-ary predicate assertion macros. +#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \ + GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \ + GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \ + GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \ + GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_) + + + +#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ + +// Macros for testing equalities and inequalities. +// +// * {ASSERT|EXPECT}_EQ(expected, actual): Tests that expected == actual +// * {ASSERT|EXPECT}_NE(v1, v2): Tests that v1 != v2 +// * {ASSERT|EXPECT}_LT(v1, v2): Tests that v1 < v2 +// * {ASSERT|EXPECT}_LE(v1, v2): Tests that v1 <= v2 +// * {ASSERT|EXPECT}_GT(v1, v2): Tests that v1 > v2 +// * {ASSERT|EXPECT}_GE(v1, v2): Tests that v1 >= v2 +// +// When they are not, Google Test prints both the tested expressions and +// their actual values. The values must be compatible built-in types, +// or you will get a compiler error. By "compatible" we mean that the +// values can be compared by the respective operator. +// +// Note: +// +// 1. It is possible to make a user-defined type work with +// {ASSERT|EXPECT}_??(), but that requires overloading the +// comparison operators and is thus discouraged by the Google C++ +// Usage Guide. Therefore, you are advised to use the +// {ASSERT|EXPECT}_TRUE() macro to assert that two objects are +// equal. +// +// 2. The {ASSERT|EXPECT}_??() macros do pointer comparisons on +// pointers (in particular, C strings). Therefore, if you use it +// with two C strings, you are testing how their locations in memory +// are related, not how their content is related. To compare two C +// strings by content, use {ASSERT|EXPECT}_STR*(). +// +// 3. {ASSERT|EXPECT}_EQ(expected, actual) is preferred to +// {ASSERT|EXPECT}_TRUE(expected == actual), as the former tells you +// what the actual value is when it fails, and similarly for the +// other comparisons. +// +// 4. Do not depend on the order in which {ASSERT|EXPECT}_??() +// evaluate their arguments, which is undefined. +// +// 5. These macros evaluate their arguments exactly once. 
+//
+// Examples:
+//
+//   EXPECT_NE(5, Foo());
+//   EXPECT_EQ(NULL, a_pointer);
+//   ASSERT_LT(i, array_size);
+//   ASSERT_GT(records.size(), 0) << "There is no record left.";
+
+#define EXPECT_EQ(expected, actual) \
+  EXPECT_PRED_FORMAT2(::testing::internal:: \
+                      EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \
+                      expected, actual)
+#define EXPECT_NE(expected, actual) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNE, expected, actual)
+#define EXPECT_LE(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define EXPECT_LT(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define EXPECT_GE(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define EXPECT_GT(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+#define GTEST_ASSERT_EQ(expected, actual) \
+  ASSERT_PRED_FORMAT2(::testing::internal:: \
+                      EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \
+                      expected, actual)
+#define GTEST_ASSERT_NE(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2)
+#define GTEST_ASSERT_LE(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define GTEST_ASSERT_LT(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define GTEST_ASSERT_GE(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define GTEST_ASSERT_GT(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+// Define macro GTEST_DONT_DEFINE_ASSERT_XY to 1 to omit the definition of
+// ASSERT_XY(), which clashes with some users' own code.
+
+#if !GTEST_DONT_DEFINE_ASSERT_EQ
+# define ASSERT_EQ(val1, val2) GTEST_ASSERT_EQ(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_NE
+# define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_LE
+# define ASSERT_LE(val1, val2) GTEST_ASSERT_LE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_LT
+# define ASSERT_LT(val1, val2) GTEST_ASSERT_LT(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_GE
+# define ASSERT_GE(val1, val2) GTEST_ASSERT_GE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_GT
+# define ASSERT_GT(val1, val2) GTEST_ASSERT_GT(val1, val2)
+#endif
+
+// C String Comparisons.  All tests treat NULL and any non-NULL string
+// as different.  Two NULLs are equal.
+//
+//    * {ASSERT|EXPECT}_STREQ(s1, s2):     Tests that s1 == s2
+//    * {ASSERT|EXPECT}_STRNE(s1, s2):     Tests that s1 != s2
+//    * {ASSERT|EXPECT}_STRCASEEQ(s1, s2): Tests that s1 == s2, ignoring case
+//    * {ASSERT|EXPECT}_STRCASENE(s1, s2): Tests that s1 != s2, ignoring case
+//
+// For wide or narrow string objects, you can use the
+// {ASSERT|EXPECT}_??() macros.
+//
+// Don't depend on the order in which the arguments are evaluated,
+// which is undefined.
+//
+// These macros evaluate their arguments exactly once.
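+//
+// Illustrative examples (not part of the original header; the concrete
+// strings and the variable `name` are hypothetical):
+//
+//   EXPECT_STREQ("hello", greeting.c_str());  // compares contents, not pointers
+//   EXPECT_STRCASEEQ("Hello", "HELLO");       // passes: case is ignored
+//   ASSERT_STRNE(NULL, name);                 // passes when name is non-NULL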
+
+#define EXPECT_STREQ(expected, actual) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
+#define EXPECT_STRNE(s1, s2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define EXPECT_STRCASEEQ(expected, actual) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
+#define EXPECT_STRCASENE(s1, s2)\
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
+
+#define ASSERT_STREQ(expected, actual) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
+#define ASSERT_STRNE(s1, s2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define ASSERT_STRCASEEQ(expected, actual) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
+#define ASSERT_STRCASENE(s1, s2)\
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
+
+// Macros for comparing floating-point numbers.
+//
+//    * {ASSERT|EXPECT}_FLOAT_EQ(expected, actual):
+//         Tests that two float values are almost equal.
+//    * {ASSERT|EXPECT}_DOUBLE_EQ(expected, actual):
+//         Tests that two double values are almost equal.
+//    * {ASSERT|EXPECT}_NEAR(v1, v2, abs_error):
+//         Tests that v1 and v2 are within the given distance to each other.
+//
+// Google Test uses ULP-based comparison to automatically pick a default
+// error bound that is appropriate for the operands.  See the
+// FloatingPoint template class in gtest-internal.h if you are
+// interested in the implementation details.
+
+#define EXPECT_FLOAT_EQ(expected, actual)\
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+                      expected, actual)
+
+#define EXPECT_DOUBLE_EQ(expected, actual)\
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+                      expected, actual)
+
+#define ASSERT_FLOAT_EQ(expected, actual)\
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+                      expected, actual)
+
+#define ASSERT_DOUBLE_EQ(expected, actual)\
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+                      expected, actual)
+
+#define EXPECT_NEAR(val1, val2, abs_error)\
+  EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+                      val1, val2, abs_error)
+
+#define ASSERT_NEAR(val1, val2, abs_error)\
+  ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+                      val1, val2, abs_error)
+
+// These predicate format functions work on floating-point values, and
+// can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g.
+//
+//   EXPECT_PRED_FORMAT2(testing::DoubleLE, Foo(), 5.0);
+
+// Asserts that val1 is less than, or almost equal to, val2.  Fails
+// otherwise.  In particular, it fails if either val1 or val2 is NaN.
+GTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2,
+                                   float val1, float val2);
+GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2,
+                                    double val1, double val2);
+
+
+#if GTEST_OS_WINDOWS
+
+// Macros that test for HRESULT failure and success, these are only useful
+// on Windows, and rely on Windows SDK macros and APIs to compile.
+//
+//    * {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}(expr)
+//
+// When expr unexpectedly fails or succeeds, Google Test prints the
+// expected result and the actual result with both a human-readable
+// string representation of the error, if available, as well as the
+// hex result code.
+# define EXPECT_HRESULT_SUCCEEDED(expr) \
+    EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define ASSERT_HRESULT_SUCCEEDED(expr) \
+    ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define EXPECT_HRESULT_FAILED(expr) \
+    EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+# define ASSERT_HRESULT_FAILED(expr) \
+    ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+#endif  // GTEST_OS_WINDOWS
+
+// Macros that execute statement and check that it doesn't generate new fatal
+// failures in the current thread.
+//
+//   * {ASSERT|EXPECT}_NO_FATAL_FAILURE(statement);
+//
+// Examples:
+//
+//   EXPECT_NO_FATAL_FAILURE(Process());
+//   ASSERT_NO_FATAL_FAILURE(Process()) << "Process() failed";
+//
+#define ASSERT_NO_FATAL_FAILURE(statement) \
+    GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_)
+#define EXPECT_NO_FATAL_FAILURE(statement) \
+    GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_)
+
+// Causes a trace (including the source file path, the current line
+// number, and the given message) to be included in every test failure
+// message generated by code in the current scope.  The effect is
+// undone when the control leaves the current scope.
+//
+// The message argument can be anything streamable to std::ostream.
+//
+// In the implementation, we include the current line number as part
+// of the dummy variable name, thus allowing multiple SCOPED_TRACE()s
+// to appear in the same block - as long as they are on different
+// lines.
+#define SCOPED_TRACE(message) \
+  ::testing::internal::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\
+    __FILE__, __LINE__, ::testing::Message() << (message))
+
+// Compile-time assertion for type equality.
+// StaticAssertTypeEq<type1, type2>() compiles iff type1 and type2 are
+// the same type.  The value it returns is not interesting.
+//
+// Instead of making StaticAssertTypeEq a class template, we make it a
+// function template that invokes a helper class template.  This
+// prevents a user from misusing StaticAssertTypeEq by
+// defining objects of that type.
+//
+// CAVEAT:
+//
+// When used inside a method of a class template,
+// StaticAssertTypeEq<T1, T2>() is effective ONLY IF the method is
+// instantiated.  For example, given:
+//
+//   template <typename T> class Foo {
+//    public:
+//     void Bar() { testing::StaticAssertTypeEq<int, T>(); }
+//   };
+//
+// the code:
+//
+//   void Test1() { Foo<bool> foo; }
+//
+// will NOT generate a compiler error, as Foo<bool>::Bar() is never
+// actually instantiated.  Instead, you need:
+//
+//   void Test2() { Foo<bool> foo; foo.Bar(); }
+//
+// to cause a compiler error.
+template <typename T1, typename T2>
+bool StaticAssertTypeEq() {
+  (void)internal::StaticAssertTypeEqHelper<T1, T2>();
+  return true;
+}
+
+// Defines a test.
+//
+// The first parameter is the name of the test case, and the second
+// parameter is the name of the test within the test case.
+//
+// The convention is to end the test case name with "Test".  For
+// example, a test case for the Foo class can be named FooTest.
+//
+// The user should put his test code between braces after using this
+// macro.  Example:
+//
+//   TEST(FooTest, InitializesCorrectly) {
+//     Foo foo;
+//     EXPECT_TRUE(foo.StatusIsOK());
+//   }
+
+// Note that we call GetTestTypeId() instead of GetTypeId<
+// ::testing::Test>() here to get the type ID of testing::Test.  This
+// is to work around a suspected linker bug when using Google Test as
+// a framework on Mac OS X.  The bug causes GetTypeId<
+// ::testing::Test>() to return different values depending on whether
+// the call is from the Google Test framework itself or from user test
+// code.  GetTestTypeId() is guaranteed to always return the same
+// value, as it always calls GetTypeId<>() from the Google Test
+// framework.
+#define GTEST_TEST(test_case_name, test_name)\
+  GTEST_TEST_(test_case_name, test_name, \
+              ::testing::Test, ::testing::internal::GetTestTypeId())
+
+// Define this macro to 1 to omit the definition of TEST(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_TEST
+# define TEST(test_case_name, test_name) GTEST_TEST(test_case_name, test_name)
+#endif
+
+// Defines a test that uses a test fixture.
+//
+// The first parameter is the name of the test fixture class, which
+// also doubles as the test case name.  The second parameter is the
+// name of the test within the test case.
+//
+// A test fixture class must be declared earlier.  The user should put
+// his test code between braces after using this macro.  Example:
+//
+//   class FooTest : public testing::Test {
+//    protected:
+//     virtual void SetUp() { b_.AddElement(3); }
+//
+//     Foo a_;
+//     Foo b_;
+//   };
+//
+//   TEST_F(FooTest, InitializesCorrectly) {
+//     EXPECT_TRUE(a_.StatusIsOK());
+//   }
+//
+//   TEST_F(FooTest, ReturnsElementCountCorrectly) {
+//     EXPECT_EQ(0, a_.size());
+//     EXPECT_EQ(1, b_.size());
+//   }
+
+#define TEST_F(test_fixture, test_name)\
+  GTEST_TEST_(test_fixture, test_name, test_fixture, \
+              ::testing::internal::GetTypeId<test_fixture>())
+
+// Use this macro in main() to run all tests.  It returns 0 if all
+// tests are successful, or 1 otherwise.
+//
+// RUN_ALL_TESTS() should be invoked after the command line has been
+// parsed by InitGoogleTest().
+
+#define RUN_ALL_TESTS()\
+  (::testing::UnitTest::GetInstance()->Run())
+
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_H_
diff --git a/src/gtest/gtest_main.cc b/src/gtest/gtest_main.cc
new file mode 100755
index 0000000..a09bbe0
--- /dev/null
+++ b/src/gtest/gtest_main.cc
@@ -0,0 +1,39 @@
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <iostream>
+
+#include "gtest/gtest.h"
+
+GTEST_API_ int main(int argc, char **argv) {
+  std::cout << "Running main() from gtest_main.cc\n";
+
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/tools/caffe.cpp b/tools/caffe.cpp
new file mode 100755
index 0000000..9f31b37
--- /dev/null
+++ b/tools/caffe.cpp
@@ -0,0 +1,374 @@
+#ifdef WITH_PYTHON_LAYER
+#include "boost/python.hpp"
+namespace bp = boost::python;
+#endif
+
+#include <gflags/gflags.h>
+#include <glog/logging.h>
+
+#include <cstring>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "boost/algorithm/string.hpp"
+#include "caffe/caffe.hpp"
+
+using caffe::Blob;
+using caffe::Caffe;
+using caffe::Net;
+using caffe::Layer;
+using caffe::Solver;
+using caffe::shared_ptr;
+using caffe::string;
+using caffe::Timer;
+using caffe::vector;
+using std::ostringstream;
+
+DEFINE_string(gpu, "",
+    "Optional; run in GPU mode on given device IDs separated by ','."
+    "Use '-gpu all' to run on all available GPUs. The effective training "
+    "batch size is multiplied by the number of devices.");
+DEFINE_string(solver, "",
+    "The solver definition protocol buffer text file.");
+DEFINE_string(model, "",
+    "The model definition protocol buffer text file.");
+DEFINE_string(snapshot, "",
+    "Optional; the snapshot solver state to resume training.");
+DEFINE_string(weights, "",
+    "Optional; the pretrained weights to initialize finetuning, "
+    "separated by ','. Cannot be set simultaneously with snapshot.");
+DEFINE_int32(iterations, 50,
+    "The number of iterations to run.");
+
+// A simple registry for caffe commands.
+typedef int (*BrewFunction)();
+typedef std::map<caffe::string, BrewFunction> BrewMap;
+BrewMap g_brew_map;
+
+#define RegisterBrewFunction(func) \
+namespace { \
+class __Registerer_##func { \
+ public: /* NOLINT */ \
+  __Registerer_##func() { \
+    g_brew_map[#func] = &func; \
+  } \
+}; \
+__Registerer_##func g_registerer_##func; \
+}
+
+static BrewFunction GetBrewFunction(const caffe::string& name) {
+  if (g_brew_map.count(name)) {
+    return g_brew_map[name];
+  } else {
+    LOG(ERROR) << "Available caffe actions:";
+    for (BrewMap::iterator it = g_brew_map.begin();
+         it != g_brew_map.end(); ++it) {
+      LOG(ERROR) << "\t" << it->first;
+    }
+    LOG(FATAL) << "Unknown action: " << name;
+    return NULL;  // not reachable, just to suppress old compiler warnings.
+  }
+}
+
+// Parse GPU ids or use all available devices
+static void get_gpus(vector<int>* gpus) {
+  if (FLAGS_gpu == "all") {
+    int count = 0;
+#ifndef CPU_ONLY
+    CUDA_CHECK(cudaGetDeviceCount(&count));
+#else
+    NO_GPU;
+#endif
+    for (int i = 0; i < count; ++i) {
+      gpus->push_back(i);
+    }
+  } else if (FLAGS_gpu.size()) {
+    vector<string> strings;
+    boost::split(strings, FLAGS_gpu, boost::is_any_of(","));
+    for (int i = 0; i < strings.size(); ++i) {
+      gpus->push_back(boost::lexical_cast<int>(strings[i]));
+    }
+  } else {
+    CHECK_EQ(gpus->size(), 0);
+  }
+}
+
+// caffe commands to call by
+//     caffe <command> <args>
+//
+// To add a command, define a function "int command()" and register it with
+// RegisterBrewFunction(action);
+
+// Device Query: show diagnostic information for a GPU device.
+int device_query() {
+  LOG(INFO) << "Querying GPUs " << FLAGS_gpu;
+  vector<int> gpus;
+  get_gpus(&gpus);
+  for (int i = 0; i < gpus.size(); ++i) {
+    caffe::Caffe::SetDevice(gpus[i]);
+    caffe::Caffe::DeviceQuery();
+  }
+  return 0;
+}
+RegisterBrewFunction(device_query);
+
+// Load the weights from the specified caffemodel(s) into the train and
+// test nets.
+void CopyLayers(caffe::Solver<float>* solver, const std::string& model_list) {
+  std::vector<std::string> model_names;
+  boost::split(model_names, model_list, boost::is_any_of(",") );
+  for (int i = 0; i < model_names.size(); ++i) {
+    LOG(INFO) << "Finetuning from " << model_names[i];
+    solver->net()->CopyTrainedLayersFrom(model_names[i]);
+    for (int j = 0; j < solver->test_nets().size(); ++j) {
+      solver->test_nets()[j]->CopyTrainedLayersFrom(model_names[i]);
+    }
+  }
+}
+
+// Train / Finetune a model.
+int train() {
+  CHECK_GT(FLAGS_solver.size(), 0) << "Need a solver definition to train.";
+  CHECK(!FLAGS_snapshot.size() || !FLAGS_weights.size())
+      << "Give a snapshot to resume training or weights to finetune "
+      "but not both.";
+
+  caffe::SolverParameter solver_param;
+  caffe::ReadProtoFromTextFileOrDie(FLAGS_solver, &solver_param);
+
+  // If the gpus flag is not provided, allow the mode and device to be set
+  // in the solver prototxt.
+  if (FLAGS_gpu.size() == 0
+      && solver_param.solver_mode() == caffe::SolverParameter_SolverMode_GPU) {
+    if (solver_param.has_device_id()) {
+      FLAGS_gpu = "" +
+          boost::lexical_cast<string>(solver_param.device_id());
+    } else {  // Set default GPU if unspecified
+      FLAGS_gpu = "" + boost::lexical_cast<string>(0);
+    }
+  }
+
+  vector<int> gpus;
+  get_gpus(&gpus);
+  if (gpus.size() == 0) {
+    Caffe::set_mode(Caffe::CPU);
+  } else {
+    ostringstream s;
+    for (int i = 0; i < gpus.size(); ++i) {
+      s << (i ? ", " : "") << gpus[i];
+    }
+    LOG(INFO) << "Using GPUs " << s.str();
+
+    solver_param.set_device_id(gpus[0]);
+    Caffe::SetDevice(gpus[0]);
+    Caffe::set_mode(Caffe::GPU);
+    Caffe::set_solver_count(gpus.size());
+  }
+
+  shared_ptr<caffe::Solver<float> > solver(caffe::GetSolver<float>(solver_param));
+
+  if (FLAGS_snapshot.size()) {
+    LOG(INFO) << "Resuming from " << FLAGS_snapshot;
+    solver->Restore(FLAGS_snapshot.c_str());
+  } else if (FLAGS_weights.size()) {
+    CopyLayers(solver.get(), FLAGS_weights);
+  }
+
+  if (gpus.size() > 1) {
+    caffe::P2PSync<float> sync(solver, NULL, solver->param());
+    sync.run(gpus);
+  } else {
+    LOG(INFO) << "Starting Optimization";
+    solver->Solve();
+  }
+  LOG(INFO) << "Optimization Done.";
+  return 0;
+}
+RegisterBrewFunction(train);
+
+
+// Test: score a model.
+int test() {
+  CHECK_GT(FLAGS_model.size(), 0) << "Need a model definition to score.";
+  CHECK_GT(FLAGS_weights.size(), 0) << "Need model weights to score.";
+
+  // Set device id and mode
+  vector<int> gpus;
+  get_gpus(&gpus);
+  if (gpus.size() != 0) {
+    LOG(INFO) << "Use GPU with device ID " << gpus[0];
+    Caffe::SetDevice(gpus[0]);
+    Caffe::set_mode(Caffe::GPU);
+  } else {
+    LOG(INFO) << "Use CPU.";
+    Caffe::set_mode(Caffe::CPU);
+  }
+  // Instantiate the caffe net.
+  Net<float> caffe_net(FLAGS_model, caffe::TEST);
+  caffe_net.CopyTrainedLayersFrom(FLAGS_weights);
+  LOG(INFO) << "Running for " << FLAGS_iterations << " iterations.";
+
+  vector<Blob<float>* > bottom_vec;
+  vector<int> test_score_output_id;
+  vector<float> test_score;
+  float loss = 0;
+  for (int i = 0; i < FLAGS_iterations; ++i) {
+    float iter_loss;
+    const vector<Blob<float>*>& result =
+        caffe_net.Forward(bottom_vec, &iter_loss);
+    loss += iter_loss;
+    int idx = 0;
+    for (int j = 0; j < result.size(); ++j) {
+      const float* result_vec = result[j]->cpu_data();
+      for (int k = 0; k < result[j]->count(); ++k, ++idx) {
+        const float score = result_vec[k];
+        if (i == 0) {
+          test_score.push_back(score);
+          test_score_output_id.push_back(j);
+        } else {
+          test_score[idx] += score;
+        }
+        const std::string& output_name = caffe_net.blob_names()[
+            caffe_net.output_blob_indices()[j]];
+        LOG(INFO) << "Batch " << i << ", " << output_name << " = " << score;
+      }
+    }
+  }
+  loss /= FLAGS_iterations;
+  LOG(INFO) << "Loss: " << loss;
+  for (int i = 0; i < test_score.size(); ++i) {
+    const std::string& output_name = caffe_net.blob_names()[
+        caffe_net.output_blob_indices()[test_score_output_id[i]]];
+    const float loss_weight = caffe_net.blob_loss_weights()[
+        caffe_net.output_blob_indices()[test_score_output_id[i]]];
+    std::ostringstream loss_msg_stream;
+    const float mean_score = test_score[i] / FLAGS_iterations;
+    if (loss_weight) {
+      loss_msg_stream << " (* " << loss_weight
+                      << " = " << loss_weight * mean_score << " loss)";
+    }
+    LOG(INFO) << output_name << " = " << mean_score << loss_msg_stream.str();
+  }
+
+  return 0;
+}
+RegisterBrewFunction(test);
+
+
+// Time: benchmark the execution time of a model.
+int time() {
+  CHECK_GT(FLAGS_model.size(), 0) << "Need a model definition to time.";
+
+  // Set device id and mode
+  vector<int> gpus;
+  get_gpus(&gpus);
+  if (gpus.size() != 0) {
+    LOG(INFO) << "Use GPU with device ID " << gpus[0];
+    Caffe::SetDevice(gpus[0]);
+    Caffe::set_mode(Caffe::GPU);
+  } else {
+    LOG(INFO) << "Use CPU.";
+    Caffe::set_mode(Caffe::CPU);
+  }
+  // Instantiate the caffe net.
+  Net<float> caffe_net(FLAGS_model, caffe::TRAIN);
+
+  // Do a clean forward and backward pass, so that memory allocations are done
+  // and future iterations will be more stable.
+  LOG(INFO) << "Performing Forward";
+  // Note that for the speed benchmark, we will assume that the network does
+  // not take any input blobs.
+  float initial_loss;
+  caffe_net.Forward(vector<Blob<float>*>(), &initial_loss);
+  LOG(INFO) << "Initial loss: " << initial_loss;
+  LOG(INFO) << "Performing Backward";
+  caffe_net.Backward();
+
+  const vector<shared_ptr<Layer<float> > >& layers = caffe_net.layers();
+  const vector<vector<Blob<float>*> >& bottom_vecs = caffe_net.bottom_vecs();
+  const vector<vector<Blob<float>*> >& top_vecs = caffe_net.top_vecs();
+  const vector<vector<bool> >& bottom_need_backward =
+      caffe_net.bottom_need_backward();
+  LOG(INFO) << "*** Benchmark begins ***";
+  LOG(INFO) << "Testing for " << FLAGS_iterations << " iterations.";
+  Timer total_timer;
+  total_timer.Start();
+  Timer forward_timer;
+  Timer backward_timer;
+  Timer timer;
+  std::vector<double> forward_time_per_layer(layers.size(), 0.0);
+  std::vector<double> backward_time_per_layer(layers.size(), 0.0);
+  double forward_time = 0.0;
+  double backward_time = 0.0;
+  for (int j = 0; j < FLAGS_iterations; ++j) {
+    Timer iter_timer;
+    iter_timer.Start();
+    forward_timer.Start();
+    for (int i = 0; i < layers.size(); ++i) {
+      timer.Start();
+      layers[i]->Forward(bottom_vecs[i], top_vecs[i]);
+      forward_time_per_layer[i] += timer.MicroSeconds();
+    }
+    forward_time += forward_timer.MicroSeconds();
+    backward_timer.Start();
+    for (int i = layers.size() - 1; i >= 0; --i) {
+      timer.Start();
+      layers[i]->Backward(top_vecs[i], bottom_need_backward[i],
+                          bottom_vecs[i]);
+      backward_time_per_layer[i] += timer.MicroSeconds();
+    }
+    backward_time += backward_timer.MicroSeconds();
+    LOG(INFO) << "Iteration: " << j + 1 << " forward-backward time: "
+      << iter_timer.MilliSeconds() << " ms.";
+  }
+  LOG(INFO) << "Average time per layer: ";
+  for (int i = 0; i < layers.size(); ++i) {
+    const caffe::string& layername = layers[i]->layer_param().name();
+    LOG(INFO) << std::setfill(' ') << std::setw(10) << layername <<
+      "\tforward: " << forward_time_per_layer[i] / 1000 /
+      FLAGS_iterations << " ms.";
+    LOG(INFO) << std::setfill(' ') << std::setw(10) << layername <<
+      "\tbackward: " << backward_time_per_layer[i] / 1000 /
+      FLAGS_iterations << " ms.";
+  }
+  total_timer.Stop();
+  LOG(INFO) << "Average Forward pass: " << forward_time / 1000 /
+    FLAGS_iterations << " ms.";
+  LOG(INFO) << "Average Backward pass: " << backward_time / 1000 /
+    FLAGS_iterations << " ms.";
+  LOG(INFO) << "Average Forward-Backward: " << total_timer.MilliSeconds() /
+    FLAGS_iterations << " ms.";
+  LOG(INFO) << "Total Time: " << total_timer.MilliSeconds() << " ms.";
+  LOG(INFO) << "*** Benchmark ends ***";
+  return 0;
+}
+RegisterBrewFunction(time);
+
+int main(int argc, char** argv) {
+  // Print output to stderr (while still logging).
+  FLAGS_alsologtostderr = 1;
+  // Usage message.
+  gflags::SetUsageMessage("command line brew\n"
+      "usage: caffe <command> <args>\n\n"
+      "commands:\n"
+      "  train           train or finetune a model\n"
+      "  test            score a model\n"
+      "  device_query    show GPU diagnostic information\n"
+      "  time            benchmark model execution time");
+  // Run tool or show usage.
+  caffe::GlobalInit(&argc, &argv);
+  if (argc == 2) {
+#ifdef WITH_PYTHON_LAYER
+    try {
+#endif
+      return GetBrewFunction(caffe::string(argv[1]))();
+#ifdef WITH_PYTHON_LAYER
+    } catch (bp::error_already_set) {
+      PyErr_Print();
+      return 1;
+    }
+#endif
+  } else {
+    gflags::ShowUsageWithFlagsRestrict(argv[0], "tools/caffe");
+  }
+}
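+
+// Illustrative command lines (not part of the original file; the file paths
+// below are hypothetical and only show how the registered brew commands and
+// the flags defined above are invoked):
+//
+//   caffe train --solver=examples/mnist/lenet_solver.prototxt
+//   caffe train --solver=solver.prototxt --snapshot=lenet_iter_5000.solverstate
+//   caffe test --model=train_val.prototxt --weights=net.caffemodel --iterations=100 --gpu=0
+//   caffe time --model=deploy.prototxt --iterations=10
+//   caffe device_query --gpu=all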