frugally-deep-0.17.1/.clang-format
---
BasedOnStyle: WebKit

frugally-deep-0.17.1/.clang-format-ignore
# Ignore generated files
./include_all_in_one/*

frugally-deep-0.17.1/.github/FUNDING.yml
github: [dobiasd]

frugally-deep-0.17.1/.github/ISSUE_TEMPLATE.md
- Before reporting an issue, please make sure you are using the latest versions of frugally-deep, and [the right TensorFlow version](README.md#requirements-and-installation).
- Also check out the [FAQ](../FAQ.md). Maybe your problem/question is common and there already is a canonical answer for it.
- If you have a model and you get unexpected errors when converting it for or when using it with frugally-deep, please upload and post a link to your model file (`.keras`).
- Please try to make the problem reproducible by providing a [SSCCE](http://sscce.org/) as code.
- Feel free to also submit any other feedback, suggestions or questions you might have. :)

frugally-deep-0.17.1/.github/workflows/ci.yml
name: ci
on: [workflow_dispatch, push, pull_request]
jobs:
  build_gcc:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@main
      - name: Setup
        run: |
          # system libs
          sudo apt-get install libblas-dev liblapack-dev libatlas-base-dev gfortran

          # python libs
          sudo pip3 install --upgrade pip
          sudo pip3 install numpy scipy h5py "tensorflow==2.18.0" "keras==3.8.0"
          echo "Python version:"
          python3 --version
          echo "Version numbers of TensorFlow and Keras:"
          python3 -c "import tensorflow as tf; import tensorflow; import keras; print(tf.__version__); print(keras.__version__)"

          # FunctionalPlus
          git clone -b 'v0.2.24' --single-branch --depth 1 https://github.com/Dobiasd/FunctionalPlus
          cd FunctionalPlus
          mkdir -p build && cd build
          cmake ..
          make && sudo make install
          cd ../..

          # Eigen
          git clone -b '3.4.0' --single-branch --depth 1 https://gitlab.com/libeigen/eigen.git
          cd eigen
          mkdir -p build && cd build
          cmake ..
          make && sudo make install
          sudo ln -s /usr/local/include/eigen3/Eigen /usr/local/include/Eigen
          cd ../..

          # json
          git clone -b 'v3.11.3' --single-branch --depth 1 https://github.com/nlohmann/json
          cd json
          mkdir -p build && cd build
          cmake -DJSON_BuildTests=OFF ..
          make && sudo make install
          cd ../..

          # Doctest
          git clone -b 'v2.4.11' --single-branch --depth 1 https://github.com/onqtam/doctest.git
          cd doctest
          mkdir -p build && cd build
          cmake .. -DDOCTEST_WITH_TESTS=OFF -DDOCTEST_WITH_MAIN_IN_STATIC_LIB=OFF
          make && sudo make install
          cd ../..

      - name: Build
        run: |
          # run unit tests
          mkdir -p build && cd build
          which g++
          g++ --version
          cmake .. -DFDEEP_BUILD_UNITTEST=ON
          cmake --build . --target unittest --config Release --
          cd ..
  formatting-check:
    name: "formatting"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@main
      - uses: DoozyX/clang-format-lint-action@master
        name: "Verify formatting"
        with:
          clangFormatVersion: 16

frugally-deep-0.17.1/.gitignore
/.vs
.vscode
*build*
*~
_*
.mypy_cache
/experiments
.idea
CMakeUserPresets.json

frugally-deep-0.17.1/CITATION.cff
cff-version: 1.2.0
title: "frugally-deep"
url: "https://github.com/Dobiasd/frugally-deep"
authors:
  - family-names: "Hermann"
    given-names: "Tobias"
    orcid: "https://orcid.org/0009-0007-4792-4904"

frugally-deep-0.17.1/CMakeLists.txt
cmake_minimum_required(VERSION 3.2)

set(FDEEP_TOP_DIR ${CMAKE_CURRENT_LIST_DIR})
list(APPEND CMAKE_MODULE_PATH "${FDEEP_TOP_DIR}/cmake")

include(cmake/hunter.cmake) # default off

project(frugally-deep VERSION 0.17.1)
message(STATUS "===( ${PROJECT_NAME} ${PROJECT_VERSION} )===")

option(FDEEP_BUILD_UNITTEST "Build unit tests" OFF)
option(FDEEP_USE_TOOLCHAIN "Use external toolchain" OFF)
option(FDEEP_USE_DOUBLE "Use double precision" OFF)

if(NOT FDEEP_USE_TOOLCHAIN)
    include(cmake/toolchain.cmake)
endif()

add_library(fdeep INTERFACE)
add_library(${PROJECT_NAME}::fdeep ALIAS fdeep)

target_include_directories(fdeep INTERFACE
    $<BUILD_INTERFACE:${FDEEP_TOP_DIR}/include>
)

if(FDEEP_USE_DOUBLE)
    target_compile_definitions(fdeep INTERFACE FDEEP_FLOAT_TYPE=double)
endif()

find_package(Threads REQUIRED)
target_link_libraries(fdeep INTERFACE Threads::Threads)

hunter_add_package(FunctionalPlus) # default noop
if(NOT TARGET FunctionalPlus::fplus) # if not used via subdirectory
    find_package(FunctionalPlus CONFIG REQUIRED)
endif()
target_link_libraries(fdeep INTERFACE FunctionalPlus::fplus)

hunter_add_package(Eigen) # default noop
if(NOT TARGET Eigen3::Eigen) # if not used via subdirectory
    find_package(Eigen3 CONFIG REQUIRED)
endif()
target_link_libraries(fdeep INTERFACE Eigen3::Eigen)

hunter_add_package(nlohmann_json) # default noop
if(NOT TARGET nlohmann_json) # if not used via subdirectory
    find_package(nlohmann_json CONFIG REQUIRED)
endif()
target_link_libraries(fdeep INTERFACE nlohmann_json)

if(FDEEP_BUILD_UNITTEST)
    enable_testing()
    add_subdirectory(test)
endif()

# pkgconfig installation:
include(cmake/pkgconfig.cmake)

frugally-deep-0.17.1/FAQ.md
frugally-deep FAQ
=================

Why is my prediction roughly 100 times slower in C++ than in Python?
------------------------------------------------------------------

Maybe you did not tell your C++ compiler to optimize for speed. For g++ and clang, this can be done with `-O3` (and `-march=native`). In the case of Microsoft Visual C++, you need to compile your project not in "Debug" mode but in "Release" mode, and then run it without the debugger attached.

Why is my prediction roughly 10 times slower in C++ than in Python?
-----------------------------------------------------------------

Maybe you are using your GPU in TensorFlow? Frugally-deep does not support GPUs. If you'd like to [compare the performance](test/Dockerfile) of both libraries, disable the GPU for TensorFlow (`CUDA_VISIBLE_DEVICES=''`).

Why is my prediction roughly 4 times slower in C++ than in Python?
----------------------------------------------------------------

TensorFlow uses multiple CPU cores, even for one prediction, if available.
Frugally-deep does not do that. If you'd like to [compare the performance](test/Dockerfile) of both libraries, allow only one CPU core to be used for TensorFlow (`taskset --cpu-list 1`).

If you want more overall throughput, you can parallelize more on the "outside". See ["Does frugally-deep support multiple CPUs?"](#does-frugally-deep-support-multiple-cpus) for details.

Why is my prediction roughly 2 times slower in C++ than in Python?
----------------------------------------------------------------

With single 2D convolutions, frugally-deep is quite fast; depending on the dimensions, it is even faster than TensorFlow. But on some models, TensorFlow applies fancy runtime optimizations, like kernel fusion, etc. Frugally-deep does not support such things, so on some model types, you might experience an insurmountable performance difference.

Why is my application using more memory than expected?
------------------------------------------------------

In case you're using glibc, which is the default libc on most major distributions like Ubuntu, Debian, Arch, etc., memory temporarily allocated during `fdeep::load_model` [might not be freed completely](https://github.com/nlohmann/json#memory-release). To make sure it's given back to the operating system, use [`malloc_trim(0);`](https://manned.org/malloc_trim.3) after calling `fdeep::load_model`.

Why do I get an error when loading my `.json` file in C++?
------------------------------------------------------------

Most likely it's one of the following two reasons:

- The TensorFlow version used is not the one listed in the [requirements](README.md#requirements-and-installation).
- The conversion from `.keras` to `.json` (using `convert_model.py`) was not done with the same version as used when loading the model in C++.

In case you've made sure none of the above is the cause, please open [an issue](https://github.com/Dobiasd/frugally-deep/issues) with a minimal example to reproduce the problem.

Why does `fdeep::model::predict` take and return multiple `fdeep::tensor`s and not just one tensor?
----------------------------------------------------------------------------------------------------

Keras models created with the [sequential API](https://keras.io/getting-started/sequential-model-guide/) always have exactly one input tensor and one output tensor. Models made with the [functional API](https://keras.io/getting-started/functional-api-guide/), however, can have multiple inputs and outputs. To support the latter, `fdeep::model::predict` takes (and returns) not one `fdeep::tensor` but an `std::vector` of them (`fdeep::tensors`). Example:

```python
from keras.models import Model
from keras.layers import Input, Concatenate, Add

inputs = [
    Input(shape=(240, 320, 3)),
    Input(shape=(240, 320, 3))
]

outputs = [
    Concatenate()([inputs[0], inputs[1]]),
    Add()([inputs[0], inputs[1]])
]

model = Model(inputs=inputs, outputs=outputs)
model.compile(loss='mse', optimizer='nadam')
model.save('multi_input_and_output_model.keras')
```

Now in C++, we would then also provide (and receive) two tensors:

```cpp
#include <fdeep/fdeep.hpp>
#include <iostream>

int main()
{
    const auto model = fdeep::load_model("multi_input_and_output_model.json");
    const auto result = model.predict({
        fdeep::tensor(fdeep::tensor_shape(240, 320, 3), 42),
        fdeep::tensor(fdeep::tensor_shape(240, 320, 3), 43)
    });
    std::cout << fdeep::show_tensors(result) << std::endl;
}
```

Keep in mind that giving multiple `fdeep::tensor`s to `fdeep::model::predict` has nothing to do with batch processing, which is not supported.
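Since a loaded `fdeep::model` is thread-safe, you can, however, run several independent single predictions concurrently. The following is only a minimal sketch; `predict_in_parallel`, the thread-per-input approach, and the way the inputs are prepared are made up for illustration and not part of the library:

```cpp
#include <fdeep/fdeep.hpp>
#include <thread>
#include <vector>

// Runs one single prediction per input, each in its own thread.
// fdeep::model::predict is thread-safe, so calling it on the same
// model instance from multiple threads simultaneously is fine.
std::vector<fdeep::tensors> predict_in_parallel(
    const fdeep::model& model,
    const std::vector<fdeep::tensors>& inputs)
{
    std::vector<fdeep::tensors> outputs(inputs.size());
    std::vector<std::thread> workers;
    for (std::size_t i = 0; i < inputs.size(); ++i) {
        workers.emplace_back([&model, &inputs, &outputs, i] {
            outputs[i] = model.predict(inputs[i]);
        });
    }
    for (auto& worker : workers) {
        worker.join();
    }
    return outputs;
}
```

For real workloads, you would of course limit the number of threads to the number of available cores.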
See the question "Does frugally-deep support multiple CPUs?" right below for details.

Does frugally-deep support multiple CPUs?
-----------------------------------------

Parallel processing for one single prediction is not supported. However, if you have multiple predictions to make, you can make use of the fact that a frugally-deep model is thread-safe, i.e., you can call `model.predict` on the same model instance from different threads simultaneously. This way you may utilize up to as many CPU cores as you have predictions to make. In addition, with `model::predict_multi`, there is a convenience function available to handle the parallelism for you. This, however, is not equivalent to batch processing in Keras, since each forward pass will still be made in isolation.

How to do regression vs. classification?
----------------------------------------

`fdeep::model::predict` is the generic prediction. In case you are doing classification, your model might have a softmax as the last layer. Then you will get one tensor with a probability for each possible class. `fdeep::model::predict_class` is a convenience wrapper that will run the forward pass and return the predicted class number, so you don't need to manually find the position in the output tensor with the highest activation. In case you are doing regression resulting in one single value, you can use `fdeep::model::predict_single_output`, which will only return one single floating-point value instead of `tensor`s.

Which data format is used internally?
-------------------------------------

frugally-deep uses `channels_last` (`height, width, depth/channels`) as its internal `image_data_format`, as does TensorFlow. Everything is handled as a float tensor with rank 5. In case of color images, the first two dimensions of the tensor will have size `1`.

Why does my model return different values with frugally-deep compared to Keras?
-------------------------------------------------------------------------------

The fact that `fdeep::load_model` (with default settings) did not fail already proves that your model works the same with frugally-deep as it does with Keras, because when using `convert_model.py`, a test case (input and corresponding output values) is generated automatically and saved along with your model. `fdeep::load_model` runs this test to make sure the results of a forward pass in frugally-deep are the same as in Keras. If not, an exception is thrown.

So why do you get different values nonetheless when running `fdeep::model::predict`? Probably you are not feeding the exact same values into the model as you do in Python. Especially in the case of images as input, this can be caused by:

* a different normalization method for the pixel values
* a different way to scale (e.g., interpolation mode) the image before using it

To check if the input values really are the same, you can print them, in Python and in C++:

```python
input = ...
print(input)
print(input.shape)
result = model.predict([input])
print(result)
print(result.shape)  # result[0].shape in case of multiple output tensors
```

```cpp
const fdeep::tensor input = ...
std::cout << fdeep::show_tensor(input) << std::endl;
std::cout << fdeep::show_tensor_shape(input.shape()) << std::endl;
const auto result = model.predict({input});
std::cout << fdeep::show_tensor_shape(result.front().shape()) << std::endl;
std::cout << fdeep::show_tensors(result) << std::endl;
```

And then check whether they actually are identical.
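For an automated check, you can also compare the values programmatically. A minimal sketch; `max_abs_diff` and the way the reference values get into the program (e.g., copied over from the Python output) are made up for illustration:

```cpp
#include <fdeep/fdeep.hpp>
#include <algorithm>
#include <cassert>
#include <cmath>
#include <vector>

// Returns the largest elementwise difference between the values
// in a tensor and some reference values exported from Python.
fdeep::float_type max_abs_diff(const fdeep::tensor& t,
    const std::vector<fdeep::float_type>& reference)
{
    const auto values = t.to_vector();
    assert(values.size() == reference.size());
    fdeep::float_type max_diff = 0;
    for (std::size_t i = 0; i < values.size(); ++i) {
        max_diff = std::max(max_diff, std::abs(values[i] - reference[i]));
    }
    return max_diff;
}
```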
In case you are creating your `fdeep::tensor` input using `fdeep::tensor_from_bytes`, this also implicitly checks whether you are using the correct values for `high` and `low` in the call to it.

What to do when loading my model with frugally-deep throws an `std::runtime_error` with `test failed`?
------------------------------------------------------------------------------------------------------

Frugally-deep makes sure your model works exactly the same in C++ as it does in Python by running a test when loading. You can soften these tests by increasing `verify_epsilon` in the call to `fdeep::load_model`, or even disable them completely by setting `verify` to `false`.

Also, you might want to try to use `double` instead of `float` for more precision, which you can do by inserting:

```cpp
#define FDEEP_FLOAT_TYPE double
```

before your first include of `fdeep.hpp`:

```cpp
#include <fdeep/fdeep.hpp>
```

Doing so, however, will increase the memory usage of your application and might slow it down a bit.

How to silence the logging output of `fdeep::model::load`?
----------------------------------------------------------

You can use `fdeep::dev_null_logger` for this:

```cpp
const auto model = fdeep::load_model("model.json", true, fdeep::dev_null_logger);
```

Why does `fdeep::model` not have a default constructor?
-------------------------------------------------------

Because an empty model does not make much sense. Instead of having it, by convention, just forward the input or raise an exception when `.predict` is invoked, an `fdeep::model` can only be constructed by `fdeep::load_model` / `fdeep::read_model`. This way it is guaranteed that you always have a valid model.

In case you would like to, for example, use `fdeep::model` as a member variable of a custom class, and you want to initialize it not directly during construction of your objects, you can express this kind of optionality by using `std::unique_ptr<fdeep::model>` or `fplus::maybe<fdeep::model>`.

How to use images loaded with [CImg](http://cimg.eu/) as input for a model?
---------------------------------------------------------------------------

The following example code shows how to:

* load an image using CImg
* convert it to a `fdeep::tensor`
* use it as input for a forward pass on an image-classification model
* print the class number

```cpp
#include <fdeep/fdeep.hpp>
#include <CImg.h>
#include <iostream>

fdeep::tensor cimg_to_tensor(const cimg_library::CImg<unsigned char>& image,
    fdeep::float_type low = 0.0f, fdeep::float_type high = 1.0f)
{
    const int width = image.width();
    const int height = image.height();
    const int channels = image.spectrum();

    std::vector<std::uint8_t> pixels;
    pixels.reserve(height * width * channels);

    // CImg stores the pixels of an image non-interleaved:
    // http://cimg.eu/reference/group__cimg__storage.html
    // This loop changes the order to interleaved,
    // e.g. RRRGGGBBB to RGBRGBRGB for 3-channel images.
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            for (int c = 0; c < channels; c++) {
                pixels.push_back(image(x, y, 0, c));
            }
        }
    }

    return fdeep::tensor_from_bytes(pixels.data(),
        height, width, channels,
        low, high);
}

int main()
{
    const cimg_library::CImg<unsigned char> image("image.jpg");
    const auto model = fdeep::load_model("model.json");
    // Use the correct scaling, i.e., low and high.
    const auto input = cimg_to_tensor(image, 0.0f, 1.0f);
    const auto result = model.predict_class({input});
    std::cout << result << std::endl;
}
```

How to use images loaded with [OpenCV](https://opencv.org/) as input for a model?
---------------------------------------------------------------------------------

The following example code shows how to:

* load an image using OpenCV
* convert it to a `fdeep::tensor`
* use it as input for a forward pass on an image-classification model
* print the class number

```cpp
#include <fdeep/fdeep.hpp>
#include <opencv2/opencv.hpp>
#include <cassert>
#include <iostream>

int main()
{
    cv::Mat image = cv::imread("image.jpg");
    cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
    assert(image.isContinuous());
    const auto model = fdeep::load_model("model.json");
    // Use the correct scaling, i.e., low and high.
    const auto input = fdeep::tensor_from_bytes(image.ptr(),
        static_cast<std::size_t>(image.rows),
        static_cast<std::size_t>(image.cols),
        static_cast<std::size_t>(image.channels()),
        0.0f, 1.0f);
    const auto result = model.predict_class({input});
    std::cout << result << std::endl;
}
```

How to convert an `fdeep::tensor` to an (OpenCV) image and back?
----------------------------------------------------------------

Example code for how to:

* Convert an OpenCV image to an `fdeep::tensor`
* Convert an `fdeep::tensor` to an OpenCV image

```cpp
#include <fdeep/fdeep.hpp>
#include <opencv2/opencv.hpp>
#include <cassert>
#include <cstring>

int main()
{
    const cv::Mat image1 = cv::imread("image.jpg");

    // convert cv::Mat to fdeep::tensor (image1 to tensor)
    const fdeep::tensor tensor = fdeep::tensor_from_bytes(image1.ptr(),
        static_cast<std::size_t>(image1.rows),
        static_cast<std::size_t>(image1.cols),
        static_cast<std::size_t>(image1.channels()));

    // choose the correct pixel type for cv::Mat (gray or RGB/BGR)
    assert(tensor.shape().depth_ == 1 || tensor.shape().depth_ == 3);
    const int mat_type = tensor.shape().depth_ == 1 ? CV_8UC1 : CV_8UC3;
    const int mat_type_float = tensor.shape().depth_ == 1 ? CV_32FC1 : CV_32FC3;

    // convert fdeep::tensor to byte cv::Mat (tensor to image2)
    const cv::Mat image2(
        cv::Size(static_cast<int>(tensor.shape().width_),
            static_cast<int>(tensor.shape().height_)),
        mat_type);
    fdeep::tensor_into_bytes(tensor,
        image2.data, image2.rows * image2.cols * image2.channels());

    // convert fdeep::tensor to float cv::Mat (tensor to image3)
    const cv::Mat image3(
        cv::Size(static_cast<int>(tensor.shape().width_),
            static_cast<int>(tensor.shape().height_)),
        mat_type_float);
    const auto values = tensor.to_vector();
    std::memcpy(image3.data, values.data(), values.size() * sizeof(float));

    // normalize float cv::Mat into float cv::Mat (image3 to image4)
    cv::Mat image4;
    cv::normalize(image3, image4, 1.0, 0.0, cv::NORM_MINMAX);

    // normalize float cv::Mat into byte cv::Mat (image3 to image5)
    cv::Mat tempImage5;
    cv::Mat image5;
    cv::normalize(image3, tempImage5, 255.0, 0.0, cv::NORM_MINMAX);
    tempImage5.convertTo(image5, mat_type);

    // show images for visual verification
    cv::imshow("image1", image1);
    cv::imshow("image2", image2);
    cv::imshow("image3", image3);
    cv::imshow("image4", image4);
    cv::imshow("image5", image5);
    cv::waitKey();
}
```

How to convert an `Eigen::Matrix` to `fdeep::tensor`?
------------------------------------------------------

You can copy the values from `Eigen::Matrix` to `fdeep::tensor`:

```cpp
#include <fdeep/fdeep.hpp>
#include <Eigen/Dense>
#include <iostream>

int main()
{
    // dimensions of the eigen matrix
    const int rows = 640;
    const int cols = 480;

    // matrix having its own memory
    Eigen::MatrixXf mat(rows, cols);

    // populate the matrix in some way
    mat(0, 0) = 4.0f;
    mat(1, 1) = 5.0f;
    mat(4, 2) = 6.0f;

    // create fdeep::tensor with its own memory
    const int tensor_channels = 1;
    const int tensor_rows = rows;
    const int tensor_cols = cols;
    fdeep::tensor_shape tensor_shape(tensor_rows, tensor_cols, tensor_channels);
    fdeep::tensor t(tensor_shape, 0.0f);

    // copy the values into tensor
    for (int y = 0; y < tensor_rows; ++y) {
        for (int x = 0; x < tensor_cols; ++x) {
            for (int c = 0; c < tensor_channels; ++c) {
                t.set(fdeep::tensor_pos(y, x, c), mat(y, x));
            }
        }
    }

    // print some values to make sure the mapping is correct
    std::cout << t.get(fdeep::tensor_pos(0, 0, 0)) << std::endl;
    std::cout << t.get(fdeep::tensor_pos(1, 1, 0)) << std::endl;
    std::cout << t.get(fdeep::tensor_pos(4, 2, 0)) << std::endl;
}
```

How to fill an `fdeep::tensor` with values, e.g., from an `std::vector<float>`?
--------------------------------------------------------------------------------

Of course one can use `fdeep::tensor` as the primary data structure and fill it with values like so:

```cpp
#include <fdeep/fdeep.hpp>
int main()
{
    fdeep::tensor t(fdeep::tensor_shape(3, 1, 1), 0);
    t.set(fdeep::tensor_pos(0, 0, 0), 1);
    t.set(fdeep::tensor_pos(1, 0, 0), 2);
    t.set(fdeep::tensor_pos(2, 0, 0), 3);
}
```

In case one already has an `std::vector<float>` with values, one can just construct a `fdeep::tensor` from it, holding a copy of the values:

```cpp
#include <fdeep/fdeep.hpp>
int main()
{
    const std::vector<float> v = {1, 2, 3};
    const fdeep::tensor t(fdeep::tensor_shape(3, 1, 1), v);
}
```

How to convert an `fdeep::tensor` to an `std::vector<float>`?
--------------------------------------------------------------

```cpp
#include <fdeep/fdeep.hpp>
int main()
{
    const fdeep::tensor tensor(
        fdeep::tensor_shape(static_cast<std::size_t>(4)),
        std::vector<float>{1, 2, 3, 4});
    const std::vector<float> vec = tensor.to_vector();
}
```

How can I use `BatchNormalization` and `Dropout` layers with `training=True`?
-----------------------------------------------------------------------------

Frugally-deep does not support `training=True` on the inbound nodes. But if you'd like to remove this flag from the layers in your model, you can use the following function to do so before using `convert_model.py`:

```python3
from keras.models import load_model

def remove_training_flags(old_model_path, new_model_path):
    def do_remove(model):
        layers = model.layers
        for layer in layers:
            for node in layer.inbound_nodes:
                if "training" in node.call_kwargs and node.call_kwargs["training"] is True:
                    print(f"Removing training=True from inbound node to layer named {layer.name}.")
                    del node.call_kwargs["training"]
            layer_type = type(layer).__name__
            if layer_type in ['Model', 'Sequential', 'Functional']:
                do_remove(layer)
        return model

    do_remove(load_model(old_model_path)).save(new_model_path)
```

Why are `Lambda` layers not supported?
-----------------------------------------------

`Lambda` layers in Keras involve custom Python code to be executed. Supporting this in frugally-deep would require a transpiler from Python to C++ that is also aware of the semantic differences between the data structures. Since this is not feasible, `Lambda` layers are not supported in frugally-deep.
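Often, a `Lambda` layer only wraps a simple elementwise operation, and then it can be replaced with a supported built-in layer before exporting the model. A minimal sketch (the concrete scaling and the toy model around it are just placeholders):

```python
from keras import layers, models

# A layer like Lambda(lambda x: x / 255.0) only rescales its input,
# so the supported Rescaling layer can express the same operation.
inputs = layers.Input(shape=(4,))
x = layers.Rescaling(scale=1.0 / 255.0)(inputs)  # instead of a Lambda
outputs = layers.Dense(3, activation='softmax')(x)
model = models.Model(inputs=inputs, outputs=outputs)
model.save('no_lambda_model.keras')
```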
In case you don't find a way to get rid of the `Lambda` layer in your Keras model, feel free to dive into the rabbit hole of [injecting support for your custom layers into frugally-deep](FAQ.md#how-to-use-custom-layers).

How to use custom layers?
-------------------------

`fdeep::load_model` has a `custom_layer_creators` parameter, which is of the following type:

```cpp
const std::unordered_map<
    std::string,
    std::function<fdeep::internal::layer_ptr(/* layer-creation parameters */)>>&
```

It is a dictionary, mapping layer names to custom factory functions. As an example of such a factory function for a simple layer type, please have a look at the definition of `fdeep::internal::create_add_layer`.

So, you provide your own factory function, returning an `fdeep::internal::layer_ptr` (an `std::shared_ptr` to an `fdeep::internal::layer`).

For the actual implementation of your layer, you need to create a new class, inheriting from `fdeep::internal::layer`. As an example, please have a look at the definition of the `add_layer` class.

In summary, the work needed to inject support for a custom layer from userland, i.e., without modifying the actual library, looks as follows:

- Create a new layer class, inheriting from `fdeep::internal::layer`, like [so](https://github.com/Dobiasd/frugally-deep/blob/e3e1a6a2e011ef6255d6589a5ec0981c9d0ef1f9/include/fdeep/layers/add_layer.hpp#L16).
- Create a new creator function for your layer type, like [so](https://github.com/Dobiasd/frugally-deep/blob/e3e1a6a2e011ef6255d6589a5ec0981c9d0ef1f9/include/fdeep/import_model.hpp#L594).
- Pass a `custom_layer_creators` to `fdeep::load_model`, which maps layer names to your custom creators.

In case your layer is trainable, i.e., you have some weights attached to it that also need to be exported from the Python side of things, have a look into `convert_model.py` for how to extend the resulting model `.json` file with the parameters you need.

Remark: This feature in general is still experimental and might be subject to change in the future, as the usage of namespace `fdeep::internal` indicates.

frugally-deep-0.17.1/INSTALL.md
frugally-deep
=============

Installation
------------

You can install frugally-deep using cmake as shown below, or (if you prefer) download the [code](https://github.com/Dobiasd/frugally-deep/archive/master.zip) (and the [code](https://github.com/Dobiasd/FunctionalPlus/archive/master.zip) of [FunctionalPlus](https://github.com/Dobiasd/FunctionalPlus)), extract it, and tell your compiler to use the `include` directories.

```
git clone -b 'v0.2.24' --single-branch --depth 1 https://github.com/Dobiasd/FunctionalPlus
cd FunctionalPlus
mkdir -p build && cd build
cmake ..
make && sudo make install
cd ../..

git clone -b '3.4.0' --single-branch --depth 1 https://gitlab.com/libeigen/eigen.git
cd eigen
mkdir -p build && cd build
cmake ..
make && sudo make install
sudo ln -s /usr/local/include/eigen3/Eigen /usr/local/include/Eigen
cd ../..

git clone -b 'v3.11.3' --single-branch --depth 1 https://github.com/nlohmann/json
cd json
mkdir -p build && cd build
cmake -DJSON_BuildTests=OFF ..
make && sudo make install
cd ../..

git clone https://github.com/Dobiasd/frugally-deep
cd frugally-deep
mkdir -p build && cd build
cmake ..
make && sudo make install
cd ../..
```

Building the tests (optional) requires [doctest](https://github.com/onqtam/doctest).
Unit tests are disabled by default. They are enabled and executed like this:

```
# install doctest
git clone -b 'v2.4.11' --single-branch --depth 1 https://github.com/onqtam/doctest.git
cd doctest
mkdir -p build && cd build
cmake .. -DDOCTEST_WITH_TESTS=OFF -DDOCTEST_WITH_MAIN_IN_STATIC_LIB=OFF
make && sudo make install
cd ../..

# build unit tests
cd frugally-deep
mkdir -p build && cd build
cmake -DFDEEP_BUILD_UNITTEST=ON ..
make unittest
cd ../..
```

The unit tests require Python 3; please make sure it is installed correctly.

### Installation using [Conan C/C++ package manager](https://conan.io)

Just add a *conanfile.txt* with frugally-deep as a requirement and choose the generator for your project.

```
[requires]
frugally-deep/v0.17.1@dobiasd/stable

[generators]
cmake
```

Then install it:

```
$ conan install conanfile.txt
```

### Installation using the [Hunter CMake package manager](https://github.com/ruslo/hunter)

The [First Step](https://docs.hunter.sh/en/latest/quick-start/boost-components.html#first-step) section of the [Hunter documentation](https://docs.hunter.sh/en/latest/index.html) shows how to get started. Since the version of the package on Hunter is out of date, the procedure below covers installation using a locally hosted version of the repo (through submodules).

A sample project using this to run VGG16 is available at https://github.com/kmader/fd_demo

The basic idea is to use the standard Hunter setup but to add a git submodule containing frugally-deep to your repository. Hunter will then use the code in that submodule to build the library (https://docs.hunter.sh/en/latest/user-guides/hunter-user/git-submodule.html?highlight=GIT_SUBMODULE).

Your CMakeLists.txt should look something like

```cmake
cmake_minimum_required(VERSION 3.0) # minimum requirement for Hunter

include("cmake/HunterGate.cmake") # teach your project about Hunter (before project())
HunterGate( # Latest release shown here: https://github.com/ruslo/hunter/releases
    URL "https://github.com/ruslo/hunter/archive/v0.20.17.tar.gz"
    SHA1 "d7d1d5446bbf20b78fa5ac1b52ecb67a01c3790e"
    LOCAL # <----- load cmake/Hunter/config.cmake
)

project(sample-frugally-deep)

hunter_add_package(frugally-deep)
find_package(frugally-deep CONFIG REQUIRED)

add_executable(foo foo.cpp)
target_link_libraries(foo PUBLIC frugally-deep::fdeep) # add frugally-deep and dependencies (libs/includes/flags/definitions)
```

You will then need to create a `cmake/` directory with the HunterGate script in it

```bash
mkdir -p cmake
wget https://raw.githubusercontent.com/hunter-packages/gate/master/cmake/HunterGate.cmake -O cmake/HunterGate.cmake
```

Finally you will need a `Hunter/config.cmake` to link to the submodule

```bash
mkdir -p cmake/Hunter
echo 'hunter_config(frugally-deep GIT_SUBMODULE "lib/frugally-deep")' > cmake/Hunter/config.cmake
```

### Installation using [vcpkg](https://github.com/microsoft/vcpkg)

See [Getting Started](https://github.com/microsoft/vcpkg#getting-started) to get vcpkg up and running.
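If you haven't used vcpkg before, the bootstrap typically looks roughly like this; a sketch for Unix-like shells, with the linked guide being the authoritative reference:

```bash
git clone https://github.com/microsoft/vcpkg
./vcpkg/bootstrap-vcpkg.sh
```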
The only step after the installation of vcpkg is to install frugally-deep with ```bash vcpkg install frugally-deep ``` If you need double precision, install frugally-deep with ```bash vcpkg install frugally-deep[double] ``` Then add the following lines to your CMakeFiles.txt: ```cmake find_package(frugally-deep CONFIG REQUIRED) target_link_libraries(main PRIVATE frugally-deep::fdeep) ``` frugally-deep-0.17.1/LICENSE000066400000000000000000000020711476372554500153670ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2016 Tobias Hermann Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. frugally-deep-0.17.1/README.md000066400000000000000000000151231476372554500156430ustar00rootroot00000000000000![logo](logo/fdeep.png) [![CI](https://github.com/Dobiasd/frugally-deep/workflows/ci/badge.svg)](https://github.com/Dobiasd/frugally-deep/actions) [![(License MIT 1.0)](https://img.shields.io/badge/license-MIT%201.0-blue.svg)][license] [license]: LICENSE frugally-deep ============= **Use Keras models in C++ with ease** Table of contents ----------------- * [Introduction](#introduction) * [Usage](#usage) * [Requirements and Installation](#requirements-and-installation) * [FAQ](#faq) Introduction ------------ Would you like to build/train a model using Keras/Python? And would you like to run the prediction (forward pass) on your model in C++ without linking your application against TensorFlow? Then frugally-deep is exactly for you. **frugally-deep** * **is a small header-only library** written in modern and pure C++. * is very easy to integrate and use. * depends only on [FunctionalPlus](https://github.com/Dobiasd/FunctionalPlus), [Eigen](http://eigen.tuxfamily.org/) and [json](https://github.com/nlohmann/json) - also header-only libraries. * supports inference (`model.predict`) not only for [sequential models](https://keras.io/getting-started/sequential-model-guide/) but also for computational graphs with a more complex topology, created with the [functional API](https://keras.io/getting-started/functional-api-guide/). * re-implements a (small) subset of TensorFlow, i.e., the operations needed to support prediction. * results in a much smaller binary size than linking against TensorFlow. * works out-of-the-box also when compiled into a 32-bit executable. (Of course, 64 bit is fine too.) * avoids temporarily allocating (potentially large chunks of) additional RAM during convolutions (by not materializing the im2col input matrix). * utterly ignores even the most powerful GPU in your system and uses only one CPU core per prediction. 
;-) * but is quite fast on one CPU core, and you can run multiple predictions in parallel, thus utilizing as many CPUs as you like to improve the overall prediction throughput of your application/pipeline. ### Supported layer types * `Add`, `Concatenate`, `Subtract`, `Multiply`, `Average`, `Maximum`, `Minimum`, `Dot` * `AveragePooling1D/2D/3D`, `GlobalAveragePooling1D/2D/3D` * `TimeDistributed` * `Conv1D/2D`, `SeparableConv2D`, `DepthwiseConv2D` * `Conv1DTranspose`, `Conv2DTranspose` * `Cropping1D/2D/3D`, `ZeroPadding1D/2D/3D`, `CenterCrop` * `BatchNormalization`, `Dense`, `Flatten`, `Normalization` * `Dropout`, `AlphaDropout`, `GaussianDropout`, `GaussianNoise` * `SpatialDropout1D`, `SpatialDropout2D`, `SpatialDropout3D` * `ActivityRegularization`, `LayerNormalization`, `UnitNormalization` * `RandomContrast`, `RandomFlip`, `RandomHeight` * `RandomRotation`, `RandomTranslation`, `RandomWidth`, `RandomZoom` * `MaxPooling1D/2D/3D`, `GlobalMaxPooling1D/2D/3D` * `ELU`, `LeakyReLU`, `ReLU`, `SeLU`, `PReLU` * `Sigmoid`, `Softmax`, `Softplus`, `Tanh` * `Exponential`, `GELU`, `Softsign`, `Rescaling` * `UpSampling1D/2D`, `Resizing` * `Reshape`, `Permute`, `RepeatVector` * `Embedding`, `CategoryEncoding` * `Attention`, `AdditiveAttention`, `MultiHeadAttention` ### Also supported * multiple inputs and outputs * nested models * residual connections * shared layers * variable input shapes * arbitrary complex model architectures / computational graphs * custom layers (by passing custom factory functions to `load_model`) ### Currently not supported are the following: `Lambda` ([why](FAQ.md#why-are-lambda-layers-not-supported)), `Conv3D`, `ConvLSTM1D`, `ConvLSTM2D`, `Discretization`, `GRUCell`, `Hashing`, `IntegerLookup`, `LocallyConnected1D`, `LocallyConnected2D`, `LSTMCell`, `Masking`, `RepeatVector`, `RNN`, `SimpleRNN`, `SimpleRNNCell`, `StackedRNNCells`, `StringLookup`, `TextVectorization`, `Bidirectional`, `GRU`, `LSTM`, `CuDNNGRU`, `CuDNNLSTM`, `ThresholdedReLU`, `Upsampling3D`, `temporal` models Usage ----- 1) Use Keras/Python to build (`model.compile(...)`), train (`model.fit(...)`) and test (`model.evaluate(...)`) your model as usual. Then save it to a single file using `model.save('....keras')`. The `image_data_format` in your model must be `channels_last`, which is the default when using the TensorFlow backend. Models created with a different `image_data_format` and other backends are not supported. 2) Now convert it to the frugally-deep file format with `keras_export/convert_model.py` 3) Finally load it in C++ (`fdeep::load_model(...)`) and use `model.predict(...)` to invoke a forward pass with your data. 
The following minimal example shows the full workflow:

```python
# create_model.py
import numpy as np
from keras.layers import Input, Dense
from keras.models import Model

inputs = Input(shape=(4,))
x = Dense(5, activation='relu')(inputs)
predictions = Dense(3, activation='softmax')(x)

model = Model(inputs=inputs, outputs=predictions)
model.compile(loss='categorical_crossentropy', optimizer='nadam')

model.fit(
    np.asarray([[1, 2, 3, 4], [2, 3, 4, 5]]),
    np.asarray([[1, 0, 0], [0, 0, 1]]), epochs=10)

model.save('keras_model.keras')
```

```bash
python3 keras_export/convert_model.py keras_model.keras fdeep_model.json
```

```cpp
// main.cpp
#include <fdeep/fdeep.hpp>
#include <iostream>

int main()
{
    const auto model = fdeep::load_model("fdeep_model.json");
    const auto result = model.predict(
        {fdeep::tensor(fdeep::tensor_shape(static_cast<std::size_t>(4)),
            std::vector<float>{1, 2, 3, 4})});
    std::cout << fdeep::show_tensors(result) << std::endl;
}
```

When using `convert_model.py`, a test case (input and corresponding output values) is generated automatically and saved along with your model. `fdeep::load_model` runs this test to make sure the results of a forward pass in frugally-deep are the same as in Keras.

For more integration examples, please have a look at the [FAQ](FAQ.md).

Requirements and Installation
-----------------------------

- A **C++14**-compatible compiler: Compilers from these versions on are fine: GCC 4.9, Clang 3.7 (libc++ 3.7) and Visual C++ 2015
- Python 3.9 or higher
- TensorFlow 2.18.0
- Keras 3.8.0 (These are the tested versions, but somewhat older ones might work too.)

Guides for different ways to install frugally-deep can be found in [`INSTALL.md`](INSTALL.md).

FAQ
---

See [`FAQ.md`](FAQ.md)

Disclaimer
----------

The API of this library still might change in the future. If you have any suggestions, find errors, or want to give general feedback/criticism, I'd [love to hear from you](issues). Of course, [contributions](pulls) are also very welcome.

License
-------

Distributed under the MIT License.
(See accompanying file [`LICENSE`](LICENSE) or at
[https://opensource.org/licenses/MIT](https://opensource.org/licenses/MIT))

frugally-deep-0.17.1/cmake/Config.cmake.in
@PACKAGE_INIT@

find_package(Eigen3 CONFIG REQUIRED)
find_package(FunctionalPlus CONFIG REQUIRED)
find_package(Threads REQUIRED)
find_package(nlohmann_json CONFIG REQUIRED)

include("${CMAKE_CURRENT_LIST_DIR}/@TARGETS_EXPORT_NAME@.cmake")
check_required_components("@PROJECT_NAME@")

frugally-deep-0.17.1/cmake/Finddoctest.cmake
find_package(PkgConfig)
pkg_check_modules(PKG_doctest QUIET doctest)

set(doctest_DEFINITIONS ${PKG_doctest_CFLAGS_OTHER})

find_path(doctest_INCLUDE_DIR "doctest.h"
    HINTS
    ${PKG_doctest_INCLUDE_DIRS}
    "${doctest_DIR}/include"
)

set(doctest_INCLUDE_DIRS ${doctest_INCLUDE_DIR})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(doctest DEFAULT_MSG
    doctest_INCLUDE_DIR
)

mark_as_advanced(doctest_INCLUDE_DIR)

frugally-deep-0.17.1/cmake/HunterGate.cmake
# Copyright (c) 2013-2017, Ruslan Baratov
# All rights reserved.
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # This is a gate file to Hunter package manager. # Include this file using `include` command and add package you need, example: # # cmake_minimum_required(VERSION 3.0) # # include("cmake/HunterGate.cmake") # HunterGate( # URL "https://github.com/path/to/hunter/archive.tar.gz" # SHA1 "798501e983f14b28b10cda16afa4de69eee1da1d" # ) # # project(MyProject) # # hunter_add_package(Foo) # hunter_add_package(Boo COMPONENTS Bar Baz) # # Projects: # * https://github.com/hunter-packages/gate/ # * https://github.com/ruslo/hunter option(HUNTER_ENABLED "Enable Hunter package manager support" ON) if(HUNTER_ENABLED) if(CMAKE_VERSION VERSION_LESS "3.0") message(FATAL_ERROR "At least CMake version 3.0 required for hunter dependency management." 
" Update CMake or set HUNTER_ENABLED to OFF.") endif() endif() include(CMakeParseArguments) # cmake_parse_arguments option(HUNTER_STATUS_PRINT "Print working status" ON) option(HUNTER_STATUS_DEBUG "Print a lot info" OFF) option(HUNTER_TLS_VERIFY "Enable/disable TLS certificate checking on downloads" ON) set(HUNTER_WIKI "https://github.com/ruslo/hunter/wiki") function(hunter_gate_status_print) foreach(print_message ${ARGV}) if(HUNTER_STATUS_PRINT OR HUNTER_STATUS_DEBUG) message(STATUS "[hunter] ${print_message}") endif() endforeach() endfunction() function(hunter_gate_status_debug) foreach(print_message ${ARGV}) if(HUNTER_STATUS_DEBUG) string(TIMESTAMP timestamp) message(STATUS "[hunter *** DEBUG *** ${timestamp}] ${print_message}") endif() endforeach() endfunction() function(hunter_gate_wiki wiki_page) message("------------------------------ WIKI -------------------------------") message(" ${HUNTER_WIKI}/${wiki_page}") message("-------------------------------------------------------------------") message("") message(FATAL_ERROR "") endfunction() function(hunter_gate_internal_error) message("") foreach(print_message ${ARGV}) message("[hunter ** INTERNAL **] ${print_message}") endforeach() message("[hunter ** INTERNAL **] [Directory:${CMAKE_CURRENT_LIST_DIR}]") message("") hunter_gate_wiki("error.internal") endfunction() function(hunter_gate_fatal_error) cmake_parse_arguments(hunter "" "WIKI" "" "${ARGV}") string(COMPARE EQUAL "${hunter_WIKI}" "" have_no_wiki) if(have_no_wiki) hunter_gate_internal_error("Expected wiki") endif() message("") foreach(x ${hunter_UNPARSED_ARGUMENTS}) message("[hunter ** FATAL ERROR **] ${x}") endforeach() message("[hunter ** FATAL ERROR **] [Directory:${CMAKE_CURRENT_LIST_DIR}]") message("") hunter_gate_wiki("${hunter_WIKI}") endfunction() function(hunter_gate_user_error) hunter_gate_fatal_error(${ARGV} WIKI "error.incorrect.input.data") endfunction() function(hunter_gate_self root version sha1 result) string(COMPARE EQUAL "${root}" "" is_bad) if(is_bad) hunter_gate_internal_error("root is empty") endif() string(COMPARE EQUAL "${version}" "" is_bad) if(is_bad) hunter_gate_internal_error("version is empty") endif() string(COMPARE EQUAL "${sha1}" "" is_bad) if(is_bad) hunter_gate_internal_error("sha1 is empty") endif() string(SUBSTRING "${sha1}" 0 7 archive_id) if(EXISTS "${root}/cmake/Hunter") set(hunter_self "${root}") else() set( hunter_self "${root}/_Base/Download/Hunter/${version}/${archive_id}/Unpacked" ) endif() set("${result}" "${hunter_self}" PARENT_SCOPE) endfunction() # Set HUNTER_GATE_ROOT cmake variable to suitable value. 
function(hunter_gate_detect_root) # Check CMake variable string(COMPARE NOTEQUAL "${HUNTER_ROOT}" "" not_empty) if(not_empty) set(HUNTER_GATE_ROOT "${HUNTER_ROOT}" PARENT_SCOPE) hunter_gate_status_debug("HUNTER_ROOT detected by cmake variable") return() endif() # Check environment variable string(COMPARE NOTEQUAL "$ENV{HUNTER_ROOT}" "" not_empty) if(not_empty) set(HUNTER_GATE_ROOT "$ENV{HUNTER_ROOT}" PARENT_SCOPE) hunter_gate_status_debug("HUNTER_ROOT detected by environment variable") return() endif() # Check HOME environment variable string(COMPARE NOTEQUAL "$ENV{HOME}" "" result) if(result) set(HUNTER_GATE_ROOT "$ENV{HOME}/.hunter" PARENT_SCOPE) hunter_gate_status_debug("HUNTER_ROOT set using HOME environment variable") return() endif() # Check SYSTEMDRIVE and USERPROFILE environment variable (windows only) if(WIN32) string(COMPARE NOTEQUAL "$ENV{SYSTEMDRIVE}" "" result) if(result) set(HUNTER_GATE_ROOT "$ENV{SYSTEMDRIVE}/.hunter" PARENT_SCOPE) hunter_gate_status_debug( "HUNTER_ROOT set using SYSTEMDRIVE environment variable" ) return() endif() string(COMPARE NOTEQUAL "$ENV{USERPROFILE}" "" result) if(result) set(HUNTER_GATE_ROOT "$ENV{USERPROFILE}/.hunter" PARENT_SCOPE) hunter_gate_status_debug( "HUNTER_ROOT set using USERPROFILE environment variable" ) return() endif() endif() hunter_gate_fatal_error( "Can't detect HUNTER_ROOT" WIKI "error.detect.hunter.root" ) endfunction() macro(hunter_gate_lock dir) if(NOT HUNTER_SKIP_LOCK) if("${CMAKE_VERSION}" VERSION_LESS "3.2") hunter_gate_fatal_error( "Can't lock, upgrade to CMake 3.2 or use HUNTER_SKIP_LOCK" WIKI "error.can.not.lock" ) endif() hunter_gate_status_debug("Locking directory: ${dir}") file(LOCK "${dir}" DIRECTORY GUARD FUNCTION) hunter_gate_status_debug("Lock done") endif() endmacro() function(hunter_gate_download dir) string( COMPARE NOTEQUAL "$ENV{HUNTER_DISABLE_AUTOINSTALL}" "" disable_autoinstall ) if(disable_autoinstall AND NOT HUNTER_RUN_INSTALL) hunter_gate_fatal_error( "Hunter not found in '${dir}'" "Set HUNTER_RUN_INSTALL=ON to auto-install it from '${HUNTER_GATE_URL}'" "Settings:" " HUNTER_ROOT: ${HUNTER_GATE_ROOT}" " HUNTER_SHA1: ${HUNTER_GATE_SHA1}" WIKI "error.run.install" ) endif() string(COMPARE EQUAL "${dir}" "" is_bad) if(is_bad) hunter_gate_internal_error("Empty 'dir' argument") endif() string(COMPARE EQUAL "${HUNTER_GATE_SHA1}" "" is_bad) if(is_bad) hunter_gate_internal_error("HUNTER_GATE_SHA1 empty") endif() string(COMPARE EQUAL "${HUNTER_GATE_URL}" "" is_bad) if(is_bad) hunter_gate_internal_error("HUNTER_GATE_URL empty") endif() set(done_location "${dir}/DONE") set(sha1_location "${dir}/SHA1") set(build_dir "${dir}/Build") set(cmakelists "${dir}/CMakeLists.txt") hunter_gate_lock("${dir}") if(EXISTS "${done_location}") # while waiting for lock other instance can do all the job hunter_gate_status_debug("File '${done_location}' found, skip install") return() endif() file(REMOVE_RECURSE "${build_dir}") file(REMOVE_RECURSE "${cmakelists}") file(MAKE_DIRECTORY "${build_dir}") # check directory permissions # Disabling languages speeds up a little bit, reduces noise in the output # and avoids path too long windows error file( WRITE "${cmakelists}" "cmake_minimum_required(VERSION 3.0)\n" "project(HunterDownload LANGUAGES NONE)\n" "include(ExternalProject)\n" "ExternalProject_Add(\n" " Hunter\n" " URL\n" " \"${HUNTER_GATE_URL}\"\n" " URL_HASH\n" " SHA1=${HUNTER_GATE_SHA1}\n" " DOWNLOAD_DIR\n" " \"${dir}\"\n" " TLS_VERIFY\n" " ${HUNTER_TLS_VERIFY}\n" " SOURCE_DIR\n" " \"${dir}/Unpacked\"\n" " CONFIGURE_COMMAND\n" " 
\"\"\n" " BUILD_COMMAND\n" " \"\"\n" " INSTALL_COMMAND\n" " \"\"\n" ")\n" ) if(HUNTER_STATUS_DEBUG) set(logging_params "") else() set(logging_params OUTPUT_QUIET) endif() hunter_gate_status_debug("Run generate") # Need to add toolchain file too. # Otherwise on Visual Studio + MDD this will fail with error: # "Could not find an appropriate version of the Windows 10 SDK installed on this machine" if(EXISTS "${CMAKE_TOOLCHAIN_FILE}") get_filename_component(absolute_CMAKE_TOOLCHAIN_FILE "${CMAKE_TOOLCHAIN_FILE}" ABSOLUTE) set(toolchain_arg "-DCMAKE_TOOLCHAIN_FILE=${absolute_CMAKE_TOOLCHAIN_FILE}") else() # 'toolchain_arg' can't be empty set(toolchain_arg "-DCMAKE_TOOLCHAIN_FILE=") endif() string(COMPARE EQUAL "${CMAKE_MAKE_PROGRAM}" "" no_make) if(no_make) set(make_arg "") else() # Test case: remove Ninja from PATH but set it via CMAKE_MAKE_PROGRAM set(make_arg "-DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}") endif() execute_process( COMMAND "${CMAKE_COMMAND}" "-H${dir}" "-B${build_dir}" "-G${CMAKE_GENERATOR}" "${toolchain_arg}" ${make_arg} WORKING_DIRECTORY "${dir}" RESULT_VARIABLE download_result ${logging_params} ) if(NOT download_result EQUAL 0) hunter_gate_internal_error("Configure project failed") endif() hunter_gate_status_print( "Initializing Hunter workspace (${HUNTER_GATE_SHA1})" " ${HUNTER_GATE_URL}" " -> ${dir}" ) execute_process( COMMAND "${CMAKE_COMMAND}" --build "${build_dir}" WORKING_DIRECTORY "${dir}" RESULT_VARIABLE download_result ${logging_params} ) if(NOT download_result EQUAL 0) hunter_gate_internal_error("Build project failed") endif() file(REMOVE_RECURSE "${build_dir}") file(REMOVE_RECURSE "${cmakelists}") file(WRITE "${sha1_location}" "${HUNTER_GATE_SHA1}") file(WRITE "${done_location}" "DONE") hunter_gate_status_debug("Finished") endfunction() # Must be a macro so master file 'cmake/Hunter' can # apply all variables easily just by 'include' command # (otherwise PARENT_SCOPE magic needed) macro(HunterGate) if(HUNTER_GATE_DONE) # variable HUNTER_GATE_DONE set explicitly for external project # (see `hunter_download`) set_property(GLOBAL PROPERTY HUNTER_GATE_DONE YES) endif() # First HunterGate command will init Hunter, others will be ignored get_property(_hunter_gate_done GLOBAL PROPERTY HUNTER_GATE_DONE SET) if(NOT HUNTER_ENABLED) # Empty function to avoid error "unknown function" function(hunter_add_package) endfunction() set( _hunter_gate_disabled_mode_dir "${CMAKE_CURRENT_LIST_DIR}/cmake/Hunter/disabled-mode" ) if(EXISTS "${_hunter_gate_disabled_mode_dir}") hunter_gate_status_debug( "Adding \"disabled-mode\" modules: ${_hunter_gate_disabled_mode_dir}" ) list(APPEND CMAKE_PREFIX_PATH "${_hunter_gate_disabled_mode_dir}") endif() elseif(_hunter_gate_done) hunter_gate_status_debug("Secondary HunterGate (use old settings)") hunter_gate_self( "${HUNTER_CACHED_ROOT}" "${HUNTER_VERSION}" "${HUNTER_SHA1}" _hunter_self ) include("${_hunter_self}/cmake/Hunter") else() set(HUNTER_GATE_LOCATION "${CMAKE_CURRENT_LIST_DIR}") string(COMPARE NOTEQUAL "${PROJECT_NAME}" "" _have_project_name) if(_have_project_name) hunter_gate_fatal_error( "Please set HunterGate *before* 'project' command. 
" "Detected project: ${PROJECT_NAME}" WIKI "error.huntergate.before.project" ) endif() cmake_parse_arguments( HUNTER_GATE "LOCAL" "URL;SHA1;GLOBAL;FILEPATH" "" ${ARGV} ) string(COMPARE EQUAL "${HUNTER_GATE_SHA1}" "" _empty_sha1) string(COMPARE EQUAL "${HUNTER_GATE_URL}" "" _empty_url) string( COMPARE NOTEQUAL "${HUNTER_GATE_UNPARSED_ARGUMENTS}" "" _have_unparsed ) string(COMPARE NOTEQUAL "${HUNTER_GATE_GLOBAL}" "" _have_global) string(COMPARE NOTEQUAL "${HUNTER_GATE_FILEPATH}" "" _have_filepath) if(_have_unparsed) hunter_gate_user_error( "HunterGate unparsed arguments: ${HUNTER_GATE_UNPARSED_ARGUMENTS}" ) endif() if(_empty_sha1) hunter_gate_user_error("SHA1 suboption of HunterGate is mandatory") endif() if(_empty_url) hunter_gate_user_error("URL suboption of HunterGate is mandatory") endif() if(_have_global) if(HUNTER_GATE_LOCAL) hunter_gate_user_error("Unexpected LOCAL (already has GLOBAL)") endif() if(_have_filepath) hunter_gate_user_error("Unexpected FILEPATH (already has GLOBAL)") endif() endif() if(HUNTER_GATE_LOCAL) if(_have_global) hunter_gate_user_error("Unexpected GLOBAL (already has LOCAL)") endif() if(_have_filepath) hunter_gate_user_error("Unexpected FILEPATH (already has LOCAL)") endif() endif() if(_have_filepath) if(_have_global) hunter_gate_user_error("Unexpected GLOBAL (already has FILEPATH)") endif() if(HUNTER_GATE_LOCAL) hunter_gate_user_error("Unexpected LOCAL (already has FILEPATH)") endif() endif() hunter_gate_detect_root() # set HUNTER_GATE_ROOT # Beautify path, fix probable problems with windows path slashes get_filename_component( HUNTER_GATE_ROOT "${HUNTER_GATE_ROOT}" ABSOLUTE ) hunter_gate_status_debug("HUNTER_ROOT: ${HUNTER_GATE_ROOT}") if(NOT HUNTER_ALLOW_SPACES_IN_PATH) string(FIND "${HUNTER_GATE_ROOT}" " " _contain_spaces) if(NOT _contain_spaces EQUAL -1) hunter_gate_fatal_error( "HUNTER_ROOT (${HUNTER_GATE_ROOT}) contains spaces." "Set HUNTER_ALLOW_SPACES_IN_PATH=ON to skip this error" "(Use at your own risk!)" WIKI "error.spaces.in.hunter.root" ) endif() endif() string( REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]+[-_a-z0-9]*" HUNTER_GATE_VERSION "${HUNTER_GATE_URL}" ) string(COMPARE EQUAL "${HUNTER_GATE_VERSION}" "" _is_empty) if(_is_empty) set(HUNTER_GATE_VERSION "unknown") endif() hunter_gate_self( "${HUNTER_GATE_ROOT}" "${HUNTER_GATE_VERSION}" "${HUNTER_GATE_SHA1}" _hunter_self ) set(_master_location "${_hunter_self}/cmake/Hunter") if(EXISTS "${HUNTER_GATE_ROOT}/cmake/Hunter") # Hunter downloaded manually (e.g. by 'git clone') set(_unused "xxxxxxxxxx") set(HUNTER_GATE_SHA1 "${_unused}") set(HUNTER_GATE_VERSION "${_unused}") else() get_filename_component(_archive_id_location "${_hunter_self}/.." 
ABSOLUTE) set(_done_location "${_archive_id_location}/DONE") set(_sha1_location "${_archive_id_location}/SHA1") # Check Hunter already downloaded by HunterGate if(NOT EXISTS "${_done_location}") hunter_gate_download("${_archive_id_location}") endif() if(NOT EXISTS "${_done_location}") hunter_gate_internal_error("hunter_gate_download failed") endif() if(NOT EXISTS "${_sha1_location}") hunter_gate_internal_error("${_sha1_location} not found") endif() file(READ "${_sha1_location}" _sha1_value) string(COMPARE EQUAL "${_sha1_value}" "${HUNTER_GATE_SHA1}" _is_equal) if(NOT _is_equal) hunter_gate_internal_error( "Short SHA1 collision:" " ${_sha1_value} (from ${_sha1_location})" " ${HUNTER_GATE_SHA1} (HunterGate)" ) endif() if(NOT EXISTS "${_master_location}") hunter_gate_user_error( "Master file not found:" " ${_master_location}" "try to update Hunter/HunterGate" ) endif() endif() include("${_master_location}") set_property(GLOBAL PROPERTY HUNTER_GATE_DONE YES) endif() endmacro() frugally-deep-0.17.1/cmake/hunter.cmake000066400000000000000000000003541476372554500177530ustar00rootroot00000000000000option(HUNTER_ENABLED "Enable Hunter package manager support" OFF) include("cmake/HunterGate.cmake") HunterGate( URL "https://github.com/ruslo/hunter/archive/v0.20.0.tar.gz" SHA1 "e94556ed41e5432997450bca7232db72a3b0d5ef" ) frugally-deep-0.17.1/cmake/pkgconfig.cmake000066400000000000000000000041461476372554500204200ustar00rootroot00000000000000include(CMakePackageConfigHelpers) include(GNUInstallDirs) # Installation (https://github.com/forexample/package-example) { # Layout. This works for all platforms: # * /lib/cmake/ # * /lib/ # * /include/ set(config_install_dir "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}") set(include_install_dir "include") set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated") # Configuration set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake") set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake") set(TARGETS_EXPORT_NAME "${PROJECT_NAME}Targets") set(namespace "${PROJECT_NAME}::") # Include module with fuction 'write_basic_package_version_file' include(CMakePackageConfigHelpers) # Configure 'ConfigVersion.cmake' # Use: # * PROJECT_VERSION write_basic_package_version_file( "${version_config}" COMPATIBILITY SameMajorVersion ) # Configure 'Config.cmake' # Use variables: # * TARGETS_EXPORT_NAME # * PROJECT_NAME configure_package_config_file( "cmake/Config.cmake.in" "${project_config}" INSTALL_DESTINATION "${config_install_dir}" ) # Targets: # * header location after install: /include/fdeep/fdeep.hpp # * headers can be included by C++ code `#include ` install( TARGETS fdeep EXPORT "${TARGETS_EXPORT_NAME}" LIBRARY DESTINATION "lib" ARCHIVE DESTINATION "lib" RUNTIME DESTINATION "bin" INCLUDES DESTINATION "${include_install_dir}" ) # Headers: # * include/fdeep/fdeep.hpp -> /include/fdeep/fdeep.hpp install( DIRECTORY "include/fdeep" # no trailing slash DESTINATION "${include_install_dir}" ) # Config # * /lib/cmake/frugally-deep/frugally-deepConfig.cmake # * /lib/cmake/frugally-deep/frugally-deepConfigVersion.cmake install( FILES "${project_config}" "${version_config}" DESTINATION "${config_install_dir}" ) # Config # * /lib/cmake/frugally-deep/frugally-deepTargets.cmake install( EXPORT "${TARGETS_EXPORT_NAME}" NAMESPACE "${namespace}" DESTINATION "${config_install_dir}" ) # } frugally-deep-0.17.1/cmake/toolchain.cmake000066400000000000000000000004171476372554500204260ustar00rootroot00000000000000add_compile_options(-Wall -Wextra -pedantic -Werror 
-Weffc++ -Wconversion -Wsign-conversion -Wctor-dtor-privacy -Wreorder -Wold-style-cast -Wparentheses ) set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) frugally-deep-0.17.1/conanfile.txt000066400000000000000000000001511476372554500170560ustar00rootroot00000000000000[requires] eigen/3.4.0 functionalplus/v0.2.24 nlohmann_json/3.11.3 [generators] CMakeToolchain CMakeDepsfrugally-deep-0.17.1/include/000077500000000000000000000000001476372554500160055ustar00rootroot00000000000000frugally-deep-0.17.1/include/fdeep/000077500000000000000000000000001476372554500170705ustar00rootroot00000000000000frugally-deep-0.17.1/include/fdeep/base64.hpp000066400000000000000000000107301476372554500206660ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include #include #include #include namespace fdeep { namespace internal { // Make sure data outlives instances of this facade class. class json_data_strs_char_prodiver { public: json_data_strs_char_prodiver(const nlohmann::json& data, std::string::value_type pad_right_char) : data_(data) , it_data_(std::begin(data_)) , current_str_(data_to_str(*it_data_)) , it_str_(std::begin(current_str_)) , pad_right_char_(pad_right_char) { } static std::string data_to_str(const nlohmann::json& dat) { std::string result = dat; return result; } std::size_t size() const { std::size_t sum = 0; for (const auto& dat : data_) { sum += data_to_str(dat).size(); } return sum; } std::string::value_type next() { if (it_data_ == std::end(data_)) { return pad_right_char_; } if (it_str_ == std::end(current_str_)) { ++it_data_; // Guard against dereferencing the end iterator when padding is // requested beyond the last chunk. if (it_data_ == std::end(data_)) { return pad_right_char_; } current_str_ = data_to_str(*it_data_); it_str_ = std::begin(current_str_); } return *(it_str_++); } private: const nlohmann::json& data_; nlohmann::json::const_iterator it_data_; std::string current_str_; std::string::const_iterator it_str_; std::string::value_type pad_right_char_; }; // source: https://stackoverflow.com/a/31322410/1866775 static const std::uint8_t from_base64[] = { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 62, 255, 62, 255, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255, 255, 255, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 255, 255, 255, 255, 63, 255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 255, 255, 255, 255, 255 }; static const char to_base64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz" "0123456789+/"; inline std::vector Base64_decode( json_data_strs_char_prodiver&& encoded_string) { // Make sure string length is a multiple of 4 auto encoded_size = (encoded_string.size() + 3) & ~size_t(3); std::vector ret; ret.reserve(3 * encoded_size / 4); for (size_t i = 0; i < encoded_size; i += 4) { // Get values for each group of four base 64 characters std::uint8_t b4[4]; const auto c0 = encoded_string.next(); const auto c1 = encoded_string.next(); const auto c2 = encoded_string.next(); const auto c3 = encoded_string.next(); b4[0] = (c0 <= 'z') ? from_base64[static_cast(c0)] : 0xff; b4[1] = (c1 <= 'z') ? from_base64[static_cast(c1)] : 0xff; b4[2] = (c2 <= 'z') ?
from_base64[static_cast(c2)] : 0xff; b4[3] = (c3 <= 'z') ? from_base64[static_cast(c3)] : 0xff; // Transform into a group of three bytes std::uint8_t b3[3]; b3[0] = static_cast(((b4[0] & 0x3f) << 2) + ((b4[1] & 0x30) >> 4)); b3[1] = static_cast(((b4[1] & 0x0f) << 4) + ((b4[2] & 0x3c) >> 2)); b3[2] = static_cast(((b4[2] & 0x03) << 6) + ((b4[3] & 0x3f) >> 0)); // Add the byte to the return value if it isn't part of an '=' character (indicated by 0xff) if (b4[1] != 0xff) ret.push_back(b3[0]); if (b4[2] != 0xff) ret.push_back(b3[1]); if (b4[3] != 0xff) ret.push_back(b3[2]); } return ret; } } } frugally-deep-0.17.1/include/fdeep/common.hpp000066400000000000000000000045151476372554500210760ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #if defined(__GNUC__) || defined(__GNUG__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wctor-dtor-privacy" #pragma GCC diagnostic ignored "-Wold-style-cast" #pragma GCC diagnostic ignored "-Wsign-conversion" #pragma GCC diagnostic ignored "-Weffc++" #pragma GCC diagnostic ignored "-Wconversion" #pragma GCC diagnostic ignored "-Wshadow" #endif #if defined _MSC_VER #pragma warning(push) #pragma warning(disable : 4706) #pragma warning(disable : 4996) #endif #include #if defined _MSC_VER #pragma warning(pop) #endif #if defined(__GNUC__) || defined(__GNUG__) #pragma GCC diagnostic pop #endif #include #include #include #include #include #include namespace fdeep { namespace internal { inline std::runtime_error error(const std::string& error) { return std::runtime_error(error); } inline void raise_error(const std::string& msg) { throw error(msg); } inline void assertion(bool cond, const std::string& error) { if (!cond) { raise_error(error); } } #ifdef FDEEP_FLOAT_TYPE typedef FDEEP_FLOAT_TYPE float_type; #else typedef float float_type; #endif #if EIGEN_VERSION_AT_LEAST(3, 3, 0) typedef Eigen::Index EigenIndex; #else typedef Eigen::DenseIndex EigenIndex; #endif typedef std::vector float_vec_unaligned; template using aligned_vector = std::vector>; typedef aligned_vector float_vec; typedef fplus::shared_ref shared_float_vec; using ColMajorMatrixXf = Eigen::Matrix; using RowMajorMatrixXf = Eigen::Matrix; using ArrayXf = Eigen::Array; using ArrayXf1D = Eigen::Array; using MappedRowMajorMatrixXf = Eigen::Map; inline float_type tanh_typed(float_type x) { return std::tanh(x); } } } frugally-deep-0.17.1/include/fdeep/convolution.hpp000066400000000000000000000356631476372554500221750ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
// (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/common.hpp" #include "fdeep/filter.hpp" #include #include #include #include namespace fdeep { namespace internal { struct convolution_filter_matrices { tensor_shape filter_shape_; std::size_t filter_count_; float_vec biases_; bool use_bias_; tensor filter_mats_; }; inline convolution_filter_matrices generate_im2col_filter_matrix( const std::vector& filters) { assertion(fplus::all_the_same_on( fplus_c_mem_fn_t(filter, shape, tensor_shape), filters), "all filters must have the same shape"); const auto biases = fplus::transform_convert( fplus_c_mem_fn_t(filter, get_bias, float_type), filters); const bool use_bias = fplus::sum(biases) != static_cast(0) || !fplus::all_the_same(biases); const auto shape = filters.front().shape(); tensor filter_mats = tensor( tensor_shape(shape.height_, shape.width_, shape.depth_, filters.size()), static_cast(0)); for (std::size_t y = 0; y < shape.height_; ++y) { for (std::size_t n = 0; n < filters.size(); ++n) { for (std::size_t x = 0; x < shape.width_; ++x) { for (std::size_t z = 0; z < shape.depth_; ++z) { filter_mats.set(tensor_pos(y, x, z, n), filters[n].get(tensor_pos(y, x, z))); } } } } return { shape, filters.size(), biases, use_bias, filter_mats }; } inline tensor init_conv_output_tensor( std::size_t out_height, std::size_t out_width, std::size_t out_depth, std::size_t rank, const convolution_filter_matrices& filter_mat) { tensor output(tensor_shape_with_changed_rank( tensor_shape(out_height, out_width, out_depth), rank), static_cast(0)); if (filter_mat.use_bias_) { const auto bias_ptr = &filter_mat.biases_.front(); const auto bias_ptr_end = bias_ptr + out_depth; for (std::size_t y_out = 0; y_out < out_height; ++y_out) { for (std::size_t x_out = 0; x_out < out_width; ++x_out) { auto output_ptr = &output.get_ref_ignore_rank(tensor_pos(0, 0, y_out, x_out, 0)); std::copy(bias_ptr, bias_ptr_end, output_ptr); } } } return output; } inline Eigen::Map> get_im2col_mapping( const tensor& in, std::size_t f_width, std::size_t f_depth, std::size_t strides_x, std::size_t out_width, std::size_t y, std::size_t y_filt) { // To avoid using too much RAM, the input tensor is not materialized // as an actual im2col matrix, but instead the too-small outer stride // of the matrix mapping is utilized to achieve the overlap of the receptive fields. return Eigen::Map>( const_cast(&in.get_ref_ignore_rank(tensor_pos(0, 0, y + y_filt, 0, 0))), static_cast(f_width * f_depth), static_cast(out_width), Eigen::OuterStride<>(static_cast(f_depth * strides_x))); } // Special version for convolution with strides_x == 1 and strides_y == 1. // Reduces the forward-pass runtime of VGG19 by about 15%, by using fewer but larger GEMMs.
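// Worked example of the trick (numbers chosen for illustration only):
// for a 5x5 input and a 3x3 filter, out_height = out_width = 3 and
// out_width_temp = out_width + f_width - 1 = 5, so the im2col mapping
// used below spans out_width_temp * (out_height - 1) + out_width = 13
// columns, and a single GEMM per filter level covers all three output
// rows at once; the values computed "between" the rows are dropped
// afterwards.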
inline tensor convolve_accumulative_s1x1( std::size_t out_height, std::size_t out_width, const convolution_filter_matrices& filter_mat, const tensor& in) { const tensor& filter_mats = filter_mat.filter_mats_; const auto f_height = filter_mat.filter_shape_.height_; const auto f_width = filter_mat.filter_shape_.width_; const auto f_depth = filter_mat.filter_shape_.depth_; const auto out_depth = filter_mat.filter_count_; assertion(f_depth == in.shape().depth_, "filter depth does not match input"); assertion(filter_mats.shape().size_dim_4_ == f_height, "incorrect number of filter levels in y direction"); assertion(out_width == (in.shape().width_ - f_width) + 1, "output width does not match"); assertion(out_depth == filter_mat.biases_.size(), "invalid bias count"); tensor output = init_conv_output_tensor(out_height, out_width, out_depth, in.shape().rank(), filter_mat); const std::size_t out_width_temp = out_width + f_width - 1; tensor output_temp(tensor_shape_with_changed_rank( tensor_shape(out_height, out_width_temp, out_depth), in.shape().rank()), static_cast(0)); const auto mapping_width = out_width_temp * (out_height - 1) + out_width; for (std::size_t y_filt = 0; y_filt < f_height; ++y_filt) { const Eigen::Map filter(const_cast(&filter_mats.get_ref_ignore_rank(tensor_pos(0, y_filt, 0, 0, 0))), static_cast(out_depth), static_cast(f_width * f_depth)); const auto input = get_im2col_mapping(in, f_width, f_depth, 1, mapping_width, 0, y_filt); Eigen::Map, Eigen::Unaligned> output_temp_map(&output_temp.get_ref_ignore_rank(tensor_pos(0, 0, 0, 0, 0)), static_cast(out_depth), static_cast(mapping_width)); output_temp_map.noalias() += filter * input; } // Dropping the superfluous results from "between" the rows. for (std::size_t y_out = 0; y_out < out_height; ++y_out) { for (std::size_t x_out = 0; x_out < out_width; ++x_out) { for (std::size_t z_out = 0; z_out < out_depth; ++z_out) { output.get_ref_ignore_rank(tensor_pos(0, 0, y_out, x_out, z_out)) += output_temp.get_ref_ignore_rank(tensor_pos(0, 0, y_out, x_out, z_out)); } } } return output; } inline tensor convolve_accumulative( std::size_t out_height, std::size_t out_width, std::size_t strides_y, std::size_t strides_x, const convolution_filter_matrices& filter_mat, const tensor& in) { // Using the im2col method, the convolution is expressed as GEMMs for performance.
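// Illustrative GEMM sizes (assuming a 3x3 kernel, 64 input channels and
// 128 filters): each of the 3 filter levels in y multiplies a
// 128 x (3 * 64) filter matrix with a (3 * 64) x out_width im2col
// mapping of the input and accumulates the 128 x out_width product
// into the corresponding output row.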
// https://stackoverflow.com/questions/16798888/2-d-convolution-as-a-matrix-matrix-multiplication // https://github.com/tensorflow/tensorflow/blob/a0d784bdd31b27e013a7eac58a86ba62e86db299/tensorflow/core/kernels/conv_ops_using_gemm.cc // http://www.youtube.com/watch?v=pA4BsUK3oP4&t=36m22s const tensor& filter_mats = filter_mat.filter_mats_; const auto f_height = filter_mat.filter_shape_.height_; const auto f_width = filter_mat.filter_shape_.width_; const auto f_depth = filter_mat.filter_shape_.depth_; const auto out_depth = filter_mat.filter_count_; assertion(f_depth == in.shape().depth_, "filter depth does not match input"); assertion(filter_mats.shape().size_dim_4_ == f_height, "incorrect number of filter levels in y direction"); assertion(out_width == (in.shape().width_ - f_width) / strides_x + 1, "output width does not match"); assertion(out_depth == filter_mat.biases_.size(), "invalid bias count"); if (strides_x == 1 && strides_y == 1) { return convolve_accumulative_s1x1(out_height, out_width, filter_mat, in); } tensor output = init_conv_output_tensor(out_height, out_width, out_depth, in.shape().rank(), filter_mat); for (std::size_t y_filt = 0; y_filt < f_height; ++y_filt) { const Eigen::Map filter(const_cast(&filter_mats.get_ref_ignore_rank(tensor_pos(0, y_filt, 0, 0, 0))), static_cast(out_depth), static_cast(f_width * f_depth)); for (std::size_t y = 0, y_out = 0; y < in.shape().height_ + 1 - f_height; y += strides_y, ++y_out) { const auto input = get_im2col_mapping(in, f_width, f_depth, strides_x, out_width, y, y_filt); Eigen::Map output_map(&output.get_ref_ignore_rank(tensor_pos(0, 0, y_out, 0, 0)), static_cast(out_depth), static_cast(out_width)); output_map.noalias() += filter * input; } } return output; } enum class padding { valid, same, causal }; struct convolution_config { std::size_t pad_top_; std::size_t pad_bottom_; std::size_t pad_left_; std::size_t pad_right_; std::size_t out_height_; std::size_t out_width_; }; inline convolution_config preprocess_convolution( const shape2& filter_shape, const shape2& strides, padding pad_type, std::size_t input_shape_height, std::size_t input_shape_width, bool transposed) { // https://www.tensorflow.org/api_guides/python/nn#Convolution const int filter_height = static_cast(filter_shape.height_); const int filter_width = static_cast(filter_shape.width_); const int in_height = static_cast(input_shape_height); const int in_width = static_cast(input_shape_width); const int strides_y = static_cast(strides.height_); const int strides_x = static_cast(strides.width_); int out_height = 0; int out_width = 0; if (pad_type == padding::same || pad_type == padding::causal) { out_height = fplus::ceil(static_cast(in_height) / static_cast(strides_y) - 0.001); out_width = fplus::ceil(static_cast(in_width) / static_cast(strides_x) - 0.001); } else { if (transposed) { out_height = fplus::ceil(static_cast(in_height + filter_height - 1) / static_cast(strides_y) - 0.001); out_width = fplus::ceil(static_cast(in_width + filter_width - 1) / static_cast(strides_x) - 0.001); } else { out_height = fplus::ceil(static_cast(in_height - filter_height + 1) / static_cast(strides_y) - 0.001); out_width = fplus::ceil(static_cast(in_width - filter_width + 1) / static_cast(strides_x) - 0.001); } } int pad_top = 0; int pad_bottom = 0; int pad_left = 0; int pad_right = 0; if (transposed) { pad_top = filter_height - 1; pad_bottom = filter_height - 1; pad_left = filter_width - 1; pad_right = filter_width - 1; } if (pad_type == padding::same) { int pad_along_height = 0; int
pad_along_width = 0; if (in_height % strides_y == 0) pad_along_height = std::max(filter_height - strides_y, 0); else pad_along_height = std::max(filter_height - (in_height % strides_y), 0); if (in_width % strides_x == 0) pad_along_width = std::max(filter_width - strides_x, 0); else pad_along_width = std::max(filter_width - (in_width % strides_x), 0); pad_top = pad_along_height / 2; pad_bottom = pad_along_height - pad_top; pad_left = pad_along_width / 2; pad_right = pad_along_width - pad_left; } else if (pad_type == padding::causal) { pad_top = filter_height - 1; pad_left = filter_width - 1; } std::size_t out_height_size_t = fplus::integral_cast_throw(out_height); std::size_t out_width_size_t = fplus::integral_cast_throw(out_width); std::size_t pad_top_size_t = fplus::integral_cast_throw(pad_top); std::size_t pad_bottom_size_t = fplus::integral_cast_throw(pad_bottom); std::size_t pad_left_size_t = fplus::integral_cast_throw(pad_left); std::size_t pad_right_size_t = fplus::integral_cast_throw(pad_right); return { pad_top_size_t, pad_bottom_size_t, pad_left_size_t, pad_right_size_t, out_height_size_t, out_width_size_t }; } inline tensor convolve( const shape2& strides, const padding& pad_type, const convolution_filter_matrices& filter_mat, const tensor& input) { assertion(filter_mat.filter_shape_.depth_ == input.shape().depth_, "invalid filter depth"); const auto conv_cfg = preprocess_convolution( filter_mat.filter_shape_.without_depth(), strides, pad_type, input.shape().height_, input.shape().width_, false); // The padding step usually (on a VGG19 net) only takes about 1% of the overall runtime. // So the increased code complexity of doing it inside the convolution step // is probably not worth the small potential performance gain. const auto in_padded = pad_tensor(0, 0, 0, conv_cfg.pad_top_, conv_cfg.pad_bottom_, conv_cfg.pad_left_, conv_cfg.pad_right_, input); return convolve_accumulative( conv_cfg.out_height_, conv_cfg.out_width_, strides.height_, strides.width_, filter_mat, in_padded); } inline tensor convolve_transposed( const shape2& strides, const padding& pad_type, const convolution_filter_matrices& filter_mat, const tensor& input) { assertion(filter_mat.filter_shape_.depth_ == input.shape().depth_, "invalid filter depth"); const auto input_dilated = dilate_tensor(strides, input, pad_type == padding::same); const auto conv_cfg = preprocess_convolution( filter_mat.filter_shape_.without_depth(), shape2(1, 1), pad_type, input_dilated.shape().height_, input_dilated.shape().width_, true); const auto in_padded = pad_tensor(0, 0, 0, conv_cfg.pad_top_, conv_cfg.pad_bottom_, conv_cfg.pad_left_, conv_cfg.pad_right_, input_dilated); return convolve_accumulative( conv_cfg.out_height_, conv_cfg.out_width_, 1, 1, filter_mat, in_padded); } } } frugally-deep-0.17.1/include/fdeep/convolution3d.hpp000066400000000000000000000116421476372554500224130ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
// (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/common.hpp" #include "fdeep/filter.hpp" #include #include #include #include namespace fdeep { namespace internal { struct convolution3d_config { std::size_t pad_front_; std::size_t pad_back_; std::size_t pad_top_; std::size_t pad_bottom_; std::size_t pad_left_; std::size_t pad_right_; std::size_t out_size_d4_; std::size_t out_height_; std::size_t out_width_; }; inline convolution3d_config preprocess_convolution_3d( const shape3& filter_shape, const shape3& strides, padding pad_type, std::size_t input_shape_size_d4, std::size_t input_shape_height, std::size_t input_shape_width) { const int filter_size_d4 = static_cast(filter_shape.size_dim_4_); const int filter_height = static_cast(filter_shape.height_); const int filter_width = static_cast(filter_shape.width_); const int in_size_d4 = static_cast(input_shape_size_d4); const int in_height = static_cast(input_shape_height); const int in_width = static_cast(input_shape_width); const int strides_d4 = static_cast(strides.size_dim_4_); const int strides_y = static_cast(strides.height_); const int strides_x = static_cast(strides.width_); int out_size_d4 = 0; int out_height = 0; int out_width = 0; if (pad_type == padding::same || pad_type == padding::causal) { out_size_d4 = fplus::ceil(static_cast(in_size_d4) / static_cast(strides_d4) - 0.001); out_height = fplus::ceil(static_cast(in_height) / static_cast(strides_y) - 0.001); out_width = fplus::ceil(static_cast(in_width) / static_cast(strides_x) - 0.001); } else { out_size_d4 = fplus::ceil(static_cast(in_size_d4 - filter_size_d4 + 1) / static_cast(strides_d4) - 0.001); out_height = fplus::ceil(static_cast(in_height - filter_height + 1) / static_cast(strides_y) - 0.001); out_width = fplus::ceil(static_cast(in_width - filter_width + 1) / static_cast(strides_x) - 0.001); } int pad_front = 0; int pad_back = 0; int pad_top = 0; int pad_bottom = 0; int pad_left = 0; int pad_right = 0; if (pad_type == padding::same) { int pad_along_d4 = 0; int pad_along_height = 0; int pad_along_width = 0; if (in_size_d4 % strides_d4 == 0) pad_along_d4 = std::max(filter_size_d4 - strides_d4, 0); else pad_along_d4 = std::max(filter_size_d4 - (in_size_d4 % strides_d4), 0); if (in_height % strides_y == 0) pad_along_height = std::max(filter_height - strides_y, 0); else pad_along_height = std::max(filter_height - (in_height % strides_y), 0); if (in_width % strides_x == 0) pad_along_width = std::max(filter_width - strides_x, 0); else pad_along_width = std::max(filter_width - (in_width % strides_x), 0); pad_front = pad_along_d4 / 2; pad_back = pad_along_d4 - pad_front; pad_top = pad_along_height / 2; pad_bottom = pad_along_height - pad_top; pad_left = pad_along_width / 2; pad_right = pad_along_width - pad_left; } else if (pad_type == padding::causal) { pad_front = filter_size_d4 - 1; pad_top = filter_height - 1; pad_left = filter_width - 1; } std::size_t out_size_d4_size_t = fplus::integral_cast_throw(out_size_d4); std::size_t out_height_size_t = fplus::integral_cast_throw(out_height); std::size_t out_width_size_t = fplus::integral_cast_throw(out_width); std::size_t pad_front_size_t = fplus::integral_cast_throw(pad_front); std::size_t pad_back_size_t = fplus::integral_cast_throw(pad_back); std::size_t pad_top_size_t = fplus::integral_cast_throw(pad_top); std::size_t pad_bottom_size_t = fplus::integral_cast_throw(pad_bottom); std::size_t pad_left_size_t = fplus::integral_cast_throw(pad_left); std::size_t pad_right_size_t = 
fplus::integral_cast_throw(pad_right); return { pad_front_size_t, pad_back_size_t, pad_top_size_t, pad_bottom_size_t, pad_left_size_t, pad_right_size_t, out_size_d4_size_t, out_height_size_t, out_width_size_t }; } } } frugally-deep-0.17.1/include/fdeep/depthwise_convolution.hpp000066400000000000000000000101761476372554500242410ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/common.hpp" #include "fdeep/convolution.hpp" #include "fdeep/filter.hpp" #include #include #include #include namespace fdeep { namespace internal { inline tensor depthwise_convolve_accumulative( std::size_t out_height, std::size_t out_width, std::size_t strides_y, std::size_t strides_x, const convolution_filter_matrices& filter_mat, const tensor& in) { const tensor& filter_mats = filter_mat.filter_mats_; const auto f_height = filter_mat.filter_shape_.height_; const auto f_width = filter_mat.filter_shape_.width_; const auto filters_count = filter_mat.filter_count_; const auto out_depth = filter_mat.filter_count_; assertion(filter_mat.filter_shape_.depth_ == 1, "filter depth must be 1"); assertion(filters_count == in.shape().depth_, "filter count must match input depth"); assertion(out_depth == in.shape().depth_, "number of filters does not match input depth"); assertion(filter_mats.shape().size_dim_4_ == f_height, "incorrect number of filter levels in y direction"); assertion(out_width == (in.shape().width_ - f_width) / strides_x + 1, "output width does not match"); assertion(out_depth == filter_mat.biases_.size(), "invalid bias count"); tensor output = init_conv_output_tensor(out_height, out_width, out_depth, in.shape().rank(), filter_mat); for (std::size_t y_filt = 0; y_filt < f_height; ++y_filt) { const auto filter = Eigen::Map( const_cast(&filter_mats.get_ref_ignore_rank(tensor_pos(0, y_filt, 0, 0, 0))), static_cast(f_width * filters_count)); for (std::size_t y = 0, y_out = 0; y < in.shape().height_ + 1 - f_height; y += strides_y, ++y_out) { const auto input = get_im2col_mapping(in, f_width, filters_count, strides_x, out_width, y, y_filt).array(); // Not materialized to save memory and improve performance.
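// The loop body below computes the depthwise convolution per column:
// each im2col column holds f_width * filters_count interleaved input
// values; multiplying it element-wise with the flattened filter row and
// summing each row of the (filters_count x f_width) reshape yields one
// value per channel, i.e. every channel is convolved with its own kernel
// and, unlike in a regular convolution, channels are never mixed.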
const auto coefficient_wise_product = input.colwise() * filter; Eigen::Map output_map(&output.get_ref_ignore_rank(tensor_pos(0, 0, y_out, 0, 0)), static_cast(out_depth), static_cast(out_width)); for (EigenIndex x = 0; x < static_cast(out_width); ++x) { const ArrayXf col_materialized = coefficient_wise_product.col(x); const Eigen::Map cwp_reshaped(const_cast(col_materialized.data()), static_cast(filters_count), static_cast(f_width)); output_map.col(x) += cwp_reshaped.rowwise().sum(); } } } return output; } inline tensor depthwise_convolve( const shape2& strides, const padding& pad_type, const convolution_filter_matrices& filter_mat, const tensor& input) { assertion(filter_mat.filter_shape_.depth_ == 1, "invalid filter depth"); assertion(filter_mat.filter_count_ == input.shape().depth_, "invalid filter count"); const auto conv_cfg = preprocess_convolution( filter_mat.filter_shape_.without_depth(), strides, pad_type, input.shape().height_, input.shape().width_, false); const auto in_padded = pad_tensor(0, 0, 0, conv_cfg.pad_top_, conv_cfg.pad_bottom_, conv_cfg.pad_left_, conv_cfg.pad_right_, input); return depthwise_convolve_accumulative( conv_cfg.out_height_, conv_cfg.out_width_, strides.height_, strides.width_, filter_mat, in_padded); } } } frugally-deep-0.17.1/include/fdeep/fdeep.hpp000066400000000000000000000011541476372554500206650ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/common.hpp" #include "fdeep/convolution.hpp" #include "fdeep/filter.hpp" #include "fdeep/node.hpp" #include "fdeep/recurrent_ops.hpp" #include "fdeep/shape2.hpp" #include "fdeep/shape3.hpp" #include "fdeep/tensor.hpp" #include "fdeep/tensor_pos.hpp" #include "fdeep/tensor_shape.hpp" #include "fdeep/tensor_shape_variable.hpp" #include "fdeep/import_model.hpp" #include "fdeep/model.hpp" frugally-deep-0.17.1/include/fdeep/filter.hpp000066400000000000000000000061141476372554500210700ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
// (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/common.hpp" #include "fdeep/tensor.hpp" #include "fdeep/tensor_shape.hpp" #include #include #include namespace fdeep { namespace internal { class filter { public: filter(const tensor& m, float_type bias) : m_(m) , bias_(bias) { } const tensor_shape& shape() const { return m_.shape(); } std::size_t volume() const { return m_.shape().volume(); } const tensor& get_tensor() const { return m_; } float_type get(const tensor_pos& pos) const { return m_.get_ignore_rank(pos); } float_type get_bias() const { return bias_; } void set_params(const float_vec& weights, float_type bias) { assertion(weights.size() == m_.shape().volume(), "invalid parameter count"); m_ = tensor(m_.shape(), float_vec(weights)); bias_ = bias; } private: tensor m_; float_type bias_; }; typedef std::vector filter_vec; inline filter dilate_filter(const shape2& dilation_rate, const filter& undilated) { return filter(dilate_tensor(dilation_rate, undilated.get_tensor(), false), undilated.get_bias()); } inline filter_vec generate_filters( const shape2& dilation_rate, const tensor_shape& filter_shape, std::size_t k, const float_vec& weights, const float_vec& bias, bool transpose) { filter_vec filters(k, filter(tensor(filter_shape, 0), 0)); assertion(!filters.empty(), "at least one filter needed"); const std::size_t param_count = fplus::sum(fplus::transform( fplus_c_mem_fn_t(filter, volume, std::size_t), filters)); assertion(static_cast(weights.size()) == param_count, "invalid weight size"); const auto filter_param_cnt = filters.front().shape().volume(); auto filter_weights = fplus::split_every(filter_param_cnt, weights); assertion(filter_weights.size() == filters.size(), "invalid size of filter weights"); assertion(bias.size() == filters.size(), "invalid bias size"); auto it_filter_val = std::begin(filter_weights); auto it_filter_bias = std::begin(bias); for (auto& filt : filters) { filt.set_params(*it_filter_val, *it_filter_bias); filt = dilate_filter(dilation_rate, filt); if (transpose) { filt = filter(reverse_height_dimension(filt.get_tensor()), filt.get_bias()); filt = filter(reverse_width_dimension(filt.get_tensor()), filt.get_bias()); } ++it_filter_val; ++it_filter_bias; } return filters; } } } frugally-deep-0.17.1/include/fdeep/import_model.hpp000066400000000000000000001604551476372554500223060ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
// (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/base64.hpp" #if defined(__GNUC__) || defined(__GNUG__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wctor-dtor-privacy" #endif #if defined _MSC_VER #pragma warning(push) #pragma warning(disable : 4706) #pragma warning(disable : 4996) #endif #include #if defined _MSC_VER #pragma warning(pop) #endif #if defined(__GNUC__) || defined(__GNUG__) #pragma GCC diagnostic pop #endif #include "fdeep/common.hpp" #include "fdeep/layers/add_layer.hpp" #include "fdeep/layers/additive_attention_layer.hpp" #include "fdeep/layers/attention_layer.hpp" #include "fdeep/layers/average_layer.hpp" #include "fdeep/layers/average_pooling_3d_layer.hpp" #include "fdeep/layers/batch_normalization_layer.hpp" #include "fdeep/layers/category_encoding_layer.hpp" #include "fdeep/layers/centercrop_layer.hpp" #include "fdeep/layers/concatenate_layer.hpp" #include "fdeep/layers/conv_2d_layer.hpp" #include "fdeep/layers/conv_2d_transpose_layer.hpp" #include "fdeep/layers/cropping_3d_layer.hpp" #include "fdeep/layers/dense_layer.hpp" #include "fdeep/layers/depthwise_conv_2d_layer.hpp" #include "fdeep/layers/dot_layer.hpp" #include "fdeep/layers/elu_layer.hpp" #include "fdeep/layers/embedding_layer.hpp" #include "fdeep/layers/exponential_layer.hpp" #include "fdeep/layers/flatten_layer.hpp" #include "fdeep/layers/gelu_layer.hpp" #include "fdeep/layers/global_average_pooling_3d_layer.hpp" #include "fdeep/layers/global_max_pooling_3d_layer.hpp" #include "fdeep/layers/hard_sigmoid_layer.hpp" #include "fdeep/layers/input_layer.hpp" #include "fdeep/layers/layer.hpp" #include "fdeep/layers/layer_normalization_layer.hpp" #include "fdeep/layers/leaky_relu_layer.hpp" #include "fdeep/layers/linear_layer.hpp" #include "fdeep/layers/max_pooling_3d_layer.hpp" #include "fdeep/layers/maximum_layer.hpp" #include "fdeep/layers/minimum_layer.hpp" #include "fdeep/layers/model_layer.hpp" #include "fdeep/layers/multi_head_attention_layer.hpp" #include "fdeep/layers/multiply_layer.hpp" #include "fdeep/layers/normalization_layer.hpp" #include "fdeep/layers/permute_layer.hpp" #include "fdeep/layers/pooling_3d_layer.hpp" #include "fdeep/layers/prelu_layer.hpp" #include "fdeep/layers/relu_layer.hpp" #include "fdeep/layers/repeat_vector_layer.hpp" #include "fdeep/layers/rescaling_layer.hpp" #include "fdeep/layers/reshape_layer.hpp" #include "fdeep/layers/resizing_layer.hpp" #include "fdeep/layers/selu_layer.hpp" #include "fdeep/layers/separable_conv_2d_layer.hpp" #include "fdeep/layers/sigmoid_layer.hpp" #include "fdeep/layers/softmax_layer.hpp" #include "fdeep/layers/softplus_layer.hpp" #include "fdeep/layers/softsign_layer.hpp" #include "fdeep/layers/subtract_layer.hpp" #include "fdeep/layers/swish_layer.hpp" #include "fdeep/layers/tanh_layer.hpp" #include "fdeep/layers/time_distributed_layer.hpp" #include "fdeep/layers/unit_normalization_layer.hpp" #include "fdeep/layers/upsampling_1d_layer.hpp" #include "fdeep/layers/upsampling_2d_layer.hpp" #include "fdeep/layers/zero_padding_3d_layer.hpp" #include "fdeep/tensor.hpp" #include "fdeep/tensor_shape.hpp" #include "fdeep/tensor_shape_variable.hpp" #include #include #include #include #include #include #include #include #include namespace fdeep { namespace internal { template ValueT json_object_get(const nlohmann::json& data, KeyT&& key, ValueT&& default_value) { auto&& it = data.find(key); if (it != data.end()) return *it; else return std::forward(default_value); } inline bool 
json_obj_has_member(const nlohmann::json& data, const std::string& member_name) { return data.is_object() && data.find(member_name) != data.end(); } inline fplus::maybe create_maybe_size_t(const nlohmann::json& data) { if (data.is_null()) { return fplus::nothing(); } const int signed_result = data; if (signed_result < 0) { return fplus::nothing(); } const std::size_t result = data; return fplus::just(result); } inline tensor_shape_variable create_tensor_shape_variable_offset( const nlohmann::json& data, std::size_t offset) { assertion(data.is_array(), "tensor_shape_variable needs to be an array"); assertion(data.size() > 0, "need at least one dimension"); if (data.size() == 1 + offset) return tensor_shape_variable( create_maybe_size_t(data[0 + offset])); if (data.size() == 2 + offset) return tensor_shape_variable( create_maybe_size_t(data[0 + offset]), create_maybe_size_t(data[1 + offset])); if (data.size() == 3 + offset) return tensor_shape_variable( create_maybe_size_t(data[0 + offset]), create_maybe_size_t(data[1 + offset]), create_maybe_size_t(data[2 + offset])); if (data.size() == 4 + offset) return tensor_shape_variable( create_maybe_size_t(data[0 + offset]), create_maybe_size_t(data[1 + offset]), create_maybe_size_t(data[2 + offset]), create_maybe_size_t(data[3 + offset])); if (data.size() == 5 + offset) return tensor_shape_variable( create_maybe_size_t(data[0 + offset]), create_maybe_size_t(data[1 + offset]), create_maybe_size_t(data[2 + offset]), create_maybe_size_t(data[3 + offset]), create_maybe_size_t(data[4 + offset])); raise_error("tensor_shape_variable needs 1, 2, 3, 4 or 5 dimensions"); return tensor_shape_variable( fplus::nothing(), fplus::nothing(), fplus::nothing(), fplus::nothing(), fplus::nothing()); // Is never called } inline tensor_shape_variable create_tensor_shape_variable(const nlohmann::json& data) { return create_tensor_shape_variable_offset(data, 0); } inline tensor_shape_variable create_tensor_shape_variable_leading_null(const nlohmann::json& data) { return create_tensor_shape_variable_offset(data, 1); } inline tensor_shape create_tensor_shape(const nlohmann::json& data) { assertion(data.is_array(), "tensor_shape needs to be an array"); assertion(data.size() > 0, "need at least one dimension"); if (data.size() == 1) return tensor_shape(static_cast(data[0])); if (data.size() == 2) return tensor_shape(data[0], data[1]); if (data.size() == 3) return tensor_shape(data[0], data[1], data[2]); if (data.size() == 4) return tensor_shape(data[0], data[1], data[2], data[3]); if (data.size() == 5) return tensor_shape(data[0], data[1], data[2], data[3], data[4]); raise_error("tensor_shape needs 1, 2, 3, 4 or 5 dimensions"); return tensor_shape(static_cast(0)); // Is never called } inline shape2 create_shape2(const nlohmann::json& data) { if (data.is_array()) { assertion(data.size() == 1 || data.size() == 2, "invalid number of dimensions in shape2"); if (data.size() == 1) return shape2(1, data[0]); else return shape2(data[0], data[1]); } else { const std::size_t width = data; return shape2(1, width); } } inline shape3 create_shape3(const nlohmann::json& data) { if (data.is_array()) { assertion(data.size() == 1 || data.size() == 2 || data.size() == 3, "invalid number of dimensions in shape3"); if (data.size() == 1) return shape3(1, 1, data[0]); if (data.size() == 2) return shape3(1, data[0], data[1]); else return shape3(data[0], data[1], data[2]); } else { const std::size_t width = data; return shape3(1, 1, width); } } inline std::size_t create_size_t(const
nlohmann::json& int_data) { const int val = int_data; assertion(val >= 0, "invalid size_t value"); return static_cast(val); } inline int create_int(const nlohmann::json& int_data) { const int val = int_data; return val; } inline float_vec decode_floats(const nlohmann::json& data) { assertion(data.is_array() || data.is_string(), "invalid float array format"); if (data.is_array() && !data.empty() && data[0].is_number()) { const float_vec result = data; return result; } assertion(std::numeric_limits::is_iec559, "The floating-point format of your system is not supported."); const auto res = Base64_decode(json_data_strs_char_prodiver(data, '=')); float_vec out; assertion(res.size() % 4 == 0, "invalid float vector data"); out.reserve(res.size() / 4); for (std::size_t i = 0; i < res.size(); i += 4) { float_type val = static_cast( *(reinterpret_cast(&(res[i])))); out.push_back(val); } return out; } inline tensor create_tensor(const nlohmann::json& data) { const tensor_shape shape = create_tensor_shape(data["shape"]); return tensor(shape, decode_floats(data["values"])); } template std::vector create_vector(F f, const nlohmann::json& data) { if (data.is_array()) return fplus::transform_convert>(f, data); else return fplus::singleton_seq(f(data)); } inline std::vector create_tensor_shapes_variable(const nlohmann::json& data) { return create_vector(create_tensor_shape_variable, data); } inline node_connection create_node_connection_model_layer(const nlohmann::json& data) { assertion(data.is_array(), "invalid format for inbound node"); const std::string layer_id = data.front(); const auto node_idx = create_size_t(data[1]); const auto tensor_idx = create_size_t(data[2]); return node_connection(layer_id, node_idx, tensor_idx); } inline node_connection create_node_connection(const nlohmann::json& args) { const std::vector keras_history = args["config"]["keras_history"]; assertion(keras_history.size() >= 3, "invalid number of items in keras_history"); const std::string layer_id = keras_history[0]; const auto node_idx = create_size_t(keras_history[1]); const auto tensor_idx = create_size_t(keras_history[2]); return node_connection(layer_id, node_idx, tensor_idx); } using get_param_f = std::function; using layer_creators = std::map< std::string, std::function>; using wrapper_layer_creators = std::map< std::string, std::function>; layer_ptr create_layer(const get_param_f&, const nlohmann::json&, const layer_creators& custom_layer_creators, const std::string&); inline layer_ptr create_model_layer(const get_param_f& get_param, const nlohmann::json& data, const std::string& name, const layer_creators& custom_layer_creators, const std::string& prefix) { assertion(data["config"]["layers"].is_array(), "missing layers array"); const std::function get_prefixed_param = [&](const std::string& layer_name, const std::string& param_name) -> nlohmann::json { return get_param(prefix + layer_name, param_name); }; const auto make_layer = [&](const nlohmann::json& json) { return create_layer(get_prefixed_param, json, custom_layer_creators, prefix); }; const auto layers = create_vector(make_layer, data["config"]["layers"]); assertion(data["config"]["input_layers"].is_array(), "no input layers"); const auto inputs = create_vector( create_node_connection_model_layer, data["config"]["input_layers"]); const auto outputs = create_vector( create_node_connection_model_layer, data["config"]["output_layers"]); return std::make_shared(name, layers, inputs, outputs); } inline padding create_padding(const std::string& padding_str) { return 
fplus::throw_on_nothing(error("no padding"), fplus::choose({ { std::string("valid"), padding::valid }, { std::string("same"), padding::same }, { std::string("causal"), padding::causal }, }, padding_str)); } inline layer_ptr create_conv_2d_layer(const get_param_f& get_param, const nlohmann::json& data, const std::string& name) { const std::string padding_str = data["config"]["padding"]; const auto pad_type = create_padding(padding_str); const shape2 strides = create_shape2(data["config"]["strides"]); const shape2 dilation_rate = create_shape2(data["config"]["dilation_rate"]); const auto filter_count = create_size_t(data["config"]["filters"]); float_vec bias(filter_count, 0); const bool use_bias = data["config"]["use_bias"]; if (use_bias) bias = decode_floats(get_param(name, "bias")); assertion(bias.size() == filter_count, "size of bias does not match"); const float_vec weights = decode_floats(get_param(name, "weights")); const shape2 kernel_size = create_shape2(data["config"]["kernel_size"]); assertion(weights.size() % kernel_size.area() == 0, "invalid number of weights"); const std::size_t filter_depths = weights.size() / (kernel_size.area() * filter_count); const tensor_shape filter_shape( kernel_size.height_, kernel_size.width_, filter_depths); return std::make_shared(name, filter_shape, filter_count, strides, pad_type, dilation_rate, weights, bias); } inline layer_ptr create_conv_2d_transpose_layer(const get_param_f& get_param, const nlohmann::json& data, const std::string& name) { const std::string padding_str = data["config"]["padding"]; const auto pad_type = create_padding(padding_str); const shape2 strides = create_shape2(data["config"]["strides"]); const shape2 dilation_rate = create_shape2(data["config"]["dilation_rate"]); const auto filter_count = create_size_t(data["config"]["filters"]); float_vec bias(filter_count, 0); const bool use_bias = data["config"]["use_bias"]; if (use_bias) bias = decode_floats(get_param(name, "bias")); assertion(bias.size() == filter_count, "size of bias does not match"); const float_vec weights = decode_floats(get_param(name, "weights")); const shape2 kernel_size = create_shape2(data["config"]["kernel_size"]); assertion(weights.size() % kernel_size.area() == 0, "invalid number of weights"); const std::size_t filter_depths = weights.size() / (kernel_size.area() * filter_count); const tensor_shape filter_shape( kernel_size.height_, kernel_size.width_, filter_depths); return std::make_shared(name, filter_shape, filter_count, strides, pad_type, dilation_rate, weights, bias); } inline layer_ptr create_separable_conv_2D_layer(const get_param_f& get_param, const nlohmann::json& data, const std::string& name) { const std::string padding_str = data["config"]["padding"]; const auto pad_type = create_padding(padding_str); const shape2 strides = create_shape2(data["config"]["strides"]); const shape2 dilation_rate = create_shape2(data["config"]["dilation_rate"]); const auto filter_count = create_size_t(data["config"]["filters"]); float_vec bias(filter_count, 0); const bool use_bias = data["config"]["use_bias"]; if (use_bias) bias = decode_floats(get_param(name, "bias")); assertion(bias.size() == filter_count, "size of bias does not match"); const float_vec slice_weights = decode_floats( get_param(name, "slice_weights")); const float_vec stack_weights = decode_floats( get_param(name, "stack_weights")); const shape2 kernel_size = create_shape2(data["config"]["kernel_size"]); assertion(slice_weights.size() % kernel_size.area() == 0, "invalid number of weights"); 
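// The two weight blobs describe the two stages of the separable
// convolution: "slice_weights" hold the depthwise kernels (one
// kernel_size kernel per input channel), "stack_weights" the pointwise
// 1x1 weights mixing input_depth channels into filter_count outputs,
// which is why their sizes are checked against different quantities.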
assertion(stack_weights.size() % filter_count == 0, "invalid number of weights"); const std::size_t input_depth = slice_weights.size() / kernel_size.area(); const std::size_t stack_output_depths_1 = stack_weights.size() / input_depth; assertion(stack_output_depths_1 == filter_count, "invalid weights sizes"); const tensor_shape filter_shape(kernel_size.height_, kernel_size.width_, 1); float_vec bias_0(input_depth, 0); return std::make_shared(name, input_depth, filter_shape, filter_count, strides, pad_type, dilation_rate, slice_weights, stack_weights, bias_0, bias); } inline layer_ptr create_depthwise_conv_2D_layer(const get_param_f& get_param, const nlohmann::json& data, const std::string& name) { const std::string padding_str = data["config"]["padding"]; const auto pad_type = create_padding(padding_str); const shape2 strides = create_shape2(data["config"]["strides"]); const shape2 dilation_rate = create_shape2(data["config"]["dilation_rate"]); const float_vec slice_weights = decode_floats( get_param(name, "slice_weights")); const shape2 kernel_size = create_shape2(data["config"]["kernel_size"]); assertion(slice_weights.size() % kernel_size.area() == 0, "invalid number of weights"); const std::size_t input_depth = slice_weights.size() / kernel_size.area(); const tensor_shape filter_shape(kernel_size.height_, kernel_size.width_, 1); float_vec bias(input_depth, 0); const bool use_bias = data["config"]["use_bias"]; if (use_bias) bias = decode_floats(get_param(name, "bias")); assertion(bias.size() == input_depth, "size of bias does not match"); return std::make_shared(name, input_depth, filter_shape, strides, pad_type, dilation_rate, slice_weights, bias); } inline layer_ptr create_input_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { assertion(data["inbound_nodes"].empty(), "input layer is not allowed to have inbound nodes"); const auto input_shape = create_tensor_shape_variable_leading_null(data["config"]["batch_shape"]); return std::make_shared(name, input_shape); } inline layer_ptr create_batch_normalization_layer(const get_param_f& get_param, const nlohmann::json& data, const std::string& name) { const float_vec moving_mean = decode_floats(get_param(name, "moving_mean")); const float_vec moving_variance = decode_floats(get_param(name, "moving_variance")); const bool center = data["config"]["center"]; const bool scale = data["config"]["scale"]; const auto axis_vec = create_vector(create_int, data["config"]["axis"]); assertion(axis_vec.size() == 1, "invalid axis configuration"); const int axis = axis_vec.front(); const float_type epsilon = data["config"]["epsilon"]; float_vec gamma; float_vec beta; if (scale) gamma = decode_floats(get_param(name, "gamma")); if (center) beta = decode_floats(get_param(name, "beta")); return std::make_shared( name, axis, moving_mean, moving_variance, beta, gamma, epsilon); } inline layer_ptr create_layer_normalization_layer(const get_param_f& get_param, const nlohmann::json& data, const std::string& name) { const bool center = data["config"]["center"]; const bool scale = data["config"]["scale"]; const auto axes = create_vector(create_int, data["config"]["axis"]); const float_type epsilon = data["config"]["epsilon"]; float_vec gamma; float_vec beta; if (scale) gamma = decode_floats(get_param(name, "gamma")); if (center) beta = decode_floats(get_param(name, "beta")); return std::make_shared( name, axes, beta, gamma, epsilon); } inline layer_ptr create_unit_normalization_layer(const get_param_f&, const nlohmann::json& data, const 
std::string& name) { const auto axes = create_vector(create_int, data["config"]["axis"]); return std::make_shared(name, axes); } inline layer_ptr create_identity_layer( const get_param_f&, const nlohmann::json&, const std::string& name) { // Dropout and noise layers are identity functions during prediction. return std::make_shared(name); } inline layer_ptr create_max_pooling_3d_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { const auto pool_size = create_shape3(data["config"]["pool_size"]); const auto strides = create_shape3(data["config"]["strides"]); const std::string padding_str = data["config"]["padding"]; const auto pad_type = create_padding(padding_str); return std::make_shared(name, pool_size, strides, pad_type); } inline layer_ptr create_average_pooling_3d_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { const auto pool_size = create_shape3(data["config"]["pool_size"]); const auto strides = create_shape3(data["config"]["strides"]); const std::string padding_str = data["config"]["padding"]; const auto pad_type = create_padding(padding_str); return std::make_shared(name, pool_size, strides, pad_type); } inline layer_ptr create_global_max_pooling_3d_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { const bool keepdims = data["config"]["keepdims"]; return std::make_shared(name, keepdims); } inline layer_ptr create_global_average_pooling_3d_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { const bool keepdims = data["config"]["keepdims"]; return std::make_shared(name, keepdims); } inline layer_ptr create_upsampling_1d_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { const std::size_t size = data["config"]["size"]; return std::make_shared(name, size); } inline layer_ptr create_upsampling_2d_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { const auto scale_factor = create_shape2(data["config"]["size"]); const std::string interpolation = data["config"]["interpolation"]; return std::make_shared( name, scale_factor, interpolation); } inline layer_ptr create_dense_layer(const get_param_f& get_param, const nlohmann::json& data, const std::string& name) { const float_vec weights = decode_floats(get_param(name, "weights")); std::size_t units = data["config"]["units"]; float_vec bias(units, 0); const bool use_bias = data["config"]["use_bias"]; if (use_bias) bias = decode_floats(get_param(name, "bias")); assertion(bias.size() == units, "size of bias does not match"); return std::make_shared( name, units, weights, bias); } inline layer_ptr create_concatenate_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { const int keras_axis = data["config"]["axis"]; return std::make_shared(name, keras_axis); } inline layer_ptr create_add_layer( const get_param_f&, const nlohmann::json&, const std::string& name) { return std::make_shared(name); } inline layer_ptr create_maximum_layer( const get_param_f&, const nlohmann::json&, const std::string& name) { return std::make_shared(name); } inline layer_ptr create_minimum_layer( const get_param_f&, const nlohmann::json&, const std::string& name) { return std::make_shared(name); } inline layer_ptr create_dot_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { const auto axes = create_vector(create_int, data["config"]["axes"]); const bool normalize = data["config"]["normalize"]; return std::make_shared(name, axes, 
normalize); } inline layer_ptr create_multiply_layer( const get_param_f&, const nlohmann::json&, const std::string& name) { return std::make_shared(name); } inline layer_ptr create_average_layer( const get_param_f&, const nlohmann::json&, const std::string& name) { return std::make_shared(name); } inline layer_ptr create_subtract_layer( const get_param_f&, const nlohmann::json&, const std::string& name) { return std::make_shared(name); } inline layer_ptr create_flatten_layer( const get_param_f&, const nlohmann::json&, const std::string& name) { return std::make_shared(name); } inline layer_ptr create_zero_padding_3d_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { const auto padding = create_vector>(fplus::bind_1st_of_2( create_vector, create_size_t), data["config"]["padding"]); assertion( (padding.size() == 2 && padding[0].size() == padding[1].size()) || (padding.size() == 3 && padding[0].size() == padding[1].size() && padding[1].size() == padding[2].size()), "invalid padding format"); if (padding[0].size() == 1) { const std::size_t front_pad = 0; const std::size_t back_pad = 0; const std::size_t top_pad = 0; const std::size_t bottom_pad = 0; const std::size_t left_pad = padding[0][0]; const std::size_t right_pad = padding[1][0]; return std::make_shared(name, front_pad, back_pad, top_pad, bottom_pad, left_pad, right_pad); } if (padding.size() == 2) { const std::size_t front_pad = 0; const std::size_t back_pad = 0; const std::size_t top_pad = padding[0][0]; const std::size_t bottom_pad = padding[0][1]; const std::size_t left_pad = padding[1][0]; const std::size_t right_pad = padding[1][1]; return std::make_shared(name, front_pad, back_pad, top_pad, bottom_pad, left_pad, right_pad); } else { const std::size_t front_pad = padding[0][0]; const std::size_t back_pad = padding[0][1]; const std::size_t top_pad = padding[1][0]; const std::size_t bottom_pad = padding[1][1]; const std::size_t left_pad = padding[2][0]; const std::size_t right_pad = padding[2][1]; return std::make_shared(name, front_pad, back_pad, top_pad, bottom_pad, left_pad, right_pad); } } inline layer_ptr create_cropping_3d_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { const auto cropping = create_vector>(fplus::bind_1st_of_2( create_vector, create_size_t), data["config"]["cropping"]); assertion( (cropping.size() == 2 && cropping[0].size() == cropping[1].size()) || (cropping.size() == 3 && cropping[0].size() == cropping[1].size() && cropping[1].size() == cropping[2].size()), "invalid cropping format"); if (cropping[0].size() == 1) { const std::size_t front_crop = 0; const std::size_t back_crop = 0; const std::size_t top_crop = 0; const std::size_t bottom_crop = 0; const std::size_t left_crop = cropping[0][0]; const std::size_t right_crop = cropping[1][0]; return std::make_shared(name, front_crop, back_crop, top_crop, bottom_crop, left_crop, right_crop); } if (cropping.size() == 2) { const std::size_t front_crop = 0; const std::size_t back_crop = 0; const std::size_t top_crop = cropping[0][0]; const std::size_t bottom_crop = cropping[0][1]; const std::size_t left_crop = cropping[1][0]; const std::size_t right_crop = cropping[1][1]; return std::make_shared(name, front_crop, back_crop, top_crop, bottom_crop, left_crop, right_crop); } else { const std::size_t front_crop = cropping[0][0]; const std::size_t back_crop = cropping[0][1]; const std::size_t top_crop = cropping[1][0]; const std::size_t bottom_crop = cropping[1][1]; const std::size_t left_crop = 
cropping[2][0]; const std::size_t right_crop = cropping[2][1]; return std::make_shared(name, front_crop, back_crop, top_crop, bottom_crop, left_crop, right_crop); } } inline layer_ptr create_centercrop_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { const std::size_t height = data["config"]["height"]; const std::size_t width = data["config"]["width"]; return std::make_shared(name, height, width); } inline layer_ptr create_repeat_vector_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { const std::size_t n = data["config"]["n"]; return std::make_shared(name, n); } inline layer_ptr create_rescaling_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { const float_type scale = data["config"]["scale"]; const float_type offset = data["config"]["offset"]; return std::make_shared(name, scale, offset); } inline layer_ptr create_reshape_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { const auto target_shape = create_tensor_shape_variable(data["config"]["target_shape"]); return std::make_shared(name, target_shape); } inline layer_ptr create_resizing_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { const std::size_t height = data["config"]["height"]; const std::size_t width = data["config"]["width"]; const std::string interpolation = data["config"]["interpolation"]; const bool crop_to_aspect_ratio = data["config"]["crop_to_aspect_ratio"]; return std::make_shared(name, height, width, interpolation, crop_to_aspect_ratio); } inline activation_layer_ptr create_linear_layer( const get_param_f&, const nlohmann::json&, const std::string& name) { return std::make_shared(name); } inline activation_layer_ptr create_softmax_layer( const get_param_f&, const nlohmann::json&, const std::string& name) { return std::make_shared(name); } inline activation_layer_ptr create_softplus_layer( const get_param_f&, const nlohmann::json&, const std::string& name) { return std::make_shared(name); } inline activation_layer_ptr create_tanh_layer( const get_param_f&, const nlohmann::json&, const std::string& name) { return std::make_shared(name); } inline activation_layer_ptr create_sigmoid_layer( const get_param_f&, const nlohmann::json&, const std::string& name) { return std::make_shared(name); } inline activation_layer_ptr create_swish_layer( const get_param_f&, const nlohmann::json&, const std::string& name) { return std::make_shared(name); } inline activation_layer_ptr create_hard_sigmoid_layer( const get_param_f&, const nlohmann::json&, const std::string& name) { return std::make_shared(name); } inline activation_layer_ptr create_relu_layer( const get_param_f&, const nlohmann::json& data, const std::string& name) { float_type max_value = std::numeric_limits::max(); float_type negative_slope = static_cast(0); float_type threshold = static_cast(0); if (json_obj_has_member(data, "config") && json_obj_has_member(data["config"], "max_value") && !data["config"]["max_value"].is_null()) { max_value = data["config"]["max_value"]; negative_slope = data["config"]["negative_slope"]; threshold = data["config"]["threshold"]; } return std::make_shared(name, max_value, negative_slope, threshold); } inline activation_layer_ptr create_relu6_layer( const get_param_f&, const nlohmann::json&, const std::string& name) { return std::make_shared(name, static_cast(6), static_cast(0), static_cast(0)); } inline activation_layer_ptr create_selu_layer( const get_param_f&, const nlohmann::json&, 
inline activation_layer_ptr create_selu_layer(
    const get_param_f&, const nlohmann::json&,
    const std::string& name)
{
    return std::make_shared<selu_layer>(name);
}

inline activation_layer_ptr create_exponential_layer(
    const get_param_f&, const nlohmann::json&,
    const std::string& name)
{
    return std::make_shared<exponential_layer>(name);
}

inline activation_layer_ptr create_gelu_layer(
    const get_param_f&, const nlohmann::json&,
    const std::string& name)
{
    return std::make_shared<gelu_layer>(name);
}

inline activation_layer_ptr create_softsign_layer(
    const get_param_f&, const nlohmann::json&,
    const std::string& name)
{
    return std::make_shared<softsign_layer>(name);
}

inline activation_layer_ptr create_leaky_relu_layer(
    const get_param_f&, const nlohmann::json& data,
    const std::string& name)
{
    float_type negative_slope = 0.3f;
    if (json_obj_has_member(data, "config")
        && json_obj_has_member(data["config"], "negative_slope")) {
        negative_slope = data["config"]["negative_slope"];
    }
    return std::make_shared<leaky_relu_layer>(name, negative_slope);
}

inline layer_ptr create_leaky_relu_layer_isolated(
    const get_param_f& get_param, const nlohmann::json& data,
    const std::string& name)
{
    return create_leaky_relu_layer(get_param, data, name);
}

inline layer_ptr create_prelu_layer(
    const get_param_f& get_param, const nlohmann::json& data,
    const std::string& name)
{
    std::vector<std::size_t> shared_axes;
    if (json_obj_has_member(data, "config")
        && json_obj_has_member(data["config"], "shared_axes")
        && !data["config"]["shared_axes"].empty()) {
        shared_axes = create_vector<std::size_t>(create_size_t, data["config"]["shared_axes"]);
    }
    const float_vec alpha = decode_floats(get_param(name, "alpha"));
    return std::make_shared<prelu_layer>(name, alpha, shared_axes);
}

inline activation_layer_ptr create_elu_layer(
    const get_param_f&, const nlohmann::json& data,
    const std::string& name)
{
    float_type alpha = 1.0f;
    if (json_obj_has_member(data, "config")
        && json_obj_has_member(data["config"], "alpha")) {
        alpha = data["config"]["alpha"];
    }
    return std::make_shared<elu_layer>(name, alpha);
}

inline layer_ptr create_elu_layer_isolated(
    const get_param_f& get_param, const nlohmann::json& data,
    const std::string& name)
{
    return create_elu_layer(get_param, data, name);
}

inline layer_ptr create_relu_layer_isolated(
    const get_param_f& get_param, const nlohmann::json& data,
    const std::string& name)
{
    return create_relu_layer(get_param, data, name);
}

inline layer_ptr create_normalization_layer(
    const get_param_f& get_param, const nlohmann::json& data,
    const std::string& name)
{
    const auto axes = create_vector<int>(create_int, data["config"]["axis"]);
    const float_vec mean = decode_floats(get_param(name, "mean"));
    const float_vec variance = decode_floats(get_param(name, "variance"));
    return std::make_shared<normalization_layer>(name, axes, mean, variance);
}
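// Illustrative note, not part of the original sources: the mean and
// variance loaded by create_normalization_layer above are the statistics
// Keras' Normalization layer adapted at training time; at inference the
// layer standardizes roughly as
//
//     out[i] = (in[i] - mean[i]) / sqrt(variance[i])
//
// along the configured axes. The creator only decodes the parameters; the
// arithmetic lives in the corresponding layer class.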
inline layer_ptr create_category_encoding_layer(
    const get_param_f&, const nlohmann::json& data,
    const std::string& name)
{
    const std::size_t num_tokens = data["config"]["num_tokens"];
    const std::string output_mode = data["config"]["output_mode"];
    return std::make_shared<category_encoding_layer>(name, num_tokens, output_mode);
}

inline layer_ptr create_attention_layer(
    const get_param_f& get_param, const nlohmann::json& data,
    const std::string& name)
{
    const bool use_scale = data["config"]["use_scale"];
    const std::string score_mode = data["config"]["score_mode"];
    float_type scale = static_cast<float_type>(1);
    float_type concat_score_weight = static_cast<float_type>(1);
    if (use_scale) {
        scale = get_param(name, "scale");
    }
    if (score_mode == "concat") {
        concat_score_weight = get_param(name, "concat_score_weight");
    }
    return std::make_shared<attention_layer>(name, score_mode, scale, concat_score_weight);
}

inline layer_ptr create_additive_attention_layer(
    const get_param_f& get_param, const nlohmann::json& data,
    const std::string& name)
{
    const bool use_scale = data["config"]["use_scale"];
    float_vec scale(static_cast<std::size_t>(1), 1);
    if (use_scale) {
        scale = decode_floats(get_param(name, "scale"));
    }
    return std::make_shared<additive_attention_layer>(name, scale);
}

inline layer_ptr create_multi_head_attention_layer(
    const get_param_f& get_param, const nlohmann::json& data,
    const std::string& name)
{
    const std::size_t num_heads = data["config"]["num_heads"];
    const std::size_t key_dim = data["config"]["key_dim"];
    const std::size_t value_dim = data["config"]["value_dim"];
    const bool use_bias = data["config"]["use_bias"];
    const auto weight_shapes = create_vector<std::vector<std::size_t>>(
        fplus::bind_1st_of_2(create_vector<std::size_t, decltype(create_size_t)>, create_size_t),
        get_param(name, "weight_shapes"));
    const auto weight_values = create_vector<float_vec>(
        decode_floats, get_param(name, "weights"));
    const auto weights_and_biases = fplus::zip_with(
        [](const std::vector<std::size_t>& shape, const float_vec& values) -> tensor {
            return tensor(
                create_tensor_shape_from_dims(shape),
                fplus::convert_container<float_vec>(values));
        },
        weight_shapes, weight_values);
    return std::make_shared<multi_head_attention_layer>(name,
        num_heads, key_dim, value_dim, use_bias, weights_and_biases);
}

inline std::string get_activation_type(const nlohmann::json& data)
{
    assertion(data.is_string(), "Layer activation must be a string.");
    return data;
}

inline activation_layer_ptr create_activation_layer_type_name(
    const get_param_f& get_param, const nlohmann::json& data,
    const std::string& type, const std::string& name)
{
    const std::map<std::string,
        std::function<activation_layer_ptr(
            const get_param_f&, const nlohmann::json&, const std::string&)>>
        creators = {
            { "linear", create_linear_layer },
            { "softmax", create_softmax_layer },
            { "softplus", create_softplus_layer },
            { "tanh", create_tanh_layer },
            { "sigmoid", create_sigmoid_layer },
            { "swish", create_swish_layer },
            { "silu", create_swish_layer },
            { "hard_sigmoid", create_hard_sigmoid_layer },
            { "relu", create_relu_layer },
            { "relu6", create_relu6_layer },
            { "selu", create_selu_layer },
            { "elu", create_elu_layer },
            { "exponential", create_exponential_layer },
            { "gelu", create_gelu_layer },
            { "softsign", create_softsign_layer }
        };
    return fplus::throw_on_nothing(
        error("unknown activation type: " + type),
        fplus::get_from_map(creators, type))(get_param, data, name);
}

inline layer_ptr create_activation_layer(
    const get_param_f& get_param, const nlohmann::json& data,
    const std::string& name)
{
    const std::string type = get_activation_type(data["config"]["activation"]);
    return create_activation_layer_type_name(get_param, data, type, name);
}

inline layer_ptr create_permute_layer(
    const get_param_f&, const nlohmann::json& data,
    const std::string& name)
{
    const auto dims = create_vector<std::size_t>(create_size_t, data["config"]["dims"]);
    return std::make_shared<permute_layer>(name, dims);
}

inline node create_node(const nlohmann::json& inbound_nodes_data)
{
    assertion(inbound_nodes_data["args"].is_array(), "node args need to be an array");
    std::vector<nlohmann::json> args = inbound_nodes_data["args"];
    if (args.front().is_array()) {
        assertion(args.size() == 1, "invalid args format");
        const std::vector<nlohmann::json> inner_args = args.front();
        return node(fplus::transform(create_node_connection, inner_args));
    } else {
        return node(fplus::transform(create_node_connection, args));
    }
}

inline nodes create_nodes(const nlohmann::json& data)
{
    assertion(data["inbound_nodes"].is_array(), "no inbound nodes");
    const std::vector<nlohmann::json> inbound_nodes_data = data["inbound_nodes"];
    return fplus::transform(create_node, inbound_nodes_data);
}
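// Illustrative sketch, not part of the original sources: the table in
// create_activation_layer_type_name above is the single dispatch point for
// activations, both for standalone Activation layers and for fused
// "activation" attributes of other layers. A hypothetical direct use:
//
//     // const auto act = create_activation_layer_type_name(
//     //     get_param, data, "tanh", "example_tanh");
//     // const tensors out = act->apply(inputs); // element-wise tanh
//
// An unknown string raises "unknown activation type: <type>" via
// fplus::throw_on_nothing.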
data["config"]["input_dim"]; const std::size_t output_dim = data["config"]["output_dim"]; const float_vec weights = decode_floats(get_param(name, "weights")); return std::make_shared(name, input_dim, output_dim, weights); } inline layer_ptr create_time_distributed_layer(const get_param_f& get_param, const nlohmann::json& data, const std::string& name, const layer_creators& custom_layer_creators, const std::string& prefix) { const std::string wrapped_layer_type = data["config"]["layer"]["class_name"]; nlohmann::json data_inner_layer = data["config"]["layer"]; data_inner_layer["name"] = data["name"]; data_inner_layer["inbound_nodes"] = data["inbound_nodes"]; const std::size_t td_input_len = std::size_t(decode_floats(get_param(name, "td_input_len")).front()); const std::size_t td_output_len = std::size_t(decode_floats(get_param(name, "td_output_len")).front()); layer_ptr inner_layer = create_layer(get_param, data_inner_layer, custom_layer_creators, prefix); return std::make_shared(name, inner_layer, td_input_len, td_output_len); } inline layer_ptr create_layer(const get_param_f& get_param, const nlohmann::json& data, const layer_creators& custom_layer_creators, const std::string&) { const std::string name = data["name"]; const layer_creators default_creators = { { "Identity", create_identity_layer }, { "Conv1D", create_conv_2d_layer }, { "Conv2D", create_conv_2d_layer }, { "Conv1DTranspose", create_conv_2d_transpose_layer }, { "Conv2DTranspose", create_conv_2d_transpose_layer }, { "SeparableConv1D", create_separable_conv_2D_layer }, { "SeparableConv2D", create_separable_conv_2D_layer }, { "DepthwiseConv2D", create_depthwise_conv_2D_layer }, { "InputLayer", create_input_layer }, { "BatchNormalization", create_batch_normalization_layer }, { "LayerNormalization", create_layer_normalization_layer }, { "UnitNormalization", create_unit_normalization_layer }, { "Dropout", create_identity_layer }, { "ActivityRegularization", create_identity_layer }, { "AlphaDropout", create_identity_layer }, { "FixedDropout", create_identity_layer }, { "GaussianDropout", create_identity_layer }, { "GaussianNoise", create_identity_layer }, { "SpatialDropout1D", create_identity_layer }, { "SpatialDropout2D", create_identity_layer }, { "SpatialDropout3D", create_identity_layer }, { "RandomContrast", create_identity_layer }, { "RandomFlip", create_identity_layer }, { "RandomHeight", create_identity_layer }, { "RandomRotation", create_identity_layer }, { "RandomTranslation", create_identity_layer }, { "RandomWidth", create_identity_layer }, { "RandomZoom", create_identity_layer }, { "LeakyReLU", create_leaky_relu_layer_isolated }, { "Permute", create_permute_layer }, { "PReLU", create_prelu_layer }, { "ELU", create_elu_layer_isolated }, { "ReLU", create_relu_layer_isolated }, { "MaxPooling1D", create_max_pooling_3d_layer }, { "MaxPooling2D", create_max_pooling_3d_layer }, { "MaxPooling3D", create_max_pooling_3d_layer }, { "AveragePooling1D", create_average_pooling_3d_layer }, { "AveragePooling2D", create_average_pooling_3d_layer }, { "AveragePooling3D", create_average_pooling_3d_layer }, { "GlobalMaxPooling1D", create_global_max_pooling_3d_layer }, { "GlobalMaxPooling2D", create_global_max_pooling_3d_layer }, { "GlobalMaxPooling3D", create_global_max_pooling_3d_layer }, { "GlobalAveragePooling1D", create_global_average_pooling_3d_layer }, { "GlobalAveragePooling2D", create_global_average_pooling_3d_layer }, { "GlobalAveragePooling3D", create_global_average_pooling_3d_layer }, { "UpSampling1D", create_upsampling_1d_layer 
}, { "UpSampling2D", create_upsampling_2d_layer }, { "Dense", create_dense_layer }, { "Add", create_add_layer }, { "Maximum", create_maximum_layer }, { "Minimum", create_minimum_layer }, { "Dot", create_dot_layer }, { "Concatenate", create_concatenate_layer }, { "Multiply", create_multiply_layer }, { "Average", create_average_layer }, { "Subtract", create_subtract_layer }, { "Flatten", create_flatten_layer }, { "ZeroPadding1D", create_zero_padding_3d_layer }, { "ZeroPadding2D", create_zero_padding_3d_layer }, { "ZeroPadding3D", create_zero_padding_3d_layer }, { "Cropping1D", create_cropping_3d_layer }, { "Cropping2D", create_cropping_3d_layer }, { "Cropping3D", create_cropping_3d_layer }, { "CenterCrop", create_centercrop_layer }, { "Activation", create_activation_layer }, { "RepeatVector", create_repeat_vector_layer }, { "Rescaling", create_rescaling_layer }, { "Reshape", create_reshape_layer }, { "Resizing", create_resizing_layer }, { "Embedding", create_embedding_layer }, { "Softmax", create_softmax_layer }, { "Normalization", create_normalization_layer }, { "CategoryEncoding", create_category_encoding_layer }, { "Attention", create_attention_layer }, { "AdditiveAttention", create_additive_attention_layer }, { "MultiHeadAttention", create_multi_head_attention_layer }, }; const wrapper_layer_creators wrapper_creators = { { "Model", create_model_layer }, { "Functional", create_model_layer }, { "TimeDistributed", create_time_distributed_layer } }; const std::string type = data["class_name"]; if (fplus::map_contains(wrapper_creators, type)) { auto result = fplus::get_from_map_unsafe(wrapper_creators, type)( get_param, data, name, custom_layer_creators, name + "_"); result->set_nodes(create_nodes(data)); return result; } else { const layer_creators creators = fplus::map_union(custom_layer_creators, default_creators); auto result = fplus::throw_on_nothing( error("unknown layer type: " + type), fplus::get_from_map(creators, type))( get_param, data, name); if (type != "Activation" && json_obj_has_member(data["config"], "activation")) { const std::string activation = get_activation_type(data["config"]["activation"]); result->set_activation( create_activation_layer_type_name(get_param, data, activation, "")); } result->set_nodes(create_nodes(data)); return result; } } struct test_case { tensors input_; tensors output_; }; using test_cases = std::vector; inline test_case load_test_case(const nlohmann::json& data) { assertion(data["inputs"].is_array(), "test needs inputs"); assertion(data["outputs"].is_array(), "test needs outputs"); return { create_vector(create_tensor, data["inputs"]), create_vector(create_tensor, data["outputs"]) }; } inline test_cases load_test_cases(const nlohmann::json& data) { return create_vector(load_test_case, data); } inline void check_test_outputs(float_type epsilon, const tensors& outputs, const tensors& targets) { assertion(outputs.size() == targets.size(), "invalid output count"); for (std::size_t i = 0; i < outputs.size(); ++i) { const auto& output = outputs[i]; const auto& target = targets[i]; assertion(output.shape() == target.shape(), "Wrong output size. 
Is " + show_tensor_shape(output.shape()) + ", should be " + show_tensor_shape(target.shape()) + "."); for (std::size_t pos_dim_5 = 0; pos_dim_5 < output.shape().size_dim_5_; ++pos_dim_5) { for (std::size_t pos_dim_4 = 0; pos_dim_4 < output.shape().size_dim_4_; ++pos_dim_4) { for (std::size_t y = 0; y < output.shape().height_; ++y) { for (std::size_t x = 0; x < output.shape().width_; ++x) { for (std::size_t z = 0; z < output.shape().depth_; ++z) { const tensor_pos pos(pos_dim_5, pos_dim_4, y, x, z); const auto target_val = target.get_ignore_rank(pos); const auto output_val = output.get_ignore_rank(pos); if (!fplus::is_in_closed_interval_around(epsilon, target_val, output_val) && !(std::isnan(target_val) && std::isnan(output_val))) { const std::string msg = std::string("test failed: ") + "output=" + fplus::show(i) + " " + "pos=" + fplus::show(y) + "," + fplus::show(x) + "," + fplus::show(z) + " " + "value=" + fplus::show(output_val) + " " "target=" + fplus::show(target_val); internal::raise_error(msg); } } } } } } } } } } frugally-deep-0.17.1/include/fdeep/layers/000077500000000000000000000000001476372554500203675ustar00rootroot00000000000000frugally-deep-0.17.1/include/fdeep/layers/activation_layer.hpp000066400000000000000000000023021476372554500244320ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/layer.hpp" #include #include #include #include #include #include namespace fdeep { namespace internal { // Abstract base class for actication layers // https://en.wikipedia.org/wiki/Activation_function class activation_layer : public layer { public: explicit activation_layer(const std::string& name) : layer(name) { } tensors apply_impl(const tensors& inputs) const override { const auto f = [this](const tensor& t) -> tensor { return transform_input(t); }; return fplus::transform(f, inputs); } protected: virtual tensor transform_input(const tensor& input) const = 0; }; inline tensors apply_activation_layer( const activation_layer_ptr& ptr, const tensors& input) { return ptr == nullptr ? input : ptr->apply(input); } } } frugally-deep-0.17.1/include/fdeep/layers/add_layer.hpp000066400000000000000000000011411476372554500230210ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/layer.hpp" #include namespace fdeep { namespace internal { class add_layer : public layer { public: explicit add_layer(const std::string& name) : layer(name) { } protected: tensors apply_impl(const tensors& input) const override { return { sum_tensors(input) }; } }; } } frugally-deep-0.17.1/include/fdeep/layers/additive_attention_layer.hpp000066400000000000000000000032241476372554500261530ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
frugally-deep-0.17.1/include/fdeep/layers/additive_attention_layer.hpp000066400000000000000000000032241476372554500261530ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"
#include "fdeep/layers/softmax_layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

class additive_attention_layer : public layer {
public:
    explicit additive_attention_layer(const std::string& name, const float_vec& scale)
        : layer(name)
        , scale_(scale)
    {
    }

protected:
    tensors apply_impl(const tensors& input) const override
    {
        assertion(input.size() == 2 || input.size() == 3,
            "Invalid number of inputs for Attention layer.");
        const tensor& query = input[0];
        const tensor& value = input[1];
        const tensor& key = input.size() > 2 ? input[2] : value;
        const tensor scores = reshape(
            sum_depth(
                mult_tensors(tensor(tensor_shape(scale_.size()), float_vec(scale_)),
                    transform_tensor(tanh_typed,
                        add_tensors(
                            reshape(query, tensor_shape(query.shape().width_, 1, query.shape().depth_)),
                            reshape(key, tensor_shape(1, key.shape().width_, key.shape().depth_)))))),
            tensor_shape(query.shape().width_, key.shape().width_));
        const tensor distribution = softmax(scores);
        return { dot_product_tensors(distribution, value, std::vector<int>({ 2, 1 }), false) };
    }

    float_vec scale_;
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/attention_layer.hpp000066400000000000000000000047521476372554500243070ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"
#include "fdeep/layers/softmax_layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

class attention_layer : public layer {
public:
    explicit attention_layer(const std::string& name,
        const std::string& score_mode, float_type scale, float_type concat_score_weight)
        : layer(name)
        , score_mode_(score_mode)
        , scale_(scale)
        , concat_score_weight_(concat_score_weight)
    {
        assertion(score_mode_ == "dot" || score_mode_ == "concat",
            "Invalid score_mode for Attention layer.");
    }

protected:
    tensors apply_impl(const tensors& input) const override
    {
        assertion(input.size() == 2 || input.size() == 3,
            "Invalid number of inputs for Attention layer.");
        const tensor& query = input[0];
        const tensor& value = input[1];
        const tensor& key = input.size() > 2 ? input[2] : value;
        const tensor scores = score_mode_ == "dot"
            ? transform_tensor(fplus::multiply_with(scale_),
                  dot_product_tensors(query, transpose(key), std::vector<int>({ 2, 1 }), false))
            :
            // https://github.com/keras-team/keras/blob/v2.13.1/keras/layers/attention/attention.py
            transform_tensor(fplus::multiply_with(concat_score_weight_),
                reshape(
                    sum_depth(
                        transform_tensor(tanh_typed,
                            transform_tensor(fplus::multiply_with(scale_),
                                add_tensors(
                                    reshape(query, tensor_shape(query.shape().width_, 1, query.shape().depth_)),
                                    reshape(key, tensor_shape(1, key.shape().width_, key.shape().depth_)))))),
                    tensor_shape(query.shape().width_, key.shape().width_)));
        const tensor distribution = softmax(scores);
        return { dot_product_tensors(distribution, value, std::vector<int>({ 2, 1 }), false) };
    }

    std::string score_mode_;
    float_type scale_;
    float_type concat_score_weight_;
};

}
}
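// Illustrative note, not part of the original sources: in "dot" mode the
// attention_layer above computes
//
//     scores       = scale * (Q * K^T)
//     distribution = softmax(scores)
//     output       = distribution * V
//
// while "concat" mode scores position pairs additively as
// concat_score_weight * sum_depth(tanh(scale * (q_i + k_j))), following the
// Keras reference implementation linked in the code.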
frugally-deep-0.17.1/include/fdeep/layers/average_layer.hpp000066400000000000000000000011551476372554500237100ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

class average_layer : public layer {
public:
    explicit average_layer(const std::string& name)
        : layer(name)
    {
    }

protected:
    tensors apply_impl(const tensors& input) const override
    {
        return { average_tensors(input) };
    }
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/average_pooling_3d_layer.hpp000066400000000000000000000040321476372554500260220ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/pooling_3d_layer.hpp"

#include <limits>
#include <string>

namespace fdeep {
namespace internal {

inline void inner_average_pool(const tensor& in, tensor& out,
    std::size_t pool_size_d4, std::size_t pool_height, std::size_t pool_width,
    std::size_t strides_d4, std::size_t strides_y, std::size_t strides_x,
    std::size_t d4, std::size_t y, std::size_t x, std::size_t z,
    int pad_front_int, int pad_top_int, int pad_left_int)
{
    const float_type invalid = std::numeric_limits<float_type>::lowest();
    float_type val = 0;
    std::size_t divisor = 0;
    for (std::size_t d4f = 0; d4f < pool_size_d4; ++d4f) {
        int in_get_d4 = static_cast<int>(strides_d4 * d4 + d4f) - pad_front_int;
        for (std::size_t yf = 0; yf < pool_height; ++yf) {
            int in_get_y = static_cast<int>(strides_y * y + yf) - pad_top_int;
            for (std::size_t xf = 0; xf < pool_width; ++xf) {
                int in_get_x = static_cast<int>(strides_x * x + xf) - pad_left_int;
                const auto current = in.get_padded(invalid,
                    0, in_get_d4, in_get_y, in_get_x, static_cast<int>(z));
                if (current != invalid) {
                    val += current;
                    divisor += 1;
                }
            }
        }
    }
    out.set_ignore_rank(tensor_pos(d4, y, x, z), val / static_cast<float_type>(divisor));
}

class average_pooling_3d_layer : public pooling_3d_layer {
public:
    explicit average_pooling_3d_layer(const std::string& name,
        const shape3& pool_size, const shape3& strides, padding p)
        : pooling_3d_layer(name, pool_size, strides, p, &inner_average_pool)
    {
    }
};

}
}
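// Illustrative note, not part of the original sources: since padded
// positions return the sentinel value `invalid` and are skipped,
// inner_average_pool divides by the number of cells that really lie inside
// the input. A 2x2 window hanging over the border of a "same"-padded tensor
// therefore averages only the valid values it covers instead of always
// dividing by four, matching Keras' AveragePooling behavior at borders.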
frugally-deep-0.17.1/include/fdeep/layers/batch_normalization_layer.hpp000066400000000000000000000050331476372554500263240ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

// https://kratzert.github.io/2016/02/12/understanding-the-gradient-flow-through-the-batch-normalization-layer.html
// https://stackoverflow.com/a/46444452/1866775
class batch_normalization_layer : public layer {
public:
    explicit batch_normalization_layer(const std::string& name,
        int axis,
        const float_vec& moving_mean,
        const float_vec& moving_variance,
        const float_vec& beta,
        const float_vec& gamma,
        float_type epsilon)
        : layer(name)
        , axis_(axis)
        , moving_mean_(fplus::make_shared_ref<float_vec>(moving_mean))
        , moving_variance_(fplus::make_shared_ref<float_vec>(moving_variance))
        , beta_(fplus::make_shared_ref<float_vec>(beta))
        , gamma_(fplus::make_shared_ref<float_vec>(gamma))
        , epsilon_(epsilon)
    {
        assertion(moving_variance.size() == moving_mean.size(), "invalid sizes");
        assertion(beta.empty() || beta.size() == moving_mean.size(), "invalid sizes");
        assertion(gamma.empty() || gamma.size() == moving_mean.size(), "invalid sizes");
    }

protected:
    int axis_;
    shared_float_vec moving_mean_;
    shared_float_vec moving_variance_;
    shared_float_vec beta_;
    shared_float_vec gamma_;
    float_type epsilon_;

    tensors apply_impl(const tensors& inputs) const override
    {
        const auto input = single_tensor_from_tensors(inputs);
        std::vector<std::size_t> dims(5, 1);
        dims[rank_aligned_axis_to_absolute_axis(input.shape().rank(), axis_) - 1] = moving_mean_->size();
        const tensor_shape params_shape = create_tensor_shape_from_dims(dims);
        return { batch_normalization(
            input,
            broadcast(tensor(params_shape, moving_mean_), input.shape()),
            broadcast(tensor(params_shape, moving_variance_), input.shape()),
            beta_->empty() ? tensor(input.shape(), 0) : broadcast(tensor(params_shape, beta_), input.shape()),
            gamma_->empty() ? tensor(input.shape(), 1) : broadcast(tensor(params_shape, gamma_), input.shape()),
            epsilon_) };
    }
};

}
}
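// Illustrative note, not part of the original sources: with the parameter
// tensors broadcast as above, the underlying batch_normalization evaluates
// the usual inference-time expression per element:
//
//     out = gamma * (in - moving_mean) / sqrt(moving_variance + epsilon) + beta
//
// Empty beta or gamma vectors (center=False or scale=False in Keras)
// degrade to the constants 0 and 1 respectively.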
frugally-deep-0.17.1/include/fdeep/layers/category_encoding_layer.hpp000066400000000000000000000047051476372554500257650ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"

#include <string>
#include <vector>

namespace fdeep {
namespace internal {

class category_encoding_layer : public layer {
public:
    explicit category_encoding_layer(const std::string& name,
        const std::size_t& num_tokens, const std::string& output_mode)
        : layer(name)
        , num_tokens_(num_tokens)
        , output_mode_(output_mode)
    {
        assertion(output_mode_ == "one_hot" || output_mode_ == "multi_hot" || output_mode_ == "count",
            "Unsupported output mode (" + output_mode_ + ").");
    }

protected:
    tensors apply_impl(const tensors& inputs) const override
    {
        assertion(inputs.size() == 1, "need exactly one input");
        const auto input = inputs[0];
        assertion(input.shape().rank() == 1,
            "Tensor of rank 1 required, but shape is '" + show_tensor_shape(input.shape()) + "'");
        if (output_mode_ == "one_hot") {
            assertion(input.shape().depth_ == 1,
                "Tensor of depth 1 required, but is: " + fplus::show(input.shape().depth_));
            tensor out(tensor_shape(num_tokens_), float_type(0));
            const std::size_t idx = fplus::floor(input.get_ignore_rank(tensor_pos(0)));
            assertion(idx < num_tokens_, "Invalid input value (>= num_tokens).");
            out.set_ignore_rank(tensor_pos(idx), 1);
            return { out };
        } else {
            tensor out(tensor_shape(num_tokens_), float_type(0));
            for (const auto& x : *(input.as_vector())) {
                const std::size_t idx = fplus::floor(x);
                assertion(idx < num_tokens_, "Invalid input value (>= num_tokens).");
                if (output_mode_ == "multi_hot") {
                    out.set_ignore_rank(tensor_pos(idx), 1);
                } else if (output_mode_ == "count") {
                    out.set_ignore_rank(tensor_pos(idx), out.get_ignore_rank(tensor_pos(idx)) + 1);
                }
            }
            return { out };
        }
    }

    std::size_t num_tokens_;
    std::string output_mode_;
};

}
}
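// Illustrative note, not part of the original sources: with num_tokens = 4,
// an input sequence [1, 2, 2] yields
//
//     multi_hot: [0, 1, 1, 0]   (token present at least once)
//     count:     [0, 1, 2, 0]   (occurrences per token)
//
// while one_hot accepts a single value per tensor (depth 1), e.g. [2]
// becomes [0, 0, 1, 0].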
frugally-deep-0.17.1/include/fdeep/layers/centercrop_layer.hpp000066400000000000000000000027571476372554500244510ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

class centercrop_layer : public layer {
public:
    explicit centercrop_layer(const std::string& name, std::size_t height, std::size_t width)
        : layer(name)
        , height_(height)
        , width_(width)
    {
    }

protected:
    tensors apply_impl(const tensors& inputs) const override
    {
        auto input = single_tensor_from_tensors(inputs);
        if (input.shape().height_ < height_ || input.shape().width_ < width_) {
            input = smart_resize_tensor_2d(input, shape2(height_, width_), "bilinear");
        }
        const std::size_t excess_height = input.shape().height_ - height_;
        const std::size_t excess_width = input.shape().width_ - width_;
        const std::size_t top_crop = excess_height / 2;
        const std::size_t left_crop = excess_width / 2;
        const std::size_t bottom_crop = excess_height - top_crop;
        const std::size_t right_crop = excess_width - left_crop;
        return { crop_tensor(0, 0, top_crop, bottom_crop, left_crop, right_crop, input) };
    }

    std::size_t height_;
    std::size_t width_;
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/concatenate_layer.hpp000066400000000000000000000012671476372554500245660ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

class concatenate_layer : public layer {
public:
    explicit concatenate_layer(const std::string& name, int axis)
        : layer(name)
        , axis_(axis)
    {
    }

protected:
    tensors apply_impl(const tensors& input) const override
    {
        return { concatenate_tensors(input, axis_) };
    }

    int axis_;
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/conv_2d_layer.hpp000066400000000000000000000031111476372554500236240ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/convolution.hpp"
#include "fdeep/filter.hpp"
#include "fdeep/layers/layer.hpp"
#include "fdeep/shape2.hpp"
#include "fdeep/tensor_shape.hpp"

#include <cstddef>
#include <memory>
#include <string>
#include <vector>

namespace fdeep {
namespace internal {

class conv_2d_layer : public layer {
public:
    explicit conv_2d_layer(
        const std::string& name, const tensor_shape& filter_shape,
        std::size_t k, const shape2& strides, padding p,
        const shape2& dilation_rate,
        const float_vec& weights, const float_vec& bias)
        : layer(name)
        , filters_(generate_im2col_filter_matrix(
              generate_filters(dilation_rate, filter_shape, k, weights, bias, false)))
        , strides_(strides)
        , padding_(p)
    {
        assertion(k > 0, "needs at least one filter");
        assertion(filter_shape.volume() > 0, "filter must have volume");
        assertion(strides.area() > 0, "invalid strides");
    }

protected:
    tensors apply_impl(const tensors& inputs) const override
    {
        const auto& input = single_tensor_from_tensors(inputs);
        return { convolve(strides_, padding_, filters_, input) };
    }

    convolution_filter_matrices filters_;
    shape2 strides_;
    padding padding_;
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/conv_2d_transpose_layer.hpp000066400000000000000000000032621476372554500257270ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/convolution.hpp"
#include "fdeep/filter.hpp"
#include "fdeep/layers/layer.hpp"
#include "fdeep/shape2.hpp"
#include "fdeep/tensor_shape.hpp"

#include <cstddef>
#include <memory>
#include <string>
#include <vector>

namespace fdeep {
namespace internal {

class conv_2d_transpose_layer : public layer {
public:
    explicit conv_2d_transpose_layer(
        const std::string& name, const tensor_shape& filter_shape,
        std::size_t k, const shape2& strides, padding p,
        const shape2& dilation_rate,
        const float_vec& weights, const float_vec& bias)
        : layer(name)
        , filters_(generate_im2col_filter_matrix(
              generate_filters(dilation_rate, filter_shape, k, weights, bias, true)))
        , dilation_rate_(dilation_rate)
        , strides_(strides)
        , padding_(p)
    {
        assertion(k > 0, "needs at least one filter");
        assertion(filter_shape.volume() > 0, "filter must have volume");
        assertion(strides.area() > 0, "invalid strides");
    }

protected:
    tensors apply_impl(const tensors& inputs) const override
    {
        const auto& input = single_tensor_from_tensors(inputs);
        return { convolve_transposed(strides_, padding_, filters_, input) };
    }

    convolution_filter_matrices filters_;
    shape2 dilation_rate_;
    shape2 strides_;
    padding padding_;
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/cropping_3d_layer.hpp000066400000000000000000000025671476372554500245130ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

class cropping_3d_layer : public layer {
public:
    explicit cropping_3d_layer(const std::string& name,
        std::size_t front_crop, std::size_t back_crop,
        std::size_t top_crop, std::size_t bottom_crop,
        std::size_t left_crop, std::size_t right_crop)
        : layer(name)
        , front_crop_(front_crop)
        , back_crop_(back_crop)
        , top_crop_(top_crop)
        , bottom_crop_(bottom_crop)
        , left_crop_(left_crop)
        , right_crop_(right_crop)
    {
    }

protected:
    tensors apply_impl(const tensors& inputs) const override
    {
        const auto& input = single_tensor_from_tensors(inputs);
        return { crop_tensor(front_crop_, back_crop_, top_crop_, bottom_crop_, left_crop_, right_crop_, input) };
    }

    std::size_t front_crop_;
    std::size_t back_crop_;
    std::size_t top_crop_;
    std::size_t bottom_crop_;
    std::size_t left_crop_;
    std::size_t right_crop_;
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/dense_layer.hpp000066400000000000000000000101551476372554500233740ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"
#include "fdeep/tensor.hpp"

#include <cstddef>
#include <memory>
#include <string>
#include <vector>

namespace fdeep {
namespace internal {

// Takes a single stack volume (tensor_shape(n)) as input.
class dense_layer : public layer {
public:
    static RowMajorMatrixXf generate_params(std::size_t n_in,
        const float_vec& weights, const float_vec& bias)
    {
        assertion(weights.size() % bias.size() == 0, "invalid params");
        return eigen_row_major_mat_from_values(n_in + 1, bias.size(),
            fplus::append(weights, bias));
    }
    dense_layer(const std::string& name, std::size_t units,
        const float_vec& weights, const float_vec& bias)
        : layer(name)
        , n_in_(weights.size() / bias.size())
        , n_out_(units)
        , params_(generate_params(n_in_, weights, bias))
    {
        assertion(bias.size() == units, "invalid bias count");
        assertion(weights.size() % units == 0, "invalid weight count");
    }

protected:
    tensors apply_impl(const tensors& inputs) const override
    {
        assertion(inputs.size() == 1, "invalid number of input tensors");
        auto input = inputs.front();
        // According to the Keras documentation
        // https://keras.io/layers/core/#dense
        // "if the input to the layer has a rank greater than 2,
        // then it is flattened prior to the initial dot product with kernel."
        // But this seems to not be the case.
        // Instead it does this: https://stackoverflow.com/a/43237727/1866775
        // Otherwise the following would need to be done:
        // if (input.shape().get_not_one_dimension_count() > 1)
        // {
        //     input = flatten_tensor(input);
        // }
        const auto feature_arr = input.as_vector();
        const size_t size = feature_arr->size();
        const size_t depth = input.shape().depth_;
        assertion(depth == n_in_ && (size % depth) == 0, "Invalid input value count.");
        std::vector<float_type> result_values((input.shape().volume() / depth) * n_out_);
        const size_t n_of_parts = size / depth;

        // Split the stored parameter matrix into its kernel part
        // (all rows but the last) and the bias part (the last row).
        Eigen::Map<const RowMajorMatrixXf> params(
            params_.data(),
            static_cast<Eigen::Index>(params_.rows() - 1),
            static_cast<Eigen::Index>(params_.cols()));
        Eigen::Map<const RowMajorMatrixXf> bias(
            params_.data() + (params_.rows() - 1) * params_.cols(),
            static_cast<Eigen::Index>(1),
            static_cast<Eigen::Index>(params_.cols()));

        for (size_t part_id = 0; part_id < n_of_parts; ++part_id) {
            Eigen::Map<const RowMajorMatrixXf> m(
                &(*feature_arr)[part_id * depth],
                static_cast<Eigen::Index>(1),
                static_cast<Eigen::Index>(depth));
            Eigen::Map<RowMajorMatrixXf> res_m(
                &result_values[part_id * n_out_],
                static_cast<Eigen::Index>(1),
                static_cast<Eigen::Index>(n_out_));
            res_m.noalias() = m * params + bias;
        }
        return { tensor(tensor_shape_with_changed_rank(
                            tensor_shape(
                                input.shape().size_dim_5_,
                                input.shape().size_dim_4_,
                                input.shape().height_,
                                input.shape().width_,
                                n_out_),
                            input.shape().rank()),
            std::move(result_values)) };
    }

    std::size_t n_in_;
    std::size_t n_out_;
    RowMajorMatrixXf params_;
};

}
}
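// Illustrative sketch, not part of the original sources: dense_layer stores
// kernel and bias in one (n_in + 1) x n_out row-major matrix with the bias
// as the last row, so y = x * W + b falls out of plain matrix products.
// A stand-alone toy version of the same trick (hypothetical helper):
//
//     // #include <Eigen/Dense>
//     // inline Eigen::RowVectorXf toy_dense(const Eigen::RowVectorXf& x,
//     //     const Eigen::MatrixXf& params) // (n_in + 1) x n_out
//     // {
//     //     const auto n_in = params.rows() - 1;
//     //     return x * params.topRows(n_in) + params.row(n_in);
//     // }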
frugally-deep-0.17.1/include/fdeep/layers/depthwise_conv_2d_layer.hpp000066400000000000000000000040571476372554500257100ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/depthwise_convolution.hpp"
#include "fdeep/filter.hpp"
#include "fdeep/layers/layer.hpp"
#include "fdeep/shape2.hpp"
#include "fdeep/tensor_shape.hpp"

#include <algorithm>
#include <cstddef>
#include <memory>
#include <string>
#include <vector>

namespace fdeep {
namespace internal {

// Convolve depth slices separately.
class depthwise_conv_2d_layer : public layer {
public:
    explicit depthwise_conv_2d_layer(
        const std::string& name, std::size_t input_depth,
        const tensor_shape& filter_shape,
        const shape2& strides, padding p,
        const shape2& dilation_rate,
        const float_vec& depthwise_weights, const float_vec& bias)
        : layer(name)
        , filters_(generate_im2col_filter_matrix(
              generate_filters(dilation_rate, filter_shape, input_depth, depthwise_weights, bias, false)))
        , strides_(strides)
        , padding_(p)
    {
        assertion(filter_shape.volume() > 0, "filter must have volume");
        assertion(strides.area() > 0, "invalid strides");
        assertion(filters_.filter_count_ == input_depth, "invalid number of filters");
        assertion(filters_.filter_shape_.depth_ == 1, "invalid filter shape");
    }

protected:
    tensors apply_impl(const tensors& inputs) const override
    {
        const auto& input = single_tensor_from_tensors(inputs);
        const auto result = depthwise_convolve(strides_, padding_, filters_, input);
        assertion(result.shape().depth_ == input.shape().depth_, "Invalid output shape");
        return { result };
    }

    convolution_filter_matrices filters_;
    shape2 strides_;
    padding padding_;
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/dot_layer.hpp000066400000000000000000000016101476372554500230600ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

class dot_layer : public layer {
public:
    explicit dot_layer(const std::string& name,
        const std::vector<int>& axes, bool normalize)
        : layer(name)
        , axes_(axes)
        , normalize_(normalize)
    {
    }
    std::vector<int> axes_;
    bool normalize_;

protected:
    tensors apply_impl(const tensors& input) const override
    {
        assertion(input.size() == 2, "need exactly 2 input tensors");
        return { dot_product_tensors(input[0], input[1], axes_, normalize_) };
    }
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/elu_layer.hpp000066400000000000000000000017021476372554500230610ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/activation_layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

class elu_layer : public activation_layer {
public:
    explicit elu_layer(const std::string& name, float_type alpha)
        : activation_layer(name)
        , alpha_(alpha)
    {
    }

protected:
    float_type alpha_;
    static float_type activation_function(float_type alpha, float_type x)
    {
        return x >= 0 ? x : alpha * (std::exp(x) - 1);
    }
    tensor transform_input(const tensor& in_vol) const override
    {
        return transform_tensor(
            fplus::bind_1st_of_2(activation_function, alpha_), in_vol);
    }
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/embedding_layer.hpp000066400000000000000000000044701476372554500242170ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"

#include <algorithm>
#include <string>

namespace fdeep {
namespace internal {

class embedding_layer : public layer {
public:
    explicit embedding_layer(const std::string& name,
        std::size_t input_dim, std::size_t output_dim, const float_vec& weights)
        : layer(name)
        , input_dim_(input_dim)
        , output_dim_(output_dim)
        , weights_(weights)
    {
    }

protected:
    tensors apply_impl(const tensors& inputs) const override final
    {
        const auto input_shapes = fplus::transform(
            fplus_c_mem_fn_t(tensor, shape, tensor_shape), inputs);
        // ensure that tensor shape is (1, 1, 1, 1, seq_len)
        assertion(inputs.front().shape().size_dim_5_ == 1
                && inputs.front().shape().size_dim_4_ == 1
                && inputs.front().shape().height_ == 1
                && inputs.front().shape().width_ == 1,
            "size_dim_5, size_dim_4, height and width dimension must be 1, but shape is '"
                + show_tensor_shapes(input_shapes) + "'");

        tensors results;
        for (auto&& input : inputs) {
            const std::size_t sequence_len = input.shape().depth_;
            float_vec output_vec(sequence_len * output_dim_);
            auto&& it = output_vec.begin();
            for (std::size_t i = 0; i < sequence_len; ++i) {
                std::size_t index = static_cast<std::size_t>(input.get(tensor_pos(i)));
                assertion(index < input_dim_,
                    "vocabulary item indices must all be strictly less than the value of input_dim");
                it = std::copy_n(weights_.cbegin() + static_cast<std::ptrdiff_t>(index * output_dim_),
                    output_dim_, it);
            }
            results.push_back(tensor(tensor_shape(sequence_len, output_dim_), std::move(output_vec)));
        }
        return results;
    }

    const std::size_t input_dim_;
    const std::size_t output_dim_;
    const float_vec weights_;
};

}
}
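// Illustrative note, not part of the original sources: the lookup above maps
// a sequence of indices to rows of the weight matrix. With input_dim = 3,
// output_dim = 2 and weights [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]
// (hypothetical values), the input sequence [2, 0] produces the rank-2
// tensor [[0.5, 0.6], [0.1, 0.2]].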
frugally-deep-0.17.1/include/fdeep/layers/exponential_layer.hpp000066400000000000000000000013451476372554500246250ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/activation_layer.hpp"
#include "fdeep/recurrent_ops.hpp"

#include <algorithm>
#include <string>

namespace fdeep {
namespace internal {

class exponential_layer : public activation_layer {
public:
    explicit exponential_layer(const std::string& name)
        : activation_layer(name)
    {
    }

protected:
    tensor transform_input(const tensor& in_vol) const override
    {
        return transform_tensor(exponential_activation, in_vol);
    }
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/flatten_layer.hpp000066400000000000000000000013671476372554500237400ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

// Converts a volume into single column volume (tensor_shape(n)).
class flatten_layer : public layer {
public:
    explicit flatten_layer(const std::string& name)
        : layer(name)
    {
    }

protected:
    tensors apply_impl(const tensors& inputs) const override
    {
        const auto& input = single_tensor_from_tensors(inputs);
        return { flatten_tensor(input) };
    }
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/gelu_layer.hpp000066400000000000000000000013201476372554500232240ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/activation_layer.hpp"
#include "fdeep/recurrent_ops.hpp"

#include <algorithm>
#include <string>

namespace fdeep {
namespace internal {

class gelu_layer : public activation_layer {
public:
    explicit gelu_layer(const std::string& name)
        : activation_layer(name)
    {
    }

protected:
    tensor transform_input(const tensor& in_vol) const override
    {
        return transform_tensor(gelu_activation, in_vol);
    }
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/global_average_pooling_3d_layer.hpp000066400000000000000000000031511476372554500273430ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/global_pooling_layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

class global_average_pooling_3d_layer : public global_pooling_layer {
public:
    explicit global_average_pooling_3d_layer(const std::string& name, bool keepdims)
        : global_pooling_layer(name)
        , keepdims_(keepdims)
    {
    }

protected:
    tensor pool(const tensor& in) const override
    {
        const auto out_dimensions = keepdims_
            ? fplus::append_elem(in.shape().depth_, std::vector<std::size_t>(in.shape().rank() - 1, 1))
            : fplus::singleton_seq(in.shape().depth_);
        tensor out(create_tensor_shape_from_dims(out_dimensions), 0);
        for (std::size_t z = 0; z < in.shape().depth_; ++z) {
            float_type val = 0;
            for (std::size_t d4 = 0; d4 < in.shape().size_dim_4_; ++d4) {
                for (std::size_t y = 0; y < in.shape().height_; ++y) {
                    for (std::size_t x = 0; x < in.shape().width_; ++x) {
                        val += in.get_ignore_rank(tensor_pos(d4, y, x, z));
                    }
                }
            }
            out.set_ignore_rank(tensor_pos(z),
                val / static_cast<float_type>(in.shape().size_dim_4_ * in.shape().height_ * in.shape().width_));
        }
        return out;
    }
    bool keepdims_;
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/global_max_pooling_3d_layer.hpp000066400000000000000000000031431476372554500265170ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/global_pooling_layer.hpp"

#include <algorithm>
#include <limits>
#include <string>

namespace fdeep {
namespace internal {

class global_max_pooling_3d_layer : public global_pooling_layer {
public:
    explicit global_max_pooling_3d_layer(const std::string& name, bool keepdims)
        : global_pooling_layer(name)
        , keepdims_(keepdims)
    {
    }

protected:
    tensor pool(const tensor& in) const override
    {
        const auto out_dimensions = keepdims_
            ? fplus::append_elem(in.shape().depth_, std::vector<std::size_t>(in.shape().rank() - 1, 1))
            : fplus::singleton_seq(in.shape().depth_);
        tensor out(create_tensor_shape_from_dims(out_dimensions), 0);
        for (std::size_t z = 0; z < in.shape().depth_; ++z) {
            float_type val = std::numeric_limits<float_type>::lowest();
            for (std::size_t d4 = 0; d4 < in.shape().size_dim_4_; ++d4) {
                for (std::size_t y = 0; y < in.shape().height_; ++y) {
                    for (std::size_t x = 0; x < in.shape().width_; ++x) {
                        val = std::max(val, in.get_ignore_rank(tensor_pos(d4, y, x, z)));
                    }
                }
            }
            out.set_ignore_rank(tensor_pos(z), val);
        }
        return out;
    }
    bool keepdims_;
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/global_pooling_layer.hpp000066400000000000000000000016001476372554500252600ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"

#include <algorithm>
#include <cstddef>
#include <memory>
#include <string>
#include <vector>

namespace fdeep {
namespace internal {

// Abstract base class for global pooling layers
class global_pooling_layer : public layer {
public:
    explicit global_pooling_layer(const std::string& name)
        : layer(name)
    {
    }

protected:
    tensors apply_impl(const tensors& inputs) const override final
    {
        const auto& input = single_tensor_from_tensors(inputs);
        return { pool(input) };
    }
    virtual tensor pool(const tensor& input) const = 0;
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/hard_sigmoid_layer.hpp000066400000000000000000000013531476372554500247270ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/activation_layer.hpp"
#include "fdeep/recurrent_ops.hpp"

#include <algorithm>
#include <string>

namespace fdeep {
namespace internal {

class hard_sigmoid_layer : public activation_layer {
public:
    explicit hard_sigmoid_layer(const std::string& name)
        : activation_layer(name)
    {
    }

protected:
    tensor transform_input(const tensor& in_vol) const override
    {
        return transform_tensor(hard_sigmoid_activation, in_vol);
    }
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/input_layer.hpp000066400000000000000000000017471476372554500234400ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

class input_layer : public layer {
public:
    explicit input_layer(const std::string& name, const tensor_shape_variable& input_shape)
        : layer(name)
        , input_shape_(input_shape)
        , output_()
    {
    }

protected:
    tensors apply_impl(const tensors& inputs) const override
    {
        assertion(inputs.size() == 1, "need exactly one input");
        assertion(inputs.front().shape() == input_shape_, "invalid input size");
        return inputs;
    }

    tensor_shape_variable input_shape_;

    // provide initial tensor for computation
    mutable fplus::maybe<tensor> output_;
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/layer.hpp000066400000000000000000000062641476372554500222240ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/common.hpp"
#include "fdeep/tensor.hpp"
#include "fdeep/node.hpp"

#include <cstddef>
#include <memory>
#include <string>
#include <vector>

namespace fdeep {
namespace internal {

class layer;
typedef std::shared_ptr<layer> layer_ptr;
typedef std::vector<layer_ptr> layer_ptrs;

class activation_layer;
typedef std::shared_ptr<activation_layer> activation_layer_ptr;
tensors apply_activation_layer(const activation_layer_ptr& ptr, const tensors& input);

class layer {
public:
    explicit layer(const std::string& name)
        : name_(name)
        , nodes_()
        , activation_(nullptr)
    {
    }
    virtual ~layer()
    {
    }
    void set_activation(const activation_layer_ptr& activation)
    {
        activation_ = activation;
    }
    void set_nodes(const nodes& layer_nodes)
    {
        nodes_ = layer_nodes;
    }
    virtual tensors apply(const tensors& input) const final
    {
        const auto result = apply_impl(input);
        if (activation_ == nullptr)
            return result;
        else
            return apply_activation_layer(activation_, result);
    }
    virtual tensor get_output(const layer_ptrs& layers,
        output_dict& output_cache,
        std::size_t node_idx, std::size_t tensor_idx) const
    {
        const node_connection conn(name_, node_idx, tensor_idx);
        if (!fplus::map_contains(output_cache, conn.without_tensor_idx())) {
            assertion(node_idx < nodes_.size(), "invalid node index");
            output_cache[conn.without_tensor_idx()] = nodes_[node_idx].get_output(layers, output_cache, *this);
        }
        const auto& outputs = fplus::get_from_map_unsafe(
            output_cache, conn.without_tensor_idx());
        assertion(tensor_idx < outputs.size(), "invalid tensor index");
        return outputs[tensor_idx];
    }
    std::string name_;
    nodes nodes_;

protected:
    virtual tensors apply_impl(const tensors& input) const = 0;
    activation_layer_ptr activation_;
};

inline tensor get_layer_output(const layer_ptrs& layers,
    output_dict& output_cache,
    const layer_ptr& layer,
    std::size_t node_idx,
    std::size_t tensor_idx)
{
    return layer->get_output(layers, output_cache, node_idx, tensor_idx);
}

inline tensors apply_layer(const layer& layer, const tensors& inputs)
{
    return layer.apply(inputs);
}

inline layer_ptr get_layer(const layer_ptrs& layers, const std::string& layer_id)
{
    const auto is_matching_layer = [layer_id](const layer_ptr& ptr) -> bool {
        return ptr->name_ == layer_id;
    };
    return fplus::throw_on_nothing(
        error("dangling layer reference: " + layer_id),
        fplus::find_first_by(is_matching_layer, layers));
}

}
}
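// Illustrative note, not part of the original sources: get_output above
// memoizes results per (layer, node) in output_cache, so in a graph like
//
//     input -> shared -> a --+
//                  \         +-> concatenate
//                   +--> b --+
//
// the shared layer is evaluated once and both consumers read the cached
// tensors instead of recomputing them.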
frugally-deep-0.17.1/include/fdeep/layers/layer_normalization_layer.hpp000066400000000000000000000044071476372554500263630ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

class layer_normalization_layer : public layer {
public:
    explicit layer_normalization_layer(const std::string& name,
        std::vector<int> axes,
        const float_vec& beta,
        const float_vec& gamma,
        float_type epsilon)
        : layer(name)
        , axes_(axes)
        , beta_(fplus::make_shared_ref<float_vec>(beta))
        , gamma_(fplus::make_shared_ref<float_vec>(gamma))
        , epsilon_(epsilon)
    {
    }

protected:
    std::vector<int> axes_;
    shared_float_vec beta_;
    shared_float_vec gamma_;
    float_type epsilon_;

    tensors apply_impl(const tensors& inputs) const override
    {
        const auto& input = single_tensor_from_tensors(inputs);
        // https://github.com/keras-team/keras/blob/v2.14.0/keras/layers/normalization/layer_normalization.py#L291-L304
        const auto& input_moments = moments(input, axes_);
        const auto& mean = input_moments.first;
        const auto& variance = input_moments.second;

        std::vector<std::size_t> dims(5, 1);
        tensor_shape input_shape = input.shape();
        input_shape.maximize_rank();
        const auto input_shape_dimensions = input_shape.dimensions();
        for (const auto axis : axes_) {
            const std::size_t pos = rank_aligned_axis_to_absolute_axis(input.shape().rank(), axis) - 1;
            dims[pos] = input_shape_dimensions[pos];
        }
        const tensor_shape params_shape = create_tensor_shape_from_dims(dims);
        return { batch_normalization(
            input,
            mean,
            variance,
            beta_->empty() ? tensor(input.shape(), 0) : broadcast(tensor(params_shape, beta_), input.shape()),
            gamma_->empty() ? tensor(input.shape(), 1) : broadcast(tensor(params_shape, gamma_), input.shape()),
            epsilon_) };
    }
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/leaky_relu_layer.hpp000066400000000000000000000016531476372554500244350ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/activation_layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

class leaky_relu_layer : public activation_layer {
public:
    explicit leaky_relu_layer(const std::string& name, float_type negative_slope)
        : activation_layer(name)
        , negative_slope_(negative_slope)
    {
    }

protected:
    float_type negative_slope_;
    tensor transform_input(const tensor& in_vol) const override
    {
        auto activation_function = [this](float_type x) -> float_type {
            return x > 0 ? x : negative_slope_ * x;
        };
        return transform_tensor(activation_function, in_vol);
    }
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/linear_layer.hpp000066400000000000000000000011741476372554500235510ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/activation_layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

class linear_layer : public activation_layer {
public:
    explicit linear_layer(const std::string& name)
        : activation_layer(name)
    {
    }

protected:
    tensor transform_input(const tensor& in_vol) const override
    {
        return in_vol;
    }
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/max_pooling_3d_layer.hpp000066400000000000000000000036161476372554500252040ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/pooling_3d_layer.hpp"

#include <algorithm>
#include <limits>

namespace fdeep {
namespace internal {

inline void inner_max_pool(const tensor& in, tensor& out,
    std::size_t pool_size_d4, std::size_t pool_height, std::size_t pool_width,
    std::size_t strides_d4, std::size_t strides_y, std::size_t strides_x,
    std::size_t d4, std::size_t y, std::size_t x, std::size_t z,
    int pad_front_int, int pad_top_int, int pad_left_int)
{
    const float_type invalid = std::numeric_limits<float_type>::lowest();
    float_type val = std::numeric_limits<float_type>::lowest();
    for (std::size_t d4f = 0; d4f < pool_size_d4; ++d4f) {
        int in_get_d4 = static_cast<int>(strides_d4 * d4 + d4f) - pad_front_int;
        for (std::size_t yf = 0; yf < pool_height; ++yf) {
            int in_get_y = static_cast<int>(strides_y * y + yf) - pad_top_int;
            for (std::size_t xf = 0; xf < pool_width; ++xf) {
                int in_get_x = static_cast<int>(strides_x * x + xf) - pad_left_int;
                const auto current = in.get_padded(invalid,
                    0, in_get_d4, in_get_y, in_get_x, static_cast<int>(z));
                val = std::max(val, current);
            }
        }
    }
    out.set_ignore_rank(tensor_pos(d4, y, x, z), val);
}

class max_pooling_3d_layer : public pooling_3d_layer {
public:
    explicit max_pooling_3d_layer(const std::string& name,
        const shape3& pool_size, const shape3& strides, padding p)
        : pooling_3d_layer(name, pool_size, strides, p, &inner_max_pool)
    {
    }
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/maximum_layer.hpp000066400000000000000000000011511476372554500237470ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

class maximum_layer : public layer {
public:
    explicit maximum_layer(const std::string& name)
        : layer(name)
    {
    }

protected:
    tensors apply_impl(const tensors& input) const override
    {
        return { max_tensors(input) };
    }
};

}
}
frugally-deep-0.17.1/include/fdeep/layers/minimum_layer.hpp000066400000000000000000000011511476372554500237450ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/layer.hpp"

#include <string>

namespace fdeep {
namespace internal {

class minimum_layer : public layer {
public:
    explicit minimum_layer(const std::string& name)
        : layer(name)
    {
    }

protected:
    tensors apply_impl(const tensors& input) const override
    {
        return { min_tensors(input) };
    }
};

}
}
// (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/common.hpp" #include "fdeep/tensor.hpp" #include "fdeep/layers/layer.hpp" #include #include #include #include namespace fdeep { namespace internal { class model_layer : public layer { public: explicit model_layer(const std::string& name, const layer_ptrs& layers, const node_connections& input_connections, const node_connections& output_connections) : layer(name) , layers_(layers) , input_connections_(input_connections) , output_connections_(output_connections) { assertion(fplus::all_unique( fplus::transform(fplus_get_ptr_mem(name_), layers)), "layer names must be unique"); } tensor get_output(const layer_ptrs& layers, output_dict& output_cache, std::size_t node_idx, std::size_t tensor_idx) const override { // https://stackoverflow.com/questions/46011749/understanding-keras-model-architecture-node-index-of-nested-model if (node_idx >= 1) { node_idx = node_idx - 1; } assertion(node_idx < nodes_.size(), "invalid node index: " + std::to_string(node_idx) + " of " + std::to_string(nodes_.size())); return layer::get_output(layers, output_cache, node_idx, tensor_idx); } protected: tensors apply_impl(const tensors& inputs) const override { output_dict output_cache; assertion(inputs.size() == input_connections_.size(), "invalid number of input tensors for this model: " + fplus::show(input_connections_.size()) + " required but " + fplus::show(inputs.size()) + " provided"); for (std::size_t i = 0; i < inputs.size(); ++i) { output_cache[input_connections_[i].without_tensor_idx()] = { inputs[i] }; } const auto get_output = [this, &output_cache](const node_connection& conn) -> tensor { return get_layer(layers_, conn.layer_id_)->get_output(layers_, output_cache, conn.node_idx_, conn.tensor_idx_); }; return fplus::transform(get_output, output_connections_); } layer_ptrs layers_; node_connections input_connections_; node_connections output_connections_; }; } } frugally-deep-0.17.1/include/fdeep/layers/multi_head_attention_layer.hpp000066400000000000000000000144731476372554500265050ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
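// Note on the math: each head below computes standard scaled dot-product
// attention, softmax(Q * K^T / sqrt(d_k)) * V, where d_k is the per-head key
// dimension (key_dim_); the per-head results are then concatenated along the
// depth axis and projected by the final output dense layer.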
// (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/dense_layer.hpp" #include "fdeep/layers/layer.hpp" #include "fdeep/layers/softmax_layer.hpp" #include namespace fdeep { namespace internal { class multi_head_attention_layer : public layer { public: explicit multi_head_attention_layer(const std::string& name, std::size_t num_heads, std::size_t key_dim, std::size_t value_dim, bool use_bias, const std::vector& weights_and_biases) : layer(name) , num_heads_(num_heads) , key_dim_(key_dim) , value_dim_(value_dim) , query_dense_(create_dense_layers(weights_and_biases, use_bias, num_heads, 0, key_dim, name + "_query_dense")) , value_dense_(create_dense_layers(weights_and_biases, use_bias, num_heads, 2, value_dim, name + "_value_dense")) , key_dense_(create_dense_layers(weights_and_biases, use_bias, num_heads, 1, key_dim, name + "_key_dense")) , output_dense_(create_output_dense_layer(weights_and_biases, use_bias, name + "_output_dense")) { } private: std::vector create_dense_layers( const tensors& weights_and_biases, bool use_bias, const std::size_t num_heads, const std::size_t index, const std::size_t units, const std::string& name) { assertion(index <= 2, "Invalid dense layer index."); const std::size_t index_factor = use_bias ? 2 : 1; const tensor weights = weights_and_biases[index_factor * index]; const tensor biases = use_bias ? weights_and_biases[index_factor * index + 1] : tensor(tensor_shape(num_heads, units), 0); assertion(weights.shape().depth_ == units, "Invalid weights shape for attention head dimension."); assertion(biases.shape().depth_ == units, "Invalid biases shape for attention head dimension."); const auto weights_per_head = tensor_to_tensors_width_slices(weights); const auto biases_per_head = tensor_to_tensors_width_slices(biases); assertion(weights_per_head.size() == num_heads, "Invalid weights for number of heads."); assertion(biases_per_head.size() == num_heads, "Invalid biases for number of heads."); return fplus::transform( [&](const std::pair>& n_and_w_with_b) { return dense_layer( name + "_" + std::to_string(n_and_w_with_b.first), units, *n_and_w_with_b.second.first.as_vector(), *n_and_w_with_b.second.second.as_vector()); }, fplus::enumerate(fplus::zip(weights_per_head, biases_per_head))); } dense_layer create_output_dense_layer( const tensors& weights_and_biases, bool use_bias, const std::string& name) { const std::size_t index_factor = use_bias ? 2 : 1; const tensor weights = weights_and_biases[index_factor * 3]; const std::size_t units = weights.shape().depth_; const tensor biases = use_bias ? weights_and_biases[index_factor * 3 + 1] : tensor(tensor_shape(units), 0); return dense_layer(name + "_output", units, *weights.as_vector(), *biases.as_vector()); } tensors extract_biases(const tensors& saved_weights, bool use_bias) { return use_bias ? 
fplus::unweave(saved_weights).second : tensors(); } tensor apply_head( const tensor& query_raw, const tensor& value_raw, const tensor& key_raw, std::size_t head_index) const { assertion( query_raw.shape().rank() == 2 && value_raw.shape().rank() == 2 && key_raw.shape().rank() == 2 && query_raw.shape().depth_ == value_raw.shape().depth_ && query_raw.shape().depth_ == key_raw.shape().depth_ && value_raw.shape().width_ == key_raw.shape().width_, "Invalid shapes; need a query tensor of shape (B, T, dim) and a value/key tensor of shape (B, S, dim)."); const tensor query = query_dense_[head_index].apply({ query_raw }).front(); const tensor value = value_dense_[head_index].apply({ value_raw }).front(); const tensor key = key_dense_[head_index].apply({ key_raw }).front(); // https://towardsdatascience.com/transformers-explained-visually-part-3-multi-head-attention-deep-dive-1c1ff1024853 // https://dmol.pub/dl/attention.html#multi-head-attention-block // https://github.com/keras-team/keras/blob/v2.14.0/keras/layers/attention/multi_head_attention.py // https://gist.github.com/sevagh/b71d253a347a9b59c026580625452fc5 const tensor scores = dot_product_tensors(query, transpose(key), std::vector({ 2, 1 }), false); const std::size_t query_size = query.shape().depth_; const tensor distribution = softmax(transform_tensor(fplus::multiply_with(1 / std::sqrt(query_size)), scores)); return dot_product_tensors(distribution, value, std::vector({ 2, 1 }), false); } protected: tensors apply_impl(const tensors& input) const override { assertion(input.size() == 2 || input.size() == 3, "Invalid number of inputs for MultiHeadAttention layer."); const tensor query_raw = input[0]; const tensor value_raw = input[1]; const tensor key_raw = input.size() > 2 ? input[2] : value_raw; const auto outputs = fplus::transform([&](const std::size_t head_idx) { return apply_head(query_raw, value_raw, key_raw, head_idx); }, fplus::numbers(0, num_heads_)); const tensor merged = concatenate_tensors_depth(outputs); return output_dense_.apply({ merged }); } std::size_t num_heads_; std::size_t key_dim_; std::size_t value_dim_; std::vector query_dense_; std::vector value_dense_; std::vector key_dense_; dense_layer output_dense_; }; } } frugally-deep-0.17.1/include/fdeep/layers/multiply_layer.hpp000066400000000000000000000011601476372554500241510ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/layer.hpp" #include namespace fdeep { namespace internal { class multiply_layer : public layer { public: explicit multiply_layer(const std::string& name) : layer(name) { } protected: tensors apply_impl(const tensors& input) const override { return { multiply_tensors(input) }; } }; } } frugally-deep-0.17.1/include/fdeep/layers/normalization_layer.hpp000066400000000000000000000056731476372554500251750ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
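// Note: this layer standardizes features with the statistics adapted in
// Keras and stored with the model,
//   y = (x - mean_i) / max(sqrt(variance_i), 1e-7),
// where i indexes the slices along the (single) normalization axis.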
// (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/layer.hpp" #include #include #include #include namespace fdeep { namespace internal { class normalization_layer : public layer { public: explicit normalization_layer( const std::string& name, const std::vector& axes, const float_vec& mean, const float_vec& variance) : layer(name) , axes_(axes) , mean_(mean) , variance_(variance) { assertion(axes.size() <= 1, "Unsupported number of axes for Normalization layer"); } protected: tensors apply_impl(const tensors& inputs) const override final { const auto& input = single_tensor_from_tensors(inputs); const int rank = static_cast(input.shape().rank()); const auto transform_slice = [&](const std::size_t idx, const tensor& slice) -> tensor { const auto sqrt_of_variance = std::sqrt(variance_[idx]); return transform_tensor([&](float_type x) { return (x - mean_[idx]) / std::fmax(sqrt_of_variance, 1e-7); }, slice); }; if (axes_.empty()) { assertion(variance_.size() == 1, "Invalid number of variance values in Normalization layer."); return { transform_slice(0, input) }; } const auto axis_dim = axes_[0] == -1 ? 0 : rank - axes_[0]; const auto transform_slice_with_idx = [&](const tensors& slices) -> tensors { assertion(variance_.size() == slices.size(), "Invalid number of variance values in Normalization layer."); return fplus::transform_with_idx(transform_slice, slices); }; if (axis_dim == 0) return { concatenate_tensors_depth(transform_slice_with_idx(tensor_to_depth_slices(input))) }; else if (axis_dim == 1) return { concatenate_tensors_width(transform_slice_with_idx(tensor_to_tensors_width_slices(input))) }; else if (axis_dim == 2) return { concatenate_tensors_height(transform_slice_with_idx(tensor_to_tensors_height_slices(input))) }; else if (axis_dim == 3) return { concatenate_tensors_dim4(transform_slice_with_idx(tensor_to_tensors_dim4_slices(input))) }; else if (axis_dim == 4) return { concatenate_tensors_dim5(transform_slice_with_idx(tensor_to_tensors_dim5_slices(input))) }; else raise_error("Invalid axis (" + std::to_string(axis_dim) + ") for Normalization layer"); return {}; } const std::vector axes_; float_vec mean_; float_vec variance_; }; } } frugally-deep-0.17.1/include/fdeep/layers/permute_layer.hpp000066400000000000000000000015451476372554500237620ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/layer.hpp" #include namespace fdeep { namespace internal { class permute_layer : public layer { public: explicit permute_layer(const std::string& name, const std::vector& dims) : layer(name) , dims_raw_(dims) { check_permute_tensor_dims(dims); } protected: tensors apply_impl(const tensors& inputs) const override { const auto& input = single_tensor_from_tensors(inputs); return { permute_tensor(input, dims_raw_) }; } std::vector dims_raw_; }; } } frugally-deep-0.17.1/include/fdeep/layers/pooling_3d_layer.hpp000066400000000000000000000062051476372554500243340ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
// (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/convolution3d.hpp" #include "fdeep/layers/layer.hpp" #include #include #include #include #include namespace fdeep { namespace internal { typedef void (*inner_pooling_func)( const tensor&, tensor& out, std::size_t, std::size_t, std::size_t, std::size_t, std::size_t, std::size_t, std::size_t, std::size_t, std::size_t, std::size_t, int, int, int); // Abstract base class for pooling layers class pooling_3d_layer : public layer { public: explicit pooling_3d_layer(const std::string& name, const shape3& pool_size, const shape3& strides, padding p, const inner_pooling_func inner_f) : layer(name) , pool_size_(pool_size) , strides_(strides) , padding_(p) , inner_f_(inner_f) { } protected: tensor pool(const tensor& in) const { const auto conv_cfg = preprocess_convolution_3d( shape3(pool_size_.size_dim_4_, pool_size_.height_, pool_size_.width_), shape3(strides_.size_dim_4_, strides_.height_, strides_.width_), padding_, in.shape().size_dim_4_, in.shape().height_, in.shape().width_); int pad_front_int = static_cast(conv_cfg.pad_front_); int pad_top_int = static_cast(conv_cfg.pad_top_); int pad_left_int = static_cast(conv_cfg.pad_left_); const std::size_t out_size_d4 = conv_cfg.out_size_d4_; const std::size_t out_height = conv_cfg.out_height_; const std::size_t out_width = conv_cfg.out_width_; tensor out( tensor_shape_with_changed_rank( tensor_shape(out_size_d4, out_height, out_width, in.shape().depth_), in.shape().rank()), 0); for (std::size_t d4 = 0; d4 < out_size_d4; ++d4) { for (std::size_t y = 0; y < out_height; ++y) { for (std::size_t x = 0; x < out_width; ++x) { for (std::size_t z = 0; z < in.shape().depth_; ++z) { inner_f_(in, out, pool_size_.size_dim_4_, pool_size_.height_, pool_size_.width_, strides_.size_dim_4_, strides_.height_, strides_.width_, d4, y, x, z, pad_front_int, pad_top_int, pad_left_int); } } } } return out; } tensors apply_impl(const tensors& inputs) const override final { const auto& input = single_tensor_from_tensors(inputs); return { pool(input) }; } shape3 pool_size_; shape3 strides_; padding padding_; inner_pooling_func inner_f_; }; } } frugally-deep-0.17.1/include/fdeep/layers/prelu_layer.hpp000066400000000000000000000060311476372554500234230ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/layer.hpp" #include namespace fdeep { namespace internal { class prelu_layer : public layer { public: explicit prelu_layer(const std::string& name, const float_vec& alpha, std::vector shared_axes) : layer(name) , alpha_(fplus::make_shared_ref(alpha)) , shared_axes_(shared_axes) { } protected: fdeep::shared_float_vec alpha_; std::vector shared_axes_; tensors apply_impl(const tensors& input) const override { // We need to shift shared_axes if the original Keras tensor // was one or two dimensional. // We detect this by checking if the axes indicated in shared_axes // has length 1. // For this to work we need to remove axes with length 1 // from shared axes in Python. 
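// Example: for an input of shape (height=1, width=w, depth=d), a stored
// shared_axes of {1} refers to the collapsed height axis; the loop below
// shifts it to {2}, so the slope is shared along the width axis instead.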
std::vector shared_axes_shifted; std::size_t shift = 0; for (std::size_t i = 0; i < shared_axes_.size(); ++i) { if ((shared_axes_[i] == 1 && input[0].shape().height_ == 1) || (shared_axes_[i] == 2 && input[0].shape().width_ == 1)) { shift++; } shared_axes_shifted.push_back(shared_axes_[i] + shift); } const bool height_shared = fplus::is_elem_of(1, shared_axes_shifted); const bool width_shared = fplus::is_elem_of(2, shared_axes_shifted); const bool channels_shared = fplus::is_elem_of(3, shared_axes_shifted); const size_t width = width_shared ? 1 : input[0].shape().width_; const size_t depth = channels_shared ? 1 : input[0].shape().depth_; fdeep::tensor out(input[0].shape(), 1.0f); for (std::size_t y = 0; y < out.shape().height_; ++y) { for (std::size_t x = 0; x < out.shape().width_; ++x) { for (std::size_t z = 0; z < out.shape().depth_; ++z) { if (input[0].get_ignore_rank(tensor_pos(y, x, z)) > 0) { out.set_ignore_rank(tensor_pos(y, x, z), input[0].get_ignore_rank(tensor_pos(y, x, z))); } else { const size_t y_temp = height_shared ? 0 : y; const size_t x_temp = width_shared ? 0 : x; const size_t z_temp = channels_shared ? 0 : z; const size_t pos = y_temp * width * depth + x_temp * depth + z_temp; out.set_ignore_rank(tensor_pos(y, x, z), (*alpha_)[pos] * input[0].get_ignore_rank(tensor_pos(y, x, z))); } } } } return { out }; } }; } }frugally-deep-0.17.1/include/fdeep/layers/relu_layer.hpp000066400000000000000000000024751476372554500232530ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/activation_layer.hpp" #include #include namespace fdeep { namespace internal { class relu_layer : public activation_layer { public: explicit relu_layer(const std::string& name, const float_type max_value, const float_type negative_slope, const float_type threshold) : activation_layer(name) , max_value_(max_value) , negative_slope_(negative_slope) , threshold_(threshold) { } protected: tensor transform_input(const tensor& in_vol) const override { auto activation_function = [&](float_type x) -> float_type { if (x >= max_value_) return max_value_; if (threshold_ <= x && x < max_value_) return x; return negative_slope_ * (x - threshold_); }; return transform_tensor(activation_function, in_vol); } float_type max_value_; float_type negative_slope_; float_type threshold_; }; } } frugally-deep-0.17.1/include/fdeep/layers/repeat_vector_layer.hpp000066400000000000000000000017211476372554500251370ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
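// Note: RepeatVector stacks a rank-1 input of depth d n times into shape
// (n, d); e.g. with n == 3, the vector (x0, x1) becomes three identical
// rows of (x0, x1).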
// (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/layer.hpp" #include #include namespace fdeep { namespace internal { class repeat_vector_layer : public layer { public: explicit repeat_vector_layer(const std::string& name, std::size_t n) : layer(name) , n_(n) { } protected: tensors apply_impl(const tensors& inputs) const override { const auto& input = single_tensor_from_tensors(inputs); assertion(input.shape().rank() == 1, "Invalid input shape for RepeatVector"); return { tensor( tensor_shape(n_, input.shape().depth_), fplus::repeat(n_, *input.as_vector())) }; } std::size_t n_; }; } } frugally-deep-0.17.1/include/fdeep/layers/rescaling_layer.hpp000066400000000000000000000022031476372554500242400ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/layer.hpp" #include #include namespace fdeep { namespace internal { class rescaling_layer : public layer { public: explicit rescaling_layer(const std::string& name, float_type scale, float_type offset) : layer(name) , scale_(scale) , offset_(offset) { } protected: float_type scale_; float_type offset_; static float_type rescale_value(float_type scale, float_type offset, float_type x) { return scale * x + offset; } tensors apply_impl(const tensors& inputs) const override { const auto f = fplus::bind_1st_and_2nd_of_3(rescale_value, scale_, offset_); const auto rescale_tensor = fplus::bind_1st_of_2(transform_tensor, f); return fplus::transform(rescale_tensor, inputs); } }; } } frugally-deep-0.17.1/include/fdeep/layers/reshape_layer.hpp000066400000000000000000000017501476372554500237260ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/layer.hpp" #include #include namespace fdeep { namespace internal { class reshape_layer : public layer { public: explicit reshape_layer(const std::string& name, const tensor_shape_variable& target_shape) : layer(name) , target_shape_(target_shape) { } protected: tensors apply_impl(const tensors& inputs) const override { const auto& input = single_tensor_from_tensors(inputs); const auto fixed_target_shape = derive_fixed_tensor_shape( input.shape().volume(), target_shape_); return { tensor(fixed_target_shape, input.as_vector()) }; } tensor_shape_variable target_shape_; }; } } frugally-deep-0.17.1/include/fdeep/layers/resizing_layer.hpp000066400000000000000000000026101476372554500241250ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
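// Note: with crop_to_aspect_ratio == true this resizing mimics Keras'
// smart_resize (crop to the target aspect ratio first, then resize);
// otherwise the input is resized directly with the chosen interpolation.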
// (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/layer.hpp" #include #include #include #include #include namespace fdeep { namespace internal { class resizing_layer : public layer { public: explicit resizing_layer(const std::string& name, std::size_t height, std::size_t width, const std::string& interpolation, bool crop_to_aspect_ratio) : layer(name) , height_(height) , width_(width) , interpolation_(interpolation) , crop_to_aspect_ratio_(crop_to_aspect_ratio) { } protected: tensors apply_impl(const tensors& inputs) const override final { const auto& input = single_tensor_from_tensors(inputs); if (crop_to_aspect_ratio_) { return { smart_resize_tensor_2d(input, shape2(height_, width_), interpolation_) }; } else { return { resize_tensor_2d(input, shape2(height_, width_), interpolation_) }; } } std::size_t height_; std::size_t width_; std::string interpolation_; bool crop_to_aspect_ratio_; }; } } frugally-deep-0.17.1/include/fdeep/layers/selu_layer.hpp000066400000000000000000000016461476372554500232530ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/activation_layer.hpp" #include "fdeep/recurrent_ops.hpp" #include namespace fdeep { namespace internal { // https://arxiv.org/pdf/1706.02515.pdf class selu_layer : public activation_layer { public: explicit selu_layer(const std::string& name) : activation_layer(name) { } protected: const float_type alpha_ = static_cast(1.6732632423543772848170429916717); const float_type scale_ = static_cast(1.0507009873554804934193349852946); tensor transform_input(const tensor& in_vol) const override { return transform_tensor(selu_activation, in_vol); } }; } } frugally-deep-0.17.1/include/fdeep/layers/separable_conv_2d_layer.hpp000066400000000000000000000036131476372554500256470ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/convolution.hpp" #include "fdeep/filter.hpp" #include "fdeep/layers/layer.hpp" #include "fdeep/shape2.hpp" #include "fdeep/tensor_shape.hpp" #include #include #include namespace fdeep { namespace internal { // Convolve depth slices separately first. 
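// (i.e., a depthwise convolution applying one filter per input channel)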
// Then convolve normally with kernel_size = (1, 1) class separable_conv_2d_layer : public layer { public: explicit separable_conv_2d_layer( const std::string& name, std::size_t input_depth, const tensor_shape& filter_shape, std::size_t k, const shape2& strides, padding p, const shape2& dilation_rate, const float_vec& depthwise_weights, const float_vec& pointwise_weights, const float_vec& bias_0, const float_vec& bias) : layer(name) , depthwise_layer_(name + "_depthwise_part", input_depth, filter_shape, strides, p, dilation_rate, depthwise_weights, bias_0) , filters_pointwise_(generate_im2col_filter_matrix( generate_filters(shape2(1, 1), tensor_shape(input_depth), k, pointwise_weights, bias, false))) { } protected: tensors apply_impl(const tensors& inputs) const override { const auto temp = depthwise_layer_.apply(inputs); const auto temp_single = single_tensor_from_tensors(temp); return { convolve(shape2(1, 1), padding::valid, filters_pointwise_, temp_single) }; } depthwise_conv_2d_layer depthwise_layer_; convolution_filter_matrices filters_pointwise_; }; } } frugally-deep-0.17.1/include/fdeep/layers/sigmoid_layer.hpp000066400000000000000000000013311476372554500237250ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/activation_layer.hpp" #include "fdeep/recurrent_ops.hpp" #include #include namespace fdeep { namespace internal { class sigmoid_layer : public activation_layer { public: explicit sigmoid_layer(const std::string& name) : activation_layer(name) { } protected: tensor transform_input(const tensor& in_vol) const override { return transform_tensor(sigmoid_activation, in_vol); } }; } } frugally-deep-0.17.1/include/fdeep/layers/softmax_layer.hpp000066400000000000000000000012051476372554500237530ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/activation_layer.hpp" #include namespace fdeep { namespace internal { class softmax_layer : public activation_layer { public: explicit softmax_layer(const std::string& name) : activation_layer(name) { } protected: tensor transform_input(const tensor& input) const override { return softmax(input); } }; } } frugally-deep-0.17.1/include/fdeep/layers/softplus_layer.hpp000066400000000000000000000024001476372554500241470ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
// (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/activation_layer.hpp" #include #include namespace fdeep { namespace internal { class softplus_layer : public activation_layer { public: explicit softplus_layer(const std::string& name) : activation_layer(name) { } protected: tensor transform_input(const tensor& in_vol) const override { auto activation_function = [](float_type x) -> float_type { // https://github.com/tensorflow/tensorflow/blob/626808e4e4a83aafbb3809a30db57bb78e839040/tensorflow/core/kernels/softplus_op.h#L41 const float_type threshold = std::log(std::numeric_limits::epsilon()) + 2; if (x > -threshold) // too_large return x; else if (x < threshold) // too_small return std::exp(x); else return std::log1p(std::exp(x)); }; return transform_tensor(activation_function, in_vol); } }; } } frugally-deep-0.17.1/include/fdeep/layers/softsign_layer.hpp000066400000000000000000000013341476372554500241310ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/activation_layer.hpp" #include "fdeep/recurrent_ops.hpp" #include #include namespace fdeep { namespace internal { class softsign_layer : public activation_layer { public: explicit softsign_layer(const std::string& name) : activation_layer(name) { } protected: tensor transform_input(const tensor& in_vol) const override { return transform_tensor(softsign_activation, in_vol); } }; } } frugally-deep-0.17.1/include/fdeep/layers/subtract_layer.hpp000066400000000000000000000013511476372554500241230ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/layer.hpp" #include namespace fdeep { namespace internal { class subtract_layer : public layer { public: explicit subtract_layer(const std::string& name) : layer(name) { } protected: tensors apply_impl(const tensors& input) const override { assertion(input.size() == 2, "subtract layer needs exactly two input tensors"); return { subtract_tensors(input[0], input[1]) }; } }; } } frugally-deep-0.17.1/include/fdeep/layers/swish_layer.hpp000066400000000000000000000013011476372554500234240ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/activation_layer.hpp" #include "fdeep/recurrent_ops.hpp" #include namespace fdeep { namespace internal { class swish_layer : public activation_layer { public: explicit swish_layer(const std::string& name) : activation_layer(name) { } protected: tensor transform_input(const tensor& in_vol) const override { return transform_tensor(swish_activation, in_vol); } }; } } frugally-deep-0.17.1/include/fdeep/layers/tanh_layer.hpp000066400000000000000000000012761476372554500232340ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
// (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/activation_layer.hpp" #include "fdeep/recurrent_ops.hpp" #include namespace fdeep { namespace internal { class tanh_layer : public activation_layer { public: explicit tanh_layer(const std::string& name) : activation_layer(name) { } protected: tensor transform_input(const tensor& in_vol) const override { return transform_tensor(tanh_activation, in_vol); } }; } } frugally-deep-0.17.1/include/fdeep/layers/time_distributed_layer.hpp000066400000000000000000000064521476372554500256430ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/layer.hpp" #include "fdeep/recurrent_ops.hpp" #include #include #include namespace fdeep { namespace internal { class time_distributed_layer : public layer { public: explicit time_distributed_layer(const std::string& name, const layer_ptr& inner_layer, const std::size_t td_input_len, const std::size_t td_output_len) : layer(name) , inner_layer_(inner_layer) , td_input_len_(td_input_len) , td_output_len_(td_output_len) { assertion(td_output_len_ > 1, "Wrong input dimension"); } protected: tensors apply_impl(const tensors& inputs) const override final { const auto& input = single_tensor_from_tensors(inputs); tensors result_time_step = {}; std::size_t len_series = 0; tensors slices = {}; std::int32_t concat_axis = 0; if (td_input_len_ == 2) { len_series = input.shape().width_; slices = tensor_to_tensors_width_slices(input); } else if (td_input_len_ == 3) { len_series = input.shape().height_; slices = tensor_to_tensors_height_slices(input); } else if (td_input_len_ == 4) { len_series = input.shape().size_dim_4_; slices = tensor_to_tensors_dim4_slices(input); } else if (td_input_len_ == 5) { len_series = input.shape().size_dim_5_; slices = tensor_to_tensors_dim5_slices(input); } else raise_error("invalid input dim for TimeDistributed"); for (auto& slice : slices) { slice.shrink_rank(); } if (td_output_len_ == 2) concat_axis = 2; else if (td_output_len_ == 3) concat_axis = 1; else if (td_output_len_ == 4) concat_axis = 3; else if (td_output_len_ == 5) concat_axis = 4; else raise_error("invalid output dim for TimeDistributed"); for (std::size_t i = 0; i < len_series; ++i) { const auto curr_result = inner_layer_->apply({ slices[i] }); result_time_step.push_back(curr_result.front()); } if (concat_axis == 1) { return { concatenate_tensors_height(result_time_step) }; } if (concat_axis == 2) { return { concatenate_tensors_width(result_time_step) }; } if (concat_axis == 3) { return { concatenate_tensors_dim4(result_time_step) }; } if (concat_axis == 4) { return { concatenate_tensors_dim5(result_time_step) }; } raise_error("Invalid concat_axis in time_distributed_layer."); return {}; } const layer_ptr inner_layer_; const std::size_t td_input_len_; const std::size_t td_output_len_; }; } } frugally-deep-0.17.1/include/fdeep/layers/unit_normalization_layer.hpp000066400000000000000000000014521476372554500262230ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
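// Note: UnitNormalization rescales the input so that its L2 norm along the
// given axes becomes 1, i.e. y = x / sqrt(sum(x^2) over those axes).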
// (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/layer.hpp" #include namespace fdeep { namespace internal { class unit_normalization_layer : public layer { public: explicit unit_normalization_layer(const std::string& name, std::vector axes) : layer(name) , axes_(axes) { } protected: std::vector axes_; tensors apply_impl(const tensors& inputs) const override { const auto& input = single_tensor_from_tensors(inputs); return { l2_normalize(input, axes_) }; } }; } } frugally-deep-0.17.1/include/fdeep/layers/upsampling_1d_layer.hpp000066400000000000000000000032331476372554500250400ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/layer.hpp" #include #include #include #include #include namespace fdeep { namespace internal { class upsampling_1d_layer : public layer { public: explicit upsampling_1d_layer(const std::string& name, const std::size_t size) : layer(name) , size_(size) { } protected: tensors apply_impl(const tensors& inputs) const override final { const auto& input = single_tensor_from_tensors(inputs); assertion(input.shape().rank() == 2, "invalid input shape for Upsampling1D"); return { upsampling_1d_rank_2(input) }; } tensor upsampling_1d_rank_2(const tensor& input) const { assertion(input.shape().rank() == 2, "invalid rank for upsampling"); tensor out_vol(tensor_shape( input.shape().width_ * size_, input.shape().depth_), 0); for (std::size_t x = 0; x < out_vol.shape().width_; ++x) { for (std::size_t z = 0; z < out_vol.shape().depth_; ++z) { const std::size_t x_in = x / size_; out_vol.set_ignore_rank(tensor_pos(x, z), input.get_ignore_rank(tensor_pos(x_in, z))); } } return { out_vol }; } std::size_t size_; }; } } frugally-deep-0.17.1/include/fdeep/layers/upsampling_2d_layer.hpp000066400000000000000000000030361476372554500250420ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/layer.hpp" #include #include #include #include #include namespace fdeep { namespace internal { class upsampling_2d_layer : public layer { public: explicit upsampling_2d_layer(const std::string& name, const shape2& scale_factor, const std::string& interpolation) : layer(name) , scale_factor_(scale_factor) , interpolation_(interpolation) { } protected: tensors apply_impl(const tensors& inputs) const override final { const auto& input = single_tensor_from_tensors(inputs); if (interpolation_ == "nearest") { return { resize2d_nearest( input, shape2(scale_factor_.height_ * input.shape().height_, scale_factor_.width_ * input.shape().width_)) }; } else if (interpolation_ == "bilinear") { return { resize2d_bilinear( input, shape2(scale_factor_.height_ * input.shape().height_, scale_factor_.width_ * input.shape().width_)) }; } else { raise_error("Invalid interpolation method: " + interpolation_); return inputs; } } shape2 scale_factor_; std::string interpolation_; }; } } frugally-deep-0.17.1/include/fdeep/layers/zero_padding_3d_layer.hpp000066400000000000000000000025431476372554500253330ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
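// Note: the six pad amounts map onto pad_tensor's dim4 (front/back), height
// (top/bottom) and width (left/right) parameters, with 0 as the pad value.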
// (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/layers/layer.hpp" #include namespace fdeep { namespace internal { class zero_padding_3d_layer : public layer { public: explicit zero_padding_3d_layer(const std::string& name, std::size_t front_pad, std::size_t back_pad, std::size_t top_pad, std::size_t bottom_pad, std::size_t left_pad, std::size_t right_pad) : layer(name) , front_pad_(front_pad) , back_pad_(back_pad) , top_pad_(top_pad) , bottom_pad_(bottom_pad) , left_pad_(left_pad) , right_pad_(right_pad) { } protected: tensors apply_impl(const tensors& inputs) const override { const auto& input = single_tensor_from_tensors(inputs); return { pad_tensor(0, front_pad_, back_pad_, top_pad_, bottom_pad_, left_pad_, right_pad_, input) }; } std::size_t front_pad_; std::size_t back_pad_; std::size_t top_pad_; std::size_t bottom_pad_; std::size_t left_pad_; std::size_t right_pad_; }; } } frugally-deep-0.17.1/include/fdeep/model.hpp000066400000000000000000000245441476372554500207120ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/common.hpp" #include "fdeep/import_model.hpp" #include "fdeep/layers/layer.hpp" #include "fdeep/tensor.hpp" #include #include #include namespace fdeep { class model { public: // A single forward pass (no batches). tensors predict(const tensors& inputs) const { return predict_impl(inputs); } // Forward pass multiple data. // When parallelly == true, the work is distributed to up to // as many CPUs as data entries are provided. std::vector predict_multi(const std::vector& inputs_vec, bool parallelly) const { const auto f = [this](const tensors& inputs) -> tensors { return predict(inputs); }; if (parallelly) { return fplus::transform_parallelly(f, inputs_vec); } else { return fplus::transform(f, inputs_vec); } } // Convenience wrapper around predict for models with // single tensor outputs of shape (1, 1, z). // Suitable for classification models with more than one output neuron. // Returns the index of the output neuron with the maximum activation. std::size_t predict_class(const tensors& inputs) const { return predict_class_with_confidence_impl(inputs).first; } // Like predict_class, // but also returns the value of the maximally activated output neuron. std::pair predict_class_with_confidence(const tensors& inputs) const { return predict_class_with_confidence_impl(inputs); } // Convenience wrapper around predict for models with // single tensor outputs of shape (1, 1, 1), // typically used for regression or binary classification. // Returns this one activation value. float_type predict_single_output(const tensors& inputs) const { return predict_single_output_impl(inputs); } const std::vector& get_input_shapes() const { return input_shapes_; } const std::vector& get_output_shapes() const { return output_shapes_; } const std::vector get_dummy_input_shapes() const { return fplus::transform( fplus::bind_1st_of_2(internal::make_tensor_shape_with, tensor_shape(42, 42, 42)), get_input_shapes()); } // Returns zero-filled tensors with the models input shapes. tensors generate_dummy_inputs() const { return fplus::transform([](const tensor_shape& shape) -> tensor { return tensor(shape, 0); }, get_dummy_input_shapes()); } // Measure time of one single forward pass using dummy input data. 
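// Usage sketch ("my_model.json" is a placeholder path):
//   const auto model = fdeep::load_model("my_model.json");
//   std::cout << model.test_speed() << " s" << std::endl;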
double test_speed() const { const auto inputs = generate_dummy_inputs(); fplus::stopwatch stopwatch; predict(inputs); return stopwatch.elapsed(); } const std::string& name() const { return model_layer_->name_; } const std::string& hash() const { return hash_; } private: model(const internal::layer_ptr& model_layer, const std::vector& input_shapes, const std::vector& output_shapes, const std::string& hash) : input_shapes_(input_shapes) , output_shapes_(output_shapes) , model_layer_(model_layer) , hash_(hash) { } friend model read_model(std::istream&, bool, const std::function&, float_type, const internal::layer_creators&); tensors predict_impl(const tensors& inputs) const { const auto input_shapes = fplus::transform( fplus_c_mem_fn_t(tensor, shape, tensor_shape), inputs); internal::assertion(input_shapes == get_input_shapes(), std::string("Invalid inputs shape.\n") + "The model takes " + show_tensor_shapes_variable(get_input_shapes()) + " but provided was: " + show_tensor_shapes(input_shapes)); const auto outputs = model_layer_->apply(inputs); const auto output_shapes = fplus::transform( fplus_c_mem_fn_t(tensor, shape, tensor_shape), outputs); internal::assertion(output_shapes == get_output_shapes(), std::string("Invalid outputs shape.\n") + "The model should return " + show_tensor_shapes_variable(get_output_shapes()) + " but actually returned: " + show_tensor_shapes(output_shapes)); return outputs; } std::pair predict_class_with_confidence_impl(const tensors& inputs) const { const tensors outputs = predict(inputs); internal::assertion(outputs.size() == 1, std::string("invalid number of outputs.\n") + "Use model::predict instead of model::predict_class."); const auto output_shape = outputs.front().shape(); internal::assertion(output_shape.without_depth().area() == 1, std::string("invalid output shape.\n") + "Use model::predict instead of model::predict_class."); const auto pos = internal::tensor_max_pos(outputs.front()); return std::make_pair(pos.z_, outputs.front().get(pos)); } float_type predict_single_output_impl(const tensors& inputs) const { const tensors outputs = predict(inputs); internal::assertion(outputs.size() == 1, "invalid number of outputs"); const auto output_shape = outputs.front().shape(); internal::assertion(output_shape.volume() == 1, "invalid output shape"); return to_singleton_value(outputs.front()); } std::vector input_shapes_; std::vector output_shapes_; internal::layer_ptr model_layer_; std::string hash_; }; // Write an std::string to std::cout. inline void cout_logger(const std::string& str) { std::cout << str << std::flush; } // Take an std::string and do nothing. // Useful for silencing the logging when loading a model. inline void dev_null_logger(const std::string&) { } // Load and construct an fdeep::model from an istream // providing the exported json content. // Throws an exception if a problem occurs. inline model read_model(std::istream& model_file_stream, bool verify = true, const std::function& logger = cout_logger, float_type verify_epsilon = static_cast(0.0001), const internal::layer_creators& custom_layer_creators = internal::layer_creators()) { const auto log = [&logger](const std::string& msg) { if (logger) { logger(msg + "\n"); } }; fplus::stopwatch stopwatch; const auto log_sol = [&stopwatch, &logger](const std::string& msg) { stopwatch.reset(); if (logger) { logger(msg + " ... "); } }; const auto log_duration = [&stopwatch, &logger]() { if (logger) { logger("done. 
elapsed time: " + fplus::show_float(0, 6, stopwatch.elapsed()) + " s\n"); } stopwatch.reset(); }; log_sol("Loading json"); nlohmann::json json_data; model_file_stream >> json_data; log_duration(); const std::string image_data_format = json_data["image_data_format"]; internal::assertion(image_data_format == "channels_last", "only channels_last data format supported"); const std::function get_param = [&json_data](const std::string& layer_name, const std::string& param_name) -> nlohmann::json { return json_data["trainable_params"][layer_name][param_name]; }; log_sol("Building model"); model full_model(internal::create_model_layer( get_param, json_data["architecture"], json_data["architecture"]["config"]["name"], custom_layer_creators, ""), internal::create_tensor_shapes_variable(json_data["input_shapes"]), internal::create_tensor_shapes_variable(json_data["output_shapes"]), internal::json_object_get( json_data, "hash", "")); log_duration(); if (verify) { if (!json_data["tests"].is_array()) { log("No test cases available"); } else { const auto tests = internal::load_test_cases(json_data["tests"]); json_data = {}; // free RAM for (std::size_t i = 0; i < tests.size(); ++i) { log_sol("Running test " + fplus::show(i + 1) + " of " + fplus::show(tests.size())); const auto output = full_model.predict_impl(tests[i].input_); log_duration(); check_test_outputs(verify_epsilon, output, tests[i].output_); } } } return full_model; } inline model read_model_from_string(const std::string& content, bool verify = true, const std::function& logger = cout_logger, float_type verify_epsilon = static_cast(0.0001), const internal::layer_creators& custom_layer_creators = internal::layer_creators()) { std::istringstream content_stream(content); return read_model(content_stream, verify, logger, verify_epsilon, custom_layer_creators); } // Load and construct an fdeep::model from file. // Throws an exception if a problem occurs. inline model load_model(const std::string& file_path, bool verify = true, const std::function& logger = cout_logger, float_type verify_epsilon = static_cast(0.0001), const internal::layer_creators& custom_layer_creators = internal::layer_creators()) { fplus::stopwatch stopwatch; std::ifstream in_stream(file_path); internal::assertion(in_stream.good(), "Can not open " + file_path); const auto model = read_model(in_stream, verify, logger, verify_epsilon, custom_layer_creators); if (logger) { const std::string additional_action = verify ? ", testing" : ""; logger("Loading, constructing" + additional_action + " of " + file_path + " took " + fplus::show_float(0, 6, stopwatch.elapsed()) + " s overall.\n"); } return model; } } frugally-deep-0.17.1/include/fdeep/node.hpp000066400000000000000000000043411476372554500205300ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
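// Note: output_dict (below) caches layer outputs keyed by
// (layer_id, node_idx), so a tensor feeding several downstream layers is
// computed only once per forward pass.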
// (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include #include #include #include #include #include #include namespace fdeep { namespace internal { struct node_connection { node_connection(const std::string& layer_id, std::size_t node_idx, std::size_t tensor_idx) : layer_id_(layer_id) , node_idx_(node_idx) , tensor_idx_(tensor_idx) { } std::pair without_tensor_idx() const { return std::make_pair(layer_id_, node_idx_); } std::string layer_id_; std::size_t node_idx_; std::size_t tensor_idx_; }; using node_connections = std::vector; using output_dict = std::map, tensors>; class layer; typedef std::shared_ptr layer_ptr; typedef std::vector layer_ptrs; layer_ptr get_layer(const layer_ptrs& layers, const std::string& layer_id); tensor get_layer_output(const layer_ptrs& layers, output_dict& output_cache, const layer_ptr& layer, std::size_t node_idx, std::size_t tensor_idx); tensors apply_layer(const layer& layer, const tensors& inputs); class node { public: explicit node(const node_connections& inbound_nodes) : inbound_connections_(inbound_nodes) { } tensors get_output(const layer_ptrs& layers, output_dict& output_cache, const layer& layer) const { const auto get_input = [&output_cache, &layers](const node_connection& conn) -> tensor { return get_layer_output(layers, output_cache, get_layer(layers, conn.layer_id_), conn.node_idx_, conn.tensor_idx_); }; return apply_layer(layer, fplus::transform(get_input, inbound_connections_)); } private: node_connections inbound_connections_; }; typedef std::vector nodes; } } frugally-deep-0.17.1/include/fdeep/recurrent_ops.hpp000066400000000000000000000037261476372554500225030ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include #include namespace fdeep { namespace internal { using Eigen::Dynamic; template using RowVector = Eigen::Matrix; inline float_type tanh_activation(float_type x) { return std::tanh(x); } inline float_type sigmoid_activation(float_type x) { return 1 / (1 + std::exp(-x)); } inline float_type swish_activation(float_type x) { return x / (1 + std::exp(-x)); } inline float_type hard_sigmoid_activation(float_type x) { // https://github.com/keras-team/keras/blob/f7bc67e6c105c116a2ba7f5412137acf78174b1a/keras/ops/nn.py#L316C6-L316C74 if (x < -3) { return 0; } if (x > 3) { return 1; } return (x / static_cast(6)) + static_cast(0.5); } inline float_type selu_activation(float_type x) { const float_type alpha = static_cast(1.6732632423543772848170429916717); const float_type scale = static_cast(1.0507009873554804934193349852946); return scale * (x >= 0 ? x : alpha * (std::exp(x) - 1)); } inline float_type exponential_activation(float_type x) { return static_cast(std::exp(x)); } inline float_type gelu_activation(float_type x) { return static_cast(0.5) * x * (static_cast(1) + static_cast(std::erf(x / std::sqrt(static_cast(2))))); } inline float_type softsign_activation(float_type x) { return x / (std::abs(x) + static_cast(1)); } inline float_type elu_activation(float_type x) { return x >= 0 ? x : std::exp(x) - 1; } } } frugally-deep-0.17.1/include/fdeep/shape2.hpp000066400000000000000000000015151476372554500207650ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
// (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/common.hpp" #include #include #include namespace fdeep { namespace internal { class shape2 { public: explicit shape2( std::size_t height, std::size_t width) : height_(height) , width_(width) { } std::size_t area() const { return height_ * width_; } std::size_t height_; std::size_t width_; }; inline bool operator==(const shape2& lhs, const shape2& rhs) { return lhs.height_ == rhs.height_ && lhs.width_ == rhs.width_; } } } frugally-deep-0.17.1/include/fdeep/shape3.hpp000066400000000000000000000017561476372554500207750ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/common.hpp" #include #include #include namespace fdeep { namespace internal { class shape3 { public: explicit shape3( std::size_t size_dim_4, std::size_t height, std::size_t width) : size_dim_4_(size_dim_4) , height_(height) , width_(width) { } std::size_t volume() const { return size_dim_4_ * height_ * width_; } std::size_t size_dim_4_; std::size_t height_; std::size_t width_; }; inline bool operator==(const shape3& lhs, const shape3& rhs) { return lhs.size_dim_4_ == rhs.size_dim_4_ && lhs.height_ == rhs.height_ && lhs.width_ == rhs.width_; } } } frugally-deep-0.17.1/include/fdeep/tensor.hpp000066400000000000000000001572461476372554500211320ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/common.hpp" #include "fdeep/tensor_pos.hpp" #include "fdeep/tensor_shape.hpp" #include #include #include #include #include #include #include #include #include namespace fdeep { namespace internal { class tensor { public: tensor(const tensor_shape& shape, const shared_float_vec& values) : shape_(shape) , values_(values) { assertion(shape.volume() == values->size(), std::string("invalid number of values. 
shape: ") + show_tensor_shape(shape) + "; value count: " + std::to_string(values->size())); } tensor(const tensor_shape& shape, float_vec&& values) : tensor(shape, fplus::make_shared_ref(std::move(values))) { } tensor(const tensor_shape& shape, const float_vec_unaligned& values) : tensor(shape, fplus::make_shared_ref(fplus::convert_container(values))) { } tensor(const tensor_shape& shape, float_type value) : tensor(shape, fplus::make_shared_ref(shape.volume(), value)) { } float_type get(const tensor_pos& pos) const { return (*values_)[idx(pos)]; } float_type get_ignore_rank(const tensor_pos& pos) const { return (*values_)[idx_ignore_rank(pos)]; } const float_type& get_ref_ignore_rank(const tensor_pos& pos) const { return (*values_)[idx_ignore_rank(pos)]; } float_type& get_ref_ignore_rank(const tensor_pos& pos) { return (*values_)[idx_ignore_rank(pos)]; } float_type get_padded(float_type pad_value, int d5, int d4, int y, int x, int z) const { if (d5 < 0 || d5 >= static_cast(shape().size_dim_5_) || d4 < 0 || d4 >= static_cast(shape().size_dim_4_) || y < 0 || y >= static_cast(shape().height_) || x < 0 || x >= static_cast(shape().width_) || z < 0 || z >= static_cast(shape().depth_)) { return pad_value; } return get_ignore_rank(tensor_pos( static_cast(d5), static_cast(d4), static_cast(y), static_cast(x), static_cast(z))); } void set(const tensor_pos& pos, float_type value) { (*values_)[idx(pos)] = value; } void set_ignore_rank(const tensor_pos& pos, float_type value) { (*values_)[idx_ignore_rank(pos)] = value; } // Deprecated! Will likely be removed from the API soon. // Please use // get(const tensor_pos&) const // or // get_ignore_rank(const tensor_pos&) const // instead. float_type get(std::size_t pos_dim_5, std::size_t pos_dim_4, std::size_t y, std::size_t x, std::size_t z) const { return get_ignore_rank(tensor_pos(pos_dim_5, pos_dim_4, y, x, z)); } // Deprecated! Will likely be removed from the API soon. // Please use // set(const tensor_pos, float_type) // or // set_ignore_rank(const tensor_pos&, float_type) // instead. 
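// Migration sketch: t.set(0, 0, y, x, z, v) can become
//   t.set_ignore_rank(tensor_pos(0, 0, y, x, z), v);
// or, for a rank-3 tensor, the rank-checked t.set(tensor_pos(y, x, z), v).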
void set(std::size_t pos_dim_5, std::size_t pos_dim_4, std::size_t y, std::size_t x, std::size_t z, float_type value) { set_ignore_rank(tensor_pos(pos_dim_5, pos_dim_4, y, x, z), value); } const tensor_shape& shape() const { return shape_; } void shrink_rank() { shape_.shrink_rank(); } void shrink_rank_with_min(std::size_t min_rank_to_keep) { shape_.shrink_rank_with_min(min_rank_to_keep); } void maximize_rank() { shape_.maximize_rank(); } std::size_t rank() const { return shape_.rank(); } std::size_t depth() const { return shape().depth_; } std::size_t height() const { return shape().height_; } std::size_t width() const { return shape().width_; } const shared_float_vec& as_vector() const { return values_; } shared_float_vec& as_vector() { return values_; } float_vec_unaligned to_vector() const { return float_vec_unaligned(fplus::convert_container(*values_)); } private: std::size_t idx_ignore_rank(const tensor_pos& pos) const { return pos.pos_dim_5_ * shape().size_dim_4_ * shape().height_ * shape().width_ * shape().depth_ + pos.pos_dim_4_ * shape().height_ * shape().width_ * shape().depth_ + pos.y_ * shape().width_ * shape().depth_ + pos.x_ * shape().depth_ + pos.z_; }; std::size_t idx(const tensor_pos& pos) const { assertion(pos.rank() == shape().rank(), "Invalid position rank for tensor"); return idx_ignore_rank(pos); }; tensor_shape shape_; shared_float_vec values_; }; typedef std::vector tensors; typedef std::vector tensors_vec; inline tensor single_tensor_from_tensors(const tensors& ts) { assertion(ts.size() == 1, "invalid number of tensors"); return ts.front(); } inline bool is_singleton_value(const tensor& t) { return t.shape().volume() == 1; } inline float_type to_singleton_value(const tensor& t) { assertion(is_singleton_value(t), "Tensor must contain exactly one value."); return t.get(tensor_pos(static_cast(0))); } template tensor transform_tensor(F f, const tensor& m) { return tensor(m.shape(), fplus::transform_convert(f, *m.as_vector())); } inline std::vector tensor_to_depth_slices(const tensor& m) { std::vector ms; ms.reserve(m.shape().depth_); for (std::size_t i = 0; i < m.shape().depth_; ++i) { ms.push_back(tensor(change_tensor_shape_dimension_by_index( m.shape(), 4, 1), 0)); } loop_over_all_dims(m.shape(), [&m, &ms](std::size_t dim5, std::size_t dim4, std::size_t y, std::size_t x, std::size_t z) { // .set and .get would work here too // but using _ignore_rank here for // improved performance. 
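// (get/set assert on every call that the position rank matches the
// tensor rank; the _ignore_rank variants skip this check in the hot loop.)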
ms[z].set_ignore_rank(tensor_pos(dim5, dim4, y, x, 0), m.get_ignore_rank(tensor_pos(dim5, dim4, y, x, z))); }); return ms; } inline tensors tensor_to_tensors_width_slices(const tensor& m) { tensors ms; ms.reserve(m.shape().width_); for (std::size_t i = 0; i < m.shape().width_; ++i) { ms.push_back(tensor(change_tensor_shape_dimension_by_index( m.shape(), 3, 1), 0)); } loop_over_all_dims(m.shape(), [&m, &ms](std::size_t dim5, std::size_t dim4, std::size_t y, std::size_t x, std::size_t z) { ms[x].set_ignore_rank(tensor_pos(dim5, dim4, y, 0, z), m.get_ignore_rank(tensor_pos(dim5, dim4, y, x, z))); }); return ms; } inline tensors tensor_to_tensors_height_slices(const tensor& m) { tensors ms; ms.reserve(m.shape().height_); for (std::size_t i = 0; i < m.shape().height_; ++i) { ms.push_back(tensor(change_tensor_shape_dimension_by_index( m.shape(), 2, 1), 0)); } loop_over_all_dims(m.shape(), [&m, &ms](std::size_t dim5, std::size_t dim4, std::size_t y, std::size_t x, std::size_t z) { ms[y].set_ignore_rank(tensor_pos(dim5, dim4, 0, x, z), m.get_ignore_rank(tensor_pos(dim5, dim4, y, x, z))); }); return ms; } inline tensors tensor_to_tensors_dim4_slices(const tensor& m) { tensors ms; ms.reserve(m.shape().size_dim_4_); for (std::size_t i = 0; i < m.shape().size_dim_4_; ++i) { ms.push_back(tensor(change_tensor_shape_dimension_by_index( m.shape(), 1, 1), 0)); } loop_over_all_dims(m.shape(), [&m, &ms](std::size_t dim5, std::size_t dim4, std::size_t y, std::size_t x, std::size_t z) { ms[dim4].set_ignore_rank(tensor_pos(dim5, 0, y, x, z), m.get_ignore_rank(tensor_pos(dim5, dim4, y, x, z))); }); return ms; } inline tensors tensor_to_tensors_dim5_slices(const tensor& m) { tensors ms; ms.reserve(m.shape().size_dim_5_); for (std::size_t i = 0; i < m.shape().size_dim_5_; ++i) { ms.push_back(tensor(change_tensor_shape_dimension_by_index( m.shape(), 0, 1), 0)); } loop_over_all_dims(m.shape(), [&m, &ms](std::size_t dim5, std::size_t dim4, std::size_t y, std::size_t x, std::size_t z) { ms[dim5].set_ignore_rank(tensor_pos(dim4, y, x, z), m.get_ignore_rank(tensor_pos(dim5, dim4, y, x, z))); }); return ms; } inline std::pair tensor_min_max_pos( const tensor& vol) { tensor_pos result_min(0, 0, 0, 0, 0); tensor_pos result_max(0, 0, 0, 0, 0); float_type value_max = std::numeric_limits::lowest(); float_type value_min = std::numeric_limits::max(); loop_over_all_dims(vol.shape(), [&](std::size_t dim5, std::size_t dim4, std::size_t y, std::size_t x, std::size_t z) { auto current_value = vol.get_ignore_rank(tensor_pos(y, x, z)); if (current_value > value_max) { result_max = tensor_pos(dim5, dim4, y, x, z); value_max = current_value; } if (current_value < value_min) { result_min = tensor_pos(dim5, dim4, y, x, z); value_min = current_value; } }); return std::make_pair( tensor_pos_with_changed_rank(result_min, vol.shape().rank()), tensor_pos_with_changed_rank(result_max, vol.shape().rank())); } inline std::vector> get_tensors_shape_sizes(const tensors& ts) { return { fplus::transform([](const auto& t) { return t.shape().size_dim_5_; }, ts), fplus::transform([](const auto& t) { return t.shape().size_dim_4_; }, ts), fplus::transform([](const auto& t) { return t.shape().height_; }, ts), fplus::transform([](const auto& t) { return t.shape().width_; }, ts), fplus::transform([](const auto& t) { return t.shape().depth_; }, ts) }; } inline tensor_pos tensor_max_pos(const tensor& vol) { return tensor_min_max_pos(vol).second; } inline tensor concatenate_tensors_depth(const tensors& in) { const auto shape_sizes = get_tensors_shape_sizes(in); 
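// shape_sizes[0..4] hold dim5, dim4, height, width and depth across all
// input tensors; for a depth concatenation the first four must all match.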
assertion( fplus::all_the_same(shape_sizes[0]) && fplus::all_the_same(shape_sizes[1]) && fplus::all_the_same(shape_sizes[2]) && fplus::all_the_same(shape_sizes[3]), "Tensor shapes differ on wrong dimension."); tensor result(change_tensor_shape_dimension_by_index( in.front().shape(), 4, fplus::sum(shape_sizes[4])), 0); std::size_t out_dim1 = 0; for (const auto& t : in) { for (std::size_t z = 0; z < t.shape().depth_; ++z, ++out_dim1) { for (std::size_t dim5 = 0; dim5 < t.shape().size_dim_5_; ++dim5) { for (std::size_t dim4 = 0; dim4 < t.shape().size_dim_4_; ++dim4) { for (std::size_t y = 0; y < t.shape().height_; ++y) { for (std::size_t x = 0; x < t.shape().width_; ++x) { result.set_ignore_rank(tensor_pos(dim5, dim4, y, x, out_dim1), t.get_ignore_rank(tensor_pos(dim5, dim4, y, x, z))); } } } } } } return result; } inline tensor concatenate_tensors_width(const tensors& in) { const auto shape_sizes = get_tensors_shape_sizes(in); assertion( fplus::all_the_same(shape_sizes[0]) && fplus::all_the_same(shape_sizes[1]) && fplus::all_the_same(shape_sizes[2]) && fplus::all_the_same(shape_sizes[4]), "Tensor shapes differ on wrong dimension."); tensor result(change_tensor_shape_dimension_by_index( in.front().shape(), 3, fplus::sum(shape_sizes[3])), 0); std::size_t out_dim2 = 0; for (const auto& t : in) { for (std::size_t x = 0; x < t.shape().width_; ++x, ++out_dim2) { for (std::size_t dim5 = 0; dim5 < t.shape().size_dim_5_; ++dim5) { for (std::size_t dim4 = 0; dim4 < t.shape().size_dim_4_; ++dim4) { for (std::size_t y = 0; y < t.shape().height_; ++y) { for (std::size_t z = 0; z < t.shape().depth_; ++z) { result.set_ignore_rank(tensor_pos(dim5, dim4, y, out_dim2, z), t.get_ignore_rank(tensor_pos(dim5, dim4, y, x, z))); } } } } } } return result; } inline tensor concatenate_tensors_height(const tensors& in) { const auto shape_sizes = get_tensors_shape_sizes(in); assertion( fplus::all_the_same(shape_sizes[0]) && fplus::all_the_same(shape_sizes[1]) && fplus::all_the_same(shape_sizes[3]) && fplus::all_the_same(shape_sizes[4]), "Tensor shapes differ on wrong dimension."); tensor result(change_tensor_shape_dimension_by_index( in.front().shape(), 2, fplus::sum(shape_sizes[2])), 0); std::size_t out_dim3 = 0; for (const auto& t : in) { for (std::size_t y = 0; y < t.shape().height_; ++y, ++out_dim3) { for (std::size_t dim5 = 0; dim5 < t.shape().size_dim_5_; ++dim5) { for (std::size_t dim4 = 0; dim4 < t.shape().size_dim_4_; ++dim4) { for (std::size_t x = 0; x < t.shape().width_; ++x) { for (std::size_t z = 0; z < t.shape().depth_; ++z) { result.set_ignore_rank(tensor_pos(dim5, dim4, out_dim3, x, z), t.get_ignore_rank(tensor_pos(dim5, dim4, y, x, z))); } } } } } } return result; } inline tensor concatenate_tensors_dim4(const tensors& in) { const auto shape_sizes = get_tensors_shape_sizes(in); assertion( fplus::all_the_same(shape_sizes[0]) && fplus::all_the_same(shape_sizes[2]) && fplus::all_the_same(shape_sizes[3]) && fplus::all_the_same(shape_sizes[4]), "Tensor shapes differ on wrong dimension."); tensor result(change_tensor_shape_dimension_by_index( in.front().shape(), 1, fplus::sum(shape_sizes[1])), 0); std::size_t out_dim4 = 0; for (const auto& t : in) { for (std::size_t dim4 = 0; dim4 < t.shape().size_dim_4_; ++dim4, ++out_dim4) { for (std::size_t dim5 = 0; dim5 < t.shape().size_dim_5_; ++dim5) { for (std::size_t y = 0; y < t.shape().height_; ++y) { for (std::size_t x = 0; x < t.shape().width_; ++x) { for (std::size_t z = 0; z < t.shape().depth_; ++z) { result.set_ignore_rank(tensor_pos(dim5, out_dim4, y, x, 
z), t.get_ignore_rank(tensor_pos(dim5, dim4, y, x, z))); } } } } } } return result; } inline tensor concatenate_tensors_dim5(const tensors& in) { const auto shape_sizes = get_tensors_shape_sizes(in); assertion( fplus::all_the_same(shape_sizes[1]) && fplus::all_the_same(shape_sizes[2]) && fplus::all_the_same(shape_sizes[3]) && fplus::all_the_same(shape_sizes[4]), "Tensor shapes differ on wrong dimension."); tensor result(change_tensor_shape_dimension_by_index( in.front().shape(), 0, fplus::sum(shape_sizes[0])), 0); std::size_t out_dim5 = 0; for (const auto& t : in) { for (std::size_t dim5 = 0; dim5 < t.shape().size_dim_5_; ++dim5, ++out_dim5) { for (std::size_t dim4 = 0; dim4 < t.shape().size_dim_4_; ++dim4) { for (std::size_t y = 0; y < t.shape().height_; ++y) { for (std::size_t x = 0; x < t.shape().width_; ++x) { for (std::size_t z = 0; z < t.shape().depth_; ++z) { result.set_ignore_rank(tensor_pos(out_dim5, dim4, y, x, z), t.get_ignore_rank(tensor_pos(dim5, dim4, y, x, z))); } } } } } } return result; } inline tensor concatenate_tensors(const tensors& ts, std::int32_t axis) { const auto rank = ts.front().shape().rank(); if (axis < 0) { axis = axis + static_cast(rank) + 1; } axis = std::min(5, axis - static_cast(rank) + 5); if (axis == 5) { return concatenate_tensors_depth(ts); } if (axis == 4) { return concatenate_tensors_width(ts); } if (axis == 3) { return concatenate_tensors_height(ts); } if (axis == 2) { return concatenate_tensors_dim4(ts); } if (axis == 1) { return concatenate_tensors_dim5(ts); } raise_error("Invalid axis (" + std::to_string(axis) + ") for tensor concatenation."); return tensor(tensor_shape(static_cast(0)), 0); } inline tensor flatten_tensor(const tensor& vol) { return tensor(tensor_shape(vol.shape().volume()), vol.as_vector()); } inline tensor pad_tensor(float_type val, std::size_t front_pad, std::size_t back_pad, std::size_t top_pad, std::size_t bottom_pad, std::size_t left_pad, std::size_t right_pad, const tensor& in) { if (front_pad == 0 && back_pad == 0 && top_pad == 0 && bottom_pad == 0 && left_pad == 0 && right_pad == 0) { return in; } tensor result(tensor_shape_with_changed_rank(tensor_shape( in.shape().size_dim_4_ + front_pad + back_pad, in.shape().height_ + top_pad + bottom_pad, in.shape().width_ + left_pad + right_pad, in.shape().depth_), in.shape().rank()), val); for (std::size_t d4 = 0; d4 < in.shape().size_dim_4_; ++d4) { for (std::size_t y = 0; y < in.shape().height_; ++y) { for (std::size_t x = 0; x < in.shape().width_; ++x) { auto result_ptr = &result.get_ref_ignore_rank(tensor_pos(0, d4 + front_pad, y + top_pad, x + left_pad, 0)); auto input_ptr = &in.get_ref_ignore_rank(tensor_pos(0, d4, y, x, 0)); auto input_ptr_end = input_ptr + in.shape().depth_; std::copy(input_ptr, input_ptr_end, result_ptr); } } } return result; } inline void check_permute_tensor_dims(const std::vector& dims_raw) { assertion( fplus::minimum(dims_raw) >= 1 && fplus::maximum(dims_raw) <= 5 && fplus::size_of_cont(fplus::nub(dims_raw)) == fplus::size_of_cont(dims_raw), "Invalid dims for permute_tensor."); } inline tensor permute_tensor(const tensor& in, const std::vector& dims_raw) { check_permute_tensor_dims(dims_raw); const auto dims = fplus::transform(fplus::subtract(1), dims_raw); const auto permute_idxs = [&dims](const std::vector& idxs) { return fplus::elems_at_idxs(dims, idxs); }; const auto out_shape = create_tensor_shape_from_dims( permute_idxs(in.shape().dimensions())); tensor out(out_shape, 0); loop_over_all_dims(in.shape(), [&](std::size_t dim5, std::size_t dim4, 
std::size_t y, std::size_t x, std::size_t z) { const auto in_pos = tensor_pos_with_changed_rank( tensor_pos(dim5, dim4, y, x, z), dims.size()); const auto out_pos = create_tensor_pos_from_dims( permute_idxs(in_pos.dimensions())); out.set_ignore_rank(out_pos, in.get_ignore_rank(in_pos)); }); return out; } inline tensor reverse_depth_dimension(const tensor& in) { tensor out = tensor(in.shape(), static_cast(0)); loop_over_all_dims(in.shape(), [&in, &out](std::size_t dim5, std::size_t dim4, std::size_t y, std::size_t x, std::size_t z) { out.set_ignore_rank(tensor_pos(dim5, dim4, y, x, in.shape().depth_ - z - 1), in.get_ignore_rank(tensor_pos(dim5, dim4, y, x, z))); }); return out; } inline tensor reverse_width_dimension(const tensor& in) { tensor out = tensor(in.shape(), static_cast(0)); loop_over_all_dims(in.shape(), [&in, &out](std::size_t dim5, std::size_t dim4, std::size_t y, std::size_t x, std::size_t z) { out.set_ignore_rank(tensor_pos(dim5, dim4, y, in.shape().width_ - x - 1, z), in.get_ignore_rank(tensor_pos(dim5, dim4, y, x, z))); }); return out; } inline tensor reverse_height_dimension(const tensor& in) { tensor out = tensor(in.shape(), static_cast(0)); loop_over_all_dims(in.shape(), [&in, &out](std::size_t dim5, std::size_t dim4, std::size_t y, std::size_t x, std::size_t z) { out.set_ignore_rank(tensor_pos(dim5, dim4, in.shape().height_ - y - 1, x, z), in.get_ignore_rank(tensor_pos(dim5, dim4, y, x, z))); }); return out; } inline tensor transpose(const tensor& in) { return permute_tensor(in, std::vector({ 2, 1 })); } inline tensor crop_tensor( std::size_t front_crop, std::size_t back_crop, std::size_t top_crop, std::size_t bottom_crop, std::size_t left_crop, std::size_t right_crop, const tensor& in) { tensor result(tensor_shape_with_changed_rank(tensor_shape( in.shape().size_dim_4_ - (front_crop + back_crop), in.shape().height_ - (top_crop + bottom_crop), in.shape().width_ - (left_crop + right_crop), in.shape().depth_), in.shape().rank()), 0); for (std::size_t d4 = 0; d4 < result.shape().size_dim_4_; ++d4) { for (std::size_t y = 0; y < result.shape().height_; ++y) { for (std::size_t x = 0; x < result.shape().width_; ++x) { for (std::size_t z = 0; z < result.shape().depth_; ++z) { result.set_ignore_rank(tensor_pos(d4, y, x, z), in.get_ignore_rank(tensor_pos(d4 + front_crop, y + top_crop, x + left_crop, z))); } } } } return result; } inline tensor dilate_tensor(const shape2& dilation_rate, const tensor& in, bool trailing_zeros) { assertion(in.shape().rank() <= 3, "Invalid rank for dilation"); if (dilation_rate == shape2(1, 1)) { return in; } const std::size_t expansion_x = trailing_zeros ? (dilation_rate.width_ - 1) : 0; const std::size_t expansion_y = trailing_zeros ? 
(dilation_rate.height_ - 1) : 0;
    auto dilated_shape = dilate_tensor_shape(dilation_rate, in.shape());
    dilated_shape.width_ += expansion_x;
    dilated_shape.height_ += expansion_y;
    const std::size_t offset_x = expansion_x - expansion_x / 2;
    const std::size_t offset_y = expansion_y - expansion_y / 2;
    tensor result(dilated_shape, 0);
    for (std::size_t y = 0; y < in.shape().height_; ++y) {
        for (std::size_t x = 0; x < in.shape().width_; ++x) {
            for (std::size_t z = 0; z < in.shape().depth_; ++z) {
                result.set_ignore_rank(tensor_pos(
                                           y * dilation_rate.height_ + offset_y,
                                           x * dilation_rate.width_ + offset_x,
                                           z),
                    in.get_ignore_rank(tensor_pos(y, x, z)));
            }
        }
    }
    return result;
}
template <typename F>
tensor elem_wise_combine_tensors(F f, const tensor& a, const tensor& b)
{
    assertion(
        (std::min(a.shape().size_dim_5_, b.shape().size_dim_5_) == 1 || a.shape().size_dim_5_ == b.shape().size_dim_5_)
            && (std::min(a.shape().size_dim_4_, b.shape().size_dim_4_) == 1 || a.shape().size_dim_4_ == b.shape().size_dim_4_)
            && (std::min(a.shape().height_, b.shape().height_) == 1 || a.shape().height_ == b.shape().height_)
            && (std::min(a.shape().width_, b.shape().width_) == 1 || a.shape().width_ == b.shape().width_)
            && (std::min(a.shape().depth_, b.shape().depth_) == 1 || a.shape().depth_ == b.shape().depth_),
        "Invalid shapes for combining tensors.");
    const tensor_shape out_shape = tensor_shape(
        std::max(a.shape().size_dim_5_, b.shape().size_dim_5_),
        std::max(a.shape().size_dim_4_, b.shape().size_dim_4_),
        std::max(a.shape().height_, b.shape().height_),
        std::max(a.shape().width_, b.shape().width_),
        std::max(a.shape().depth_, b.shape().depth_));
    tensor out_tensor = tensor(out_shape, static_cast<float_type>(0));
    loop_over_all_dims(out_tensor.shape(), [&](std::size_t dim5, std::size_t dim4, std::size_t y, std::size_t x, std::size_t z) {
        out_tensor.set_ignore_rank(tensor_pos(dim5, dim4, y, x, z),
            f(a.get_ignore_rank(tensor_pos(dim5 % a.shape().size_dim_5_,
                  dim4 % a.shape().size_dim_4_,
                  y % a.shape().height_,
                  x % a.shape().width_,
                  z % a.shape().depth_)),
                b.get_ignore_rank(tensor_pos(dim5 % b.shape().size_dim_5_,
                    dim4 % b.shape().size_dim_4_,
                    y % b.shape().height_,
                    x % b.shape().width_,
                    z % b.shape().depth_))));
    });
    out_tensor.shrink_rank_with_min(std::max(a.rank(), b.rank()));
    return out_tensor;
}
inline tensor add_tensors(const tensor& a, const tensor& b)
{
    return elem_wise_combine_tensors(std::plus<float_type>(), a, b);
}
inline tensor subtract_tensors(const tensor& a, const tensor& b)
{
    return elem_wise_combine_tensors(std::minus<float_type>(), a, b);
}
inline tensor mult_tensors(const tensor& a, const tensor& b)
{
    return elem_wise_combine_tensors(std::multiplies<float_type>(), a, b);
}
inline tensor sum_tensors(const tensors& ts)
{
    return fplus::fold_left_1(add_tensors, ts);
}
inline tensor sum_depth(const tensor& t)
{
    return sum_tensors(tensor_to_depth_slices(t));
}
inline tensor multiply_tensors(const tensors& ts_orig)
{
    return fplus::fold_left_1(mult_tensors, ts_orig);
}
inline std::size_t rank_aligned_axis_to_absolute_axis(std::size_t rank, int axis)
{
    assertion(axis >= -1, "invalid axis");
    assertion(axis <= static_cast<int>(rank), "invalid axis");
    return axis == -1 ? 5 : 5 + static_cast<std::size_t>(axis) - rank;
}
inline tensor broadcast(const tensor& t, const tensor_shape& shape)
{
    assertion(
        (t.shape().size_dim_5_ == 1 || t.shape().size_dim_5_ == shape.size_dim_5_)
            && (t.shape().size_dim_4_ == 1 || t.shape().size_dim_4_ == shape.size_dim_4_)
            && (t.shape().height_ == 1 || t.shape().height_ == shape.height_)
            && (t.shape().width_ == 1 || t.shape().width_ == shape.width_)
            && (t.shape().depth_ == 1 || t.shape().depth_ == shape.depth_),
        "Invalid shapes for combining tensors.");
    tensor out_tensor = tensor(shape, static_cast<float_type>(0));
    loop_over_all_dims(out_tensor.shape(), [&](std::size_t dim5, std::size_t dim4, std::size_t y, std::size_t x, std::size_t z) {
        out_tensor.set_ignore_rank(tensor_pos(dim5, dim4, y, x, z),
            t.get_ignore_rank(tensor_pos(
                dim5 % t.shape().size_dim_5_,
                dim4 % t.shape().size_dim_4_,
                y % t.shape().height_,
                x % t.shape().width_,
                z % t.shape().depth_)));
    });
    return out_tensor;
}
inline tensors slice_along_axis(const tensor& t, int axis)
{
    const std::size_t adjusted_axis = rank_aligned_axis_to_absolute_axis(t.shape().rank(), axis);
    if (adjusted_axis == 5) {
        return tensor_to_depth_slices(t);
    } else if (adjusted_axis == 4) {
        return tensor_to_tensors_width_slices(t);
    } else if (adjusted_axis == 3) {
        return tensor_to_tensors_height_slices(t);
    } else if (adjusted_axis == 2) {
        return tensor_to_tensors_dim4_slices(t);
    } else if (adjusted_axis == 1) {
        return tensor_to_tensors_dim5_slices(t);
    }
    raise_error("Invalid axis for slicing.");
    // Just to make the compiler happy.
    // In reality, this is never called.
    return tensors();
}
template <typename F>
tensor reduce_single_axis(F f, const tensor& t, int axis)
{
    return fplus::reduce_1(f, slice_along_axis(t, axis));
}
template <typename F>
tensor reduce(F f, const tensor& t, const std::vector<int>& axes)
{
    tensor result = t;
    for (const auto axis : axes) {
        result = reduce_single_axis(f, result, axis);
    }
    return result;
}
inline std::pair<tensor, tensor> moments(const tensor& t, const std::vector<int>& axes)
{
    const tensor summed = reduce(add_tensors, t, axes);
    const auto factor = static_cast<float_type>(t.shape().volume()) / static_cast<float_type>(summed.shape().volume());
    const auto mean_t = transform_tensor(fplus::divide_by(factor), summed);
    const auto diffs = elem_wise_combine_tensors(fplus::abs_diff<float_type>, t, mean_t);
    const auto variance_t = transform_tensor(
        fplus::divide_by(factor),
        reduce(
            add_tensors,
            elem_wise_combine_tensors(std::multiplies<float_type>(), diffs, diffs),
            axes));
    return std::make_pair(mean_t, variance_t);
}
inline tensor batch_normalization(
    const tensor& x,
    const tensor& mean,
    const tensor& variance,
    const tensor& offset,
    const tensor& scale,
    float_type variance_epsilon)
{
    // https://github.com/tensorflow/tensorflow/blob/v2.14.0/tensorflow/python/ops/nn_impl.py#L1592-L1599
    const auto inv = mult_tensors(
        transform_tensor(
            [](float_type v) { return static_cast<float_type>(1) / std::sqrt(v); },
            transform_tensor(
                fplus::add_to(variance_epsilon),
                variance)),
        scale);
    return add_tensors(
        mult_tensors(x, inv),
        subtract_tensors(
            offset,
            mult_tensors(mean, inv)));
}
inline tensor reshape(const tensor& t, const tensor_shape& target_shape)
{
    assertion(t.shape().volume() == target_shape.volume(), "Invalid target shape");
    return tensor(target_shape, t.as_vector());
}
inline tensor l2_normalize(const tensor& t, const std::vector<int>& axes)
{
    const float_type epsilon = std::numeric_limits<float_type>::epsilon();
    // https://github.com/tensorflow/tensorflow/blob/v2.14.0/tensorflow/python/ops/nn_impl.py#L705-L707
    const auto square_sum = reduce(add_tensors, transform_tensor(fplus::square<float_type>, t), axes);
    const auto x_inv_norm =
transform_tensor( [](float_type v) { return static_cast(1) / std::sqrt(v); }, transform_tensor( [epsilon](float_type x) { return std::max(x, epsilon); }, square_sum)); return mult_tensors(t, x_inv_norm); } inline tensor dot_product_tensors( const tensor& a, const tensor& b, const std::vector& axes_raw, bool normalize) { /* Move axes[0] to start of a. Move axes[1] to end of b. Reshape a into (axes[0], remaining_axes). Reshape b into (remaining_axes, axes[1]). Matrix-multiply b with a. Reshape result into: non-contracted axes of a + non-contracted axes of b. See: - https://github.com/keras-team/keras/blob/v2.11.0/keras/layers/merging/dot.py#L29-L206 - https://github.com/numpy/numpy/blob/9896b46b36c4875badc15787c403840d997cf45a/numpy/core/numeric.py#L938 - https://stackoverflow.com/questions/58963955/what-does-axes-parameter-do-in-dot-layer-in-keras - https://stackoverflow.com/questions/65348319/how-would-i-write-numpy-tensordot-in-c#comment115530443_65348319 - https://stackoverflow.com/questions/41870228/understanding-tensordot - https://stackoverflow.com/questions/42475212/c-eigen-dynamic-tensor */ assertion(axes_raw.size() == 1 || axes_raw.size() == 2, "axes must have size 1 or 2"); const auto axes = axes_raw.size() == 2 ? axes_raw : std::vector({ axes_raw.front(), axes_raw.front() }); const auto axis_a = axes[0]; const auto axis_b = axes[1]; const auto permute_target_a_suffix = fplus::keep_if( fplus::is_not_equal_to(axis_a), fplus::numbers(std::size_t(1), a.rank() + 1)); const auto permute_target_b_prefix = fplus::keep_if( fplus::is_not_equal_to(axis_b), fplus::numbers(std::size_t(1), b.rank() + 1)); const auto permute_target_a = fplus::prepend_elem(axis_a, permute_target_a_suffix); const auto permute_target_b = fplus::append_elem(axis_b, permute_target_b_prefix); const auto a_permuted = permute_tensor(normalize ? l2_normalize(a, { axis_a }) : a, permute_target_a); const auto b_permuted = permute_tensor(normalize ? l2_normalize(b, { axis_b }) : b, permute_target_b); const auto a_axis_dim_size = a.shape().dimensions()[static_cast(axis_a - 1)]; const auto b_axis_dim_size = b.shape().dimensions()[static_cast(axis_b - 1)]; const auto a_remaining_dim_sizes = fplus::elems_at_idxs( fplus::numbers(std::size_t(1), a.rank()), a_permuted.shape().dimensions()); const auto b_remaining_dim_sizes = fplus::elems_at_idxs( fplus::numbers(std::size_t(0), b.rank() - 1), b_permuted.shape().dimensions()); const auto a_remaining_dim_sizes_prod = a.rank() == 1 ? 1 : fplus::product(a_remaining_dim_sizes); const auto b_remaining_dim_sizes_prod = b.rank() == 1 ? 1 : fplus::product(b_remaining_dim_sizes); const auto out_dims = permute_target_a_suffix.size() + permute_target_b_prefix.size() == 0 ? 
std::vector { 1 } : fplus::concat(std::vector> { a_remaining_dim_sizes, b_remaining_dim_sizes }); tensor output = tensor(create_tensor_shape_from_dims(out_dims), static_cast(0)); const Eigen::Map a_mat(const_cast(a_permuted.as_vector()->data()), static_cast(a_axis_dim_size), static_cast(a_remaining_dim_sizes_prod)); const Eigen::Map b_mat(const_cast(b_permuted.as_vector()->data()), static_cast(b_remaining_dim_sizes_prod), static_cast(b_axis_dim_size)); Eigen::Map output_map(output.as_vector()->data(), static_cast(b_remaining_dim_sizes_prod), static_cast(a_remaining_dim_sizes_prod)); output_map.noalias() = b_mat * a_mat; return output; } inline tensor average_tensors(const tensors& ts) { const auto sum = sum_tensors(ts); const float_type divisor = static_cast(ts.size()); return transform_tensor(fplus::multiply_with(1 / divisor), sum); } inline tensor max_tensors(const tensors& ts) { assertion(!ts.empty(), "no tensors given"); assertion( fplus::all_the_same_on(fplus_c_mem_fn_t(tensor, shape, tensor_shape), ts), "all tensors must have the same size"); const auto ts_values = fplus::transform( fplus_c_mem_fn_t(tensor, as_vector, shared_float_vec), ts); float_vec result_values; result_values.reserve(ts_values.front()->size()); for (std::size_t i = 0; i < ts_values.front()->size(); ++i) { float_type max_val = std::numeric_limits::lowest(); for (const auto& t_vals : ts_values) { max_val = std::max(max_val, (*t_vals)[i]); } result_values.push_back(max_val); } return tensor(ts.front().shape(), std::move(result_values)); } inline tensor min_tensors(const tensors& ts) { assertion(!ts.empty(), "no tensors given"); assertion( fplus::all_the_same_on(fplus_c_mem_fn_t(tensor, shape, tensor_shape), ts), "all tensors must have the same size"); const auto ts_values = fplus::transform( fplus_c_mem_fn_t(tensor, as_vector, shared_float_vec), ts); float_vec result_values; result_values.reserve(ts_values.front()->size()); for (std::size_t i = 0; i < ts_values.front()->size(); ++i) { float_type min_val = std::numeric_limits::max(); for (const auto& t_vals : ts_values) { min_val = std::min(min_val, (*t_vals)[i]); } result_values.push_back(min_val); } return tensor(ts.front().shape(), std::move(result_values)); } inline RowMajorMatrixXf eigen_row_major_mat_from_values(std::size_t height, std::size_t width, const float_vec& values) { assertion(height * width == values.size(), "invalid shape"); RowMajorMatrixXf m(height, width); std::memcpy(m.data(), values.data(), values.size() * sizeof(float_type)); return m; } inline tensor resize2d_nearest(const tensor& in_vol, const shape2& target_size) { tensor out_vol(tensor_shape(target_size.height_, target_size.width_, in_vol.shape().depth_), 0); const float_type scale_y = static_cast(target_size.height_) / static_cast(in_vol.shape().height_); const float_type scale_x = static_cast(target_size.width_) / static_cast(in_vol.shape().width_); for (std::size_t y = 0; y < out_vol.shape().height_; ++y) { const std::size_t y_in = fplus::round((static_cast(y) + 0.5f) / scale_y - 0.5f); for (std::size_t x = 0; x < out_vol.shape().width_; ++x) { const std::size_t x_in = fplus::round((static_cast(x) + 0.5f) / scale_x - 0.5f); for (std::size_t z = 0; z < in_vol.shape().depth_; ++z) { out_vol.set_ignore_rank(tensor_pos(y, x, z), in_vol.get_ignore_rank(tensor_pos(y_in, x_in, z))); } } } return out_vol; } inline float_type interpolate_2d_value_bilinearly(const tensor& t, float_type y, float_type x, std::size_t z) { y = fplus::max(0, y); x = fplus::max(0, x); y = fplus::min(y, t.height()); x = 
fplus::min(x, t.width()); std::size_t y_top = static_cast(fplus::max(0, fplus::floor(y))); std::size_t y_bottom = static_cast(fplus::min(t.height() - 1, y_top + 1)); std::size_t x_left = static_cast(fplus::max(0, fplus::floor(x))); std::size_t x_right = static_cast(fplus::min(t.width() - 1, x_left + 1)); const auto val_top_left = t.get_ignore_rank(tensor_pos(y_top, x_left, z)); const auto val_top_right = t.get_ignore_rank(tensor_pos(y_top, x_right, z)); const auto val_bottom_left = t.get_ignore_rank(tensor_pos(y_bottom, x_left, z)); const auto val_bottom_right = t.get_ignore_rank(tensor_pos(y_bottom, x_right, z)); const auto y_factor_top = static_cast(y_bottom) - y; const auto y_factor_bottom = 1.0 - y_factor_top; const auto x_factor_left = static_cast(x_right) - x; const auto x_factor_right = 1.0 - x_factor_left; return static_cast( y_factor_top * x_factor_left * val_top_left + y_factor_top * x_factor_right * val_top_right + y_factor_bottom * x_factor_left * val_bottom_left + y_factor_bottom * x_factor_right * val_bottom_right); } inline tensor resize2d_bilinear(const tensor& in_vol, const shape2& target_size) { tensor out_vol(tensor_shape(target_size.height_, target_size.width_, in_vol.shape().depth_), 0); const float_type scale_y = static_cast(target_size.height_) / static_cast(in_vol.shape().height_); const float_type scale_x = static_cast(target_size.width_) / static_cast(in_vol.shape().width_); for (std::size_t y = 0; y < out_vol.shape().height_; ++y) { const auto y_in = (static_cast(y) + 0.5f) / scale_y - 0.5f; for (std::size_t x = 0; x < out_vol.shape().width_; ++x) { const auto x_in = (static_cast(x) + 0.5f) / scale_x - 0.5f; for (std::size_t z = 0; z < in_vol.shape().depth_; ++z) { out_vol.set_ignore_rank(tensor_pos(y, x, z), interpolate_2d_value_bilinearly(in_vol, y_in, x_in, z)); } } } return out_vol; } inline float_type interpolate_2d_value_area(const tensor& t, float_type top, float_type bottom, float_type left, float_type right, std::size_t z) { const std::size_t top_int_outer = fplus::floor(top); const std::size_t left_int_outer = fplus::floor(left); const std::size_t top_int_inner = fplus::ceil(top); const std::size_t bottom_int_inner = fplus::floor(bottom); const std::size_t left_int_inner = fplus::ceil(left); const std::size_t right_int_inner = fplus::floor(right); const float_type top_weight = static_cast(top_int_inner) - top; const float_type left_weight = static_cast(left_int_inner) - left; const float_type bottom_weight = bottom - static_cast(bottom_int_inner); const float_type right_weight = right - static_cast(right_int_inner); const float_type top_left_weight = top_weight * left_weight; const float_type top_right_weight = top_weight * right_weight; const float_type bottom_left_weight = bottom_weight * left_weight; const float_type bottom_right_weight = bottom_weight * right_weight; float_type inner_sum = static_cast(0); std::size_t inner_pixels = 0; for (std::size_t y = top_int_inner; y < bottom_int_inner; ++y) { for (std::size_t x = left_int_inner; x < right_int_inner; ++x) { inner_sum += t.get_ignore_rank(tensor_pos(y, x, z)); inner_pixels += 1; } } float_type top_sum = static_cast(0); std::size_t top_pixels = 0; for (std::size_t x = left_int_inner; x < right_int_inner; ++x) { top_sum += t.get_ignore_rank(tensor_pos(top_int_outer, x, z)); top_pixels += 1; } float_type bottom_sum = static_cast(0); std::size_t bottom_pixels = 0; for (std::size_t x = left_int_inner; x < right_int_inner; ++x) { bottom_sum += t.get_ignore_rank(tensor_pos(bottom_int_inner, x, z)); 
bottom_pixels += 1; } float_type left_sum = static_cast(0); std::size_t left_pixels = 0; for (std::size_t y = top_int_inner; y < bottom_int_inner; ++y) { left_sum += t.get_ignore_rank(tensor_pos(y, left_int_outer, z)); left_pixels += 1; } float_type right_sum = static_cast(0); std::size_t right_pixels = 0; for (std::size_t y = top_int_inner; y < bottom_int_inner; ++y) { right_sum += t.get_ignore_rank(tensor_pos(y, right_int_inner, z)); right_pixels += 1; } const float_type top_left_val = t.get_ignore_rank(tensor_pos(top_int_outer, left_int_outer, z)); const float_type top_right_val = t.get_ignore_rank(tensor_pos(top_int_outer, right_int_inner, z)); const float_type bottom_left_val = t.get_ignore_rank(tensor_pos(bottom_int_inner, left_int_outer, z)); const float_type bottom_right_val = t.get_ignore_rank(tensor_pos(bottom_int_inner, right_int_inner, z)); const float_type weighted_sum = inner_sum + top_weight * top_sum + bottom_weight * bottom_sum + left_weight * left_sum + right_weight * right_sum + top_left_weight * top_left_val + top_right_weight * top_right_val + bottom_left_weight * bottom_left_val + bottom_right_weight * bottom_right_val; const float_type num_pixels = static_cast(inner_pixels) + top_weight * static_cast(top_pixels) + bottom_weight * static_cast(bottom_pixels) + left_weight * static_cast(left_pixels) + right_weight * static_cast(right_pixels) + top_left_weight + top_right_weight + bottom_left_weight + bottom_right_weight; return weighted_sum / num_pixels; } inline tensor resize2d_area(const tensor& in_vol, const shape2& target_size) { tensor out_vol(tensor_shape(target_size.height_, target_size.width_, in_vol.shape().depth_), 0); const float_type scale_y = static_cast(target_size.height_) / static_cast(in_vol.shape().height_); const float_type scale_x = static_cast(target_size.width_) / static_cast(in_vol.shape().width_); for (std::size_t y = 0; y < out_vol.shape().height_; ++y) { const auto y_in_top = (static_cast(y)) / scale_y; const auto y_in_bottom = (static_cast(y + 1)) / scale_y; for (std::size_t x = 0; x < out_vol.shape().width_; ++x) { const auto x_in_left = (static_cast(x)) / scale_x; const auto x_in_right = (static_cast(x + 1)) / scale_x; for (std::size_t z = 0; z < in_vol.shape().depth_; ++z) { out_vol.set_ignore_rank(tensor_pos(y, x, z), interpolate_2d_value_area(in_vol, y_in_top, y_in_bottom, x_in_left, x_in_right, z)); } } } return out_vol; } inline tensor resize_tensor_2d(const tensor& in_vol, const shape2& target_size, const std::string& interpolation) { if (interpolation == "nearest") { return resize2d_nearest(in_vol, target_size); } else if (interpolation == "bilinear") { return resize2d_bilinear(in_vol, target_size); } else if (interpolation == "area") { return resize2d_area(in_vol, target_size); } else { raise_error("Invalid interpolation method: " + interpolation); return in_vol; } } inline tensor smart_resize_tensor_2d(const tensor& in_vol, const shape2& target_size, const std::string& interpolation) { const std::size_t height = in_vol.shape().height_; const std::size_t width = in_vol.shape().width_; std::size_t crop_height = static_cast( static_cast(width * target_size.height_) / static_cast(target_size.width_)); std::size_t crop_width = static_cast( static_cast(height * target_size.width_) / static_cast(target_size.height_)); crop_height = std::min(height, crop_height); crop_width = std::min(width, crop_width); const std::size_t crop_box_hstart = static_cast(static_cast(height - crop_height) / 2.0f); const std::size_t crop_box_wstart = 
static_cast(static_cast(width - crop_width) / 2.0f); const tensor cropped = crop_tensor( 0, 0, crop_box_hstart, height - crop_height - crop_box_hstart, crop_box_wstart, width - crop_width - crop_box_wstart, in_vol); return resize_tensor_2d(cropped, target_size, interpolation); } inline tensor softmax(const tensor& input) { tensor output = tensor(input.shape(), static_cast(0)); // Softmax function is applied along channel dimension. for (size_t y = 0; y < input.shape().height_; ++y) { for (size_t x = 0; x < input.shape().width_; ++x) { float_type m = std::numeric_limits::lowest(); for (size_t z_class = 0; z_class < input.shape().depth_; ++z_class) { m = std::max(m, input.get_ignore_rank(tensor_pos(y, x, z_class))); } // We are not using Kahan summation, since the number // of object classes is usually quite small. float_type sum_shifted = 0.0f; for (size_t z_class = 0; z_class < input.shape().depth_; ++z_class) { sum_shifted += std::exp(input.get_ignore_rank(tensor_pos(y, x, z_class)) - m); } const auto log_sum_shifted = std::log(sum_shifted); for (size_t z_class = 0; z_class < input.shape().depth_; ++z_class) { const auto result = std::exp(input.get_ignore_rank(tensor_pos(y, x, z_class)) - m - log_sum_shifted); output.set_ignore_rank(tensor_pos(y, x, z_class), std::isinf(result) ? static_cast(0) : result); } } } return output; } } using float_type = internal::float_type; using float_vec = internal::float_vec; using shared_float_vec = internal::shared_float_vec; using tensor = internal::tensor; using tensors = internal::tensors; using tensors_vec = internal::tensors_vec; inline std::string show_tensor(const tensor& t) { const auto xs = *t.as_vector(); const auto test_strs = fplus::transform( fplus::fwd::show_float_fill_left(' ', 0, 4), xs); const auto max_length = fplus::size_of_cont(fplus::maximum_on( fplus::size_of_cont, test_strs)); const auto strs = fplus::transform( fplus::fwd::show_float_fill_left(' ', max_length, 4), xs); return fplus::show_cont( fplus::split_every(t.shape().size_dim_4_, fplus::split_every(t.shape().height_, fplus::split_every(t.shape().width_, fplus::split_every(t.shape().depth_, strs))))); } inline std::string show_tensors(const tensors& ts) { return fplus::show_cont(fplus::transform(show_tensor, ts)); } // Converts a memory block holding 8-bit values into a tensor. // Data must be stored row-wise (and channels_last). // Scales the values from range [0, 255] into [low, high]. // Example: // With low = 0.0 and high = 1.0 every value is essentially divided by 255. // May be used to convert an image (bgr, rgba, gray, etc.) to a tensor. inline tensor tensor_from_bytes(const std::uint8_t* value_ptr, std::size_t height, std::size_t width, std::size_t channels, internal::float_type low = 0.0f, internal::float_type high = 1.0f) { const std::vector bytes( value_ptr, value_ptr + height * width * channels); auto values = fplus::transform_convert( [low, high](std::uint8_t b) -> internal::float_type { return fplus::reference_interval(low, high, static_cast(0.0f), static_cast(255.0f), static_cast(b)); }, bytes); return tensor(tensor_shape(height, width, channels), std::move(values)); } // Converts a tensor into a memory block holding 8-bit values. // Data will be stored row-wise (and channels_last). // Scales the values from range [low, high] into [0, 255]. // May be used to convert a tensor into an image. 
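// Usage sketch (buffer name is illustrative, assuming a rank-3 tensor t
// with values in [0, 1]; the buffer must hold exactly t.shape().volume() bytes):
//   std::vector<std::uint8_t> img(t.shape().volume());
//   tensor_into_bytes(t, img.data(), img.size(), 0.0f, 1.0f);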
inline void tensor_into_bytes(const tensor& t, std::uint8_t* value_ptr, std::size_t bytes_available, internal::float_type low = 0.0f, internal::float_type high = 1.0f) { const auto values = t.as_vector(); internal::assertion(bytes_available == values->size(), "invalid buffer size"); const auto bytes = fplus::transform( [low, high](internal::float_type v) -> std::uint8_t { return fplus::round( fplus::reference_interval( static_cast(0.0f), static_cast(255.0f), low, high, v)); }, *values); for (std::size_t i = 0; i < values->size(); ++i) { *(value_ptr++) = bytes[i]; } } // Converts a tensor into a vector of bytes. // Data will be stored row-wise (and channels_last). // Scales the values from range [low, high] into [0, 255]. inline std::vector tensor_to_bytes(const tensor& t, internal::float_type low = 0.0f, internal::float_type high = 1.0f) { std::vector bytes(t.shape().volume(), 0); tensor_into_bytes(t, bytes.data(), bytes.size(), low, high); return bytes; } inline tensors_vec reshape_tensor_vectors( std::size_t vectors_size, std::size_t vector_size, std::size_t depth, std::size_t height, std::size_t width, const tensors_vec& tss) { const auto values = fplus::concat(fplus::concat( fplus::transform_inner( [](const tensor& t) -> float_vec { return *t.as_vector(); }, tss))); fdeep::internal::assertion(values.size() == vectors_size * vector_size * height * width * depth, "Invalid number of values for reshape target."); const auto ts = fplus::transform( [&](fdeep::float_vec v) -> tensor { return tensor(tensor_shape(height, width, depth), std::move(v)); }, fplus::split_every(depth * height * width, values)); return fplus::split_every(vector_size, ts); } } frugally-deep-0.17.1/include/fdeep/tensor_pos.hpp000066400000000000000000000114141476372554500217750ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/common.hpp" #include #include #include namespace fdeep { namespace internal { class tensor_pos { public: // The dimensions are right-aligned (left-padded) compared to Keras. // I.e., if you have a position (or shape) of (a, b) in Keras // it corresponds to (0, 0, 0, a, b) in frugally-deep. 
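// Example (illustrative): a Keras position (3, 7) is created as
// tensor_pos(3, 7) with rank 2 and stored internally as (0, 0, 0, 3, 7).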
explicit tensor_pos( std::size_t pos_dim_5, std::size_t pos_dim_4, std::size_t y, std::size_t x, std::size_t z) : pos_dim_5_(pos_dim_5) , pos_dim_4_(pos_dim_4) , y_(y) , x_(x) , z_(z) , rank_(5) { } explicit tensor_pos( std::size_t pos_dim_4, std::size_t y, std::size_t x, std::size_t z) : pos_dim_5_(0) , pos_dim_4_(pos_dim_4) , y_(y) , x_(x) , z_(z) , rank_(4) { } explicit tensor_pos( std::size_t y, std::size_t x, std::size_t z) : pos_dim_5_(0) , pos_dim_4_(0) , y_(y) , x_(x) , z_(z) , rank_(3) { } explicit tensor_pos( std::size_t x, std::size_t z) : pos_dim_5_(0) , pos_dim_4_(0) , y_(0) , x_(x) , z_(z) , rank_(2) { } explicit tensor_pos( std::size_t z) : pos_dim_5_(0) , pos_dim_4_(0) , y_(0) , x_(0) , z_(z) , rank_(1) { } std::size_t rank() const { return rank_; } std::vector dimensions() const { if (rank() == 5) return { pos_dim_5_, pos_dim_4_, y_, x_, z_ }; if (rank() == 4) return { pos_dim_4_, y_, x_, z_ }; if (rank() == 3) return { y_, x_, z_ }; if (rank() == 2) return { x_, z_ }; return { z_ }; } std::size_t pos_dim_5_; std::size_t pos_dim_4_; std::size_t y_; std::size_t x_; std::size_t z_; private: std::size_t rank_; }; inline tensor_pos create_tensor_pos_from_dims( const std::vector& dimensions) { assertion(dimensions.size() >= 1 && dimensions.size() <= 5, "Invalid tensor-pos dimensions"); if (dimensions.size() == 5) return tensor_pos( dimensions[0], dimensions[1], dimensions[2], dimensions[3], dimensions[4]); if (dimensions.size() == 4) return tensor_pos( dimensions[0], dimensions[1], dimensions[2], dimensions[3]); if (dimensions.size() == 3) return tensor_pos( dimensions[0], dimensions[1], dimensions[2]); if (dimensions.size() == 2) return tensor_pos( dimensions[0], dimensions[1]); return tensor_pos(dimensions[0]); } inline tensor_pos tensor_pos_with_changed_rank(const tensor_pos& s, std::size_t rank) { assertion(rank >= 1 && rank <= 5, "Invalid target rank"); if (rank == 4) { assertion(s.pos_dim_5_ == 0, "Invalid target rank"); return tensor_pos(s.pos_dim_4_, s.y_, s.x_, s.z_); } if (rank == 3) { assertion(s.pos_dim_5_ == 0, "Invalid target rank"); assertion(s.pos_dim_4_ == 0, "Invalid target rank"); return tensor_pos(s.y_, s.x_, s.z_); } if (rank == 2) { assertion(s.pos_dim_5_ == 0, "Invalid target rank"); assertion(s.pos_dim_4_ == 0, "Invalid target rank"); assertion(s.y_ == 0, "Invalid target rank"); return tensor_pos(s.x_, s.z_); } if (rank == 1) { assertion(s.pos_dim_5_ == 0, "Invalid target rank"); assertion(s.pos_dim_4_ == 0, "Invalid target rank"); assertion(s.y_ == 0, "Invalid target rank"); assertion(s.x_ == 0, "Invalid target rank"); return tensor_pos(s.z_); } return tensor_pos(s.pos_dim_5_, s.pos_dim_4_, s.y_, s.x_, s.z_); } } using tensor_pos = internal::tensor_pos; } frugally-deep-0.17.1/include/fdeep/tensor_shape.hpp000066400000000000000000000317001476372554500222740ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. // (See accompanying LICENSE file or at // https://opensource.org/licenses/MIT) #pragma once #include "fdeep/common.hpp" #include "fdeep/shape2.hpp" #include "fdeep/tensor_shape_variable.hpp" #include #include #include #include #include namespace fdeep { namespace internal { class tensor_shape { public: // The outer (left-most) dimensions are not used for batch prediction. // If you like to do multiple forward passes on a model at once, // use fdeep::model::predict_multi instead. 
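// Example (illustrative): a Keras input of shape (None, 224, 224, 3)
// corresponds to tensor_shape(224, 224, 3), i.e., rank 3; the batch
// dimension is not part of the shape.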
explicit tensor_shape( std::size_t size_dim_5, std::size_t size_dim_4, std::size_t height, std::size_t width, std::size_t depth) : size_dim_5_(size_dim_5) , size_dim_4_(size_dim_4) , height_(height) , width_(width) , depth_(depth) , rank_(5) { } explicit tensor_shape( std::size_t size_dim_4, std::size_t height, std::size_t width, std::size_t depth) : size_dim_5_(1) , size_dim_4_(size_dim_4) , height_(height) , width_(width) , depth_(depth) , rank_(4) { } explicit tensor_shape( std::size_t height, std::size_t width, std::size_t depth) : size_dim_5_(1) , size_dim_4_(1) , height_(height) , width_(width) , depth_(depth) , rank_(3) { } explicit tensor_shape( std::size_t width, std::size_t depth) : size_dim_5_(1) , size_dim_4_(1) , height_(1) , width_(width) , depth_(depth) , rank_(2) { } explicit tensor_shape( std::size_t depth) : size_dim_5_(1) , size_dim_4_(1) , height_(1) , width_(1) , depth_(depth) , rank_(1) { } std::size_t volume() const { return size_dim_5_ * size_dim_4_ * height_ * width_ * depth_; } void assert_is_shape_2() const { assertion( size_dim_5_ == 1 && size_dim_4_ == 1 && depth_ == 1, "Only height and width may be not equal 1."); } void assert_is_shape_3() const { assertion( size_dim_5_ == 1 && size_dim_4_ == 1, "Only height, width and depth may be not equal 1."); } shape2 without_depth() const { assert_is_shape_3(); return shape2(height_, width_); } std::size_t rank() const { assertion(rank_ >= 1 && rank_ <= 5, "Invalid rank"); return rank_; } std::size_t minimal_rank() const { if (size_dim_5_ > 1) return 5; if (size_dim_4_ > 1) return 4; if (height_ > 1) return 3; if (width_ > 1) return 2; return 1; } void shrink_rank() { rank_ = minimal_rank(); } void shrink_rank_with_min(std::size_t min_rank_to_keep) { rank_ = fplus::max(minimal_rank(), min_rank_to_keep); } void maximize_rank() { rank_ = 5; } std::vector dimensions() const { if (rank() == 5) return { size_dim_5_, size_dim_4_, height_, width_, depth_ }; if (rank() == 4) return { size_dim_4_, height_, width_, depth_ }; if (rank() == 3) return { height_, width_, depth_ }; if (rank() == 2) return { width_, depth_ }; return { depth_ }; } std::size_t size_dim_5_; std::size_t size_dim_4_; std::size_t height_; std::size_t width_; std::size_t depth_; private: std::size_t rank_; }; inline tensor_shape create_tensor_shape_from_dims( const std::vector& dimensions) { assertion(dimensions.size() >= 1 && dimensions.size() <= 5, "Invalid tensor-shape dimensions"); if (dimensions.size() == 5) return tensor_shape( dimensions[0], dimensions[1], dimensions[2], dimensions[3], dimensions[4]); if (dimensions.size() == 4) return tensor_shape( dimensions[0], dimensions[1], dimensions[2], dimensions[3]); if (dimensions.size() == 3) return tensor_shape( dimensions[0], dimensions[1], dimensions[2]); if (dimensions.size() == 2) return tensor_shape( dimensions[0], dimensions[1]); return tensor_shape(dimensions[0]); } inline tensor_shape make_tensor_shape_with( const tensor_shape& default_shape, const tensor_shape_variable shape) { if (shape.rank() == 1) return tensor_shape( fplus::just_with_default(default_shape.depth_, shape.depth_)); if (shape.rank() == 2) return tensor_shape( fplus::just_with_default(default_shape.width_, shape.width_), fplus::just_with_default(default_shape.depth_, shape.depth_)); if (shape.rank() == 3) return tensor_shape( fplus::just_with_default(default_shape.height_, shape.height_), fplus::just_with_default(default_shape.width_, shape.width_), fplus::just_with_default(default_shape.depth_, shape.depth_)); if (shape.rank() == 
4) return tensor_shape( fplus::just_with_default(default_shape.size_dim_4_, shape.size_dim_4_), fplus::just_with_default(default_shape.height_, shape.height_), fplus::just_with_default(default_shape.width_, shape.width_), fplus::just_with_default(default_shape.depth_, shape.depth_)); else return tensor_shape( fplus::just_with_default(default_shape.size_dim_5_, shape.size_dim_5_), fplus::just_with_default(default_shape.size_dim_4_, shape.size_dim_4_), fplus::just_with_default(default_shape.height_, shape.height_), fplus::just_with_default(default_shape.width_, shape.width_), fplus::just_with_default(default_shape.depth_, shape.depth_)); } inline tensor_shape derive_fixed_tensor_shape( std::size_t values, const tensor_shape_variable shape) { const auto inferred = values / shape.minimal_volume(); return make_tensor_shape_with( tensor_shape(inferred, inferred, inferred, inferred, inferred), shape); } inline bool tensor_shape_equals_tensor_shape_variable( const tensor_shape& lhs, const tensor_shape_variable& rhs) { return (rhs.rank() == lhs.rank()) && (rhs.size_dim_5_.is_nothing() || lhs.size_dim_5_ == rhs.size_dim_5_.unsafe_get_just()) && (rhs.size_dim_4_.is_nothing() || lhs.size_dim_4_ == rhs.size_dim_4_.unsafe_get_just()) && (rhs.height_.is_nothing() || lhs.height_ == rhs.height_.unsafe_get_just()) && (rhs.width_.is_nothing() || lhs.width_ == rhs.width_.unsafe_get_just()) && (rhs.depth_.is_nothing() || lhs.depth_ == rhs.depth_.unsafe_get_just()); } inline bool operator==(const tensor_shape& lhs, const tensor_shape_variable& rhs) { return tensor_shape_equals_tensor_shape_variable(lhs, rhs); } inline bool operator==(const std::vector& lhss, const std::vector& rhss) { return fplus::all(fplus::zip_with(tensor_shape_equals_tensor_shape_variable, lhss, rhss)); } inline bool operator==(const tensor_shape& lhs, const tensor_shape& rhs) { return lhs.rank() == rhs.rank() && lhs.size_dim_5_ == rhs.size_dim_5_ && lhs.size_dim_4_ == rhs.size_dim_4_ && lhs.height_ == rhs.height_ && lhs.width_ == rhs.width_ && lhs.depth_ == rhs.depth_; } inline tensor_shape tensor_shape_with_changed_rank(const tensor_shape& s, std::size_t rank) { assertion(rank >= 1 && rank <= 5, "Invalid target rank"); if (rank == 4) { assertion(s.size_dim_5_ == 1, "Invalid target rank"); return tensor_shape(s.size_dim_4_, s.height_, s.width_, s.depth_); } if (rank == 3) { assertion(s.size_dim_5_ == 1, "Invalid target rank"); assertion(s.size_dim_4_ == 1, "Invalid target rank"); return tensor_shape(s.height_, s.width_, s.depth_); } if (rank == 2) { assertion(s.size_dim_5_ == 1, "Invalid target rank"); assertion(s.size_dim_4_ == 1, "Invalid target rank"); assertion(s.height_ == 1, "Invalid target rank"); return tensor_shape(s.width_, s.depth_); } if (rank == 1) { assertion(s.size_dim_5_ == 1, "Invalid target rank"); assertion(s.size_dim_4_ == 1, "Invalid target rank"); assertion(s.height_ == 1, "Invalid target rank"); assertion(s.width_ == 1, "Invalid target rank"); return tensor_shape(s.depth_); } return tensor_shape(s.size_dim_5_, s.size_dim_4_, s.height_, s.width_, s.depth_); } inline tensor_shape dilate_tensor_shape( const shape2& dilation_rate, const tensor_shape& s) { assertion(dilation_rate.height_ >= 1, "invalid dilation rate"); assertion(dilation_rate.width_ >= 1, "invalid dilation rate"); const std::size_t height = s.height_ + (s.height_ - 1) * (dilation_rate.height_ - 1); const std::size_t width = s.width_ + (s.width_ - 1) * (dilation_rate.width_ - 1); return tensor_shape_with_changed_rank( tensor_shape(s.size_dim_5_, 
s.size_dim_4_, height, width, s.depth_), s.rank()); } inline std::size_t get_tensor_shape_dimension_by_index(const tensor_shape& s, const std::size_t idx) { if (idx == 0) return s.size_dim_5_; if (idx == 1) return s.size_dim_4_; if (idx == 2) return s.height_; if (idx == 3) return s.width_; if (idx == 4) return s.depth_; raise_error("Invalid tensor_shape index."); return 0; } inline tensor_shape change_tensor_shape_dimension_by_index(const tensor_shape& in, const std::size_t idx, const std::size_t dim) { assertion(idx <= 4, "Invalid dimension index"); assertion(dim > 0, "Invalid dimension size"); const std::size_t out_rank = std::max(5 - idx, in.rank()); assertion(out_rank >= 1 && out_rank <= 5, "Invalid target rank"); const std::size_t size_dim_5 = idx == 0 ? dim : in.size_dim_5_; const std::size_t size_dim_4 = idx == 1 ? dim : in.size_dim_4_; const std::size_t height = idx == 2 ? dim : in.height_; const std::size_t width = idx == 3 ? dim : in.width_; const std::size_t depth = idx == 4 ? dim : in.depth_; if (out_rank == 1) return tensor_shape(depth); if (out_rank == 2) return tensor_shape(width, depth); if (out_rank == 3) return tensor_shape(height, width, depth); if (out_rank == 4) return tensor_shape(size_dim_4, height, width, depth); return tensor_shape(size_dim_5, size_dim_4, height, width, depth); } } using tensor_shape = internal::tensor_shape; inline std::string show_tensor_shape(const tensor_shape& s) { const std::vector dimensions = { s.size_dim_5_, s.size_dim_4_, s.height_, s.width_, s.depth_ }; return std::to_string(s.rank()) + fplus::show_cont_with_frame(", ", "(", ")", fplus::drop(5 - s.rank(), dimensions)); } inline std::string show_tensor_shapes( const std::vector& shapes) { return fplus::show_cont(fplus::transform(show_tensor_shape, shapes)); } template void loop_over_all_dims(const tensor_shape& shape, F f) { for (std::size_t dim5 = 0; dim5 < shape.size_dim_5_; ++dim5) { for (std::size_t dim4 = 0; dim4 < shape.size_dim_4_; ++dim4) { for (std::size_t y = 0; y < shape.height_; ++y) { for (std::size_t x = 0; x < shape.width_; ++x) { for (std::size_t z = 0; z < shape.depth_; ++z) { f(dim5, dim4, y, x, z); } } } } } } } frugally-deep-0.17.1/include/fdeep/tensor_shape_variable.hpp000066400000000000000000000100451476372554500241400ustar00rootroot00000000000000// Copyright 2016, Tobias Hermann. // https://github.com/Dobiasd/frugally-deep // Distributed under the MIT License. 
// (See accompanying LICENSE file or at
// https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/common.hpp"

#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <string>
#include <vector>

namespace fdeep {
namespace internal {
    class tensor_shape_variable {
    public:
        explicit tensor_shape_variable(
            fplus::maybe<std::size_t> size_dim_5,
            fplus::maybe<std::size_t> size_dim_4,
            fplus::maybe<std::size_t> height,
            fplus::maybe<std::size_t> width,
            fplus::maybe<std::size_t> depth)
            : size_dim_5_(size_dim_5)
            , size_dim_4_(size_dim_4)
            , height_(height)
            , width_(width)
            , depth_(depth)
            , rank_(5)
        {
        }
        explicit tensor_shape_variable(
            fplus::maybe<std::size_t> size_dim_4,
            fplus::maybe<std::size_t> height,
            fplus::maybe<std::size_t> width,
            fplus::maybe<std::size_t> depth)
            : size_dim_5_(1)
            , size_dim_4_(size_dim_4)
            , height_(height)
            , width_(width)
            , depth_(depth)
            , rank_(4)
        {
        }
        explicit tensor_shape_variable(
            fplus::maybe<std::size_t> height,
            fplus::maybe<std::size_t> width,
            fplus::maybe<std::size_t> depth)
            : size_dim_5_(1)
            , size_dim_4_(1)
            , height_(height)
            , width_(width)
            , depth_(depth)
            , rank_(3)
        {
        }
        explicit tensor_shape_variable(
            fplus::maybe<std::size_t> width,
            fplus::maybe<std::size_t> depth)
            : size_dim_5_(1)
            , size_dim_4_(1)
            , height_(1)
            , width_(width)
            , depth_(depth)
            , rank_(2)
        {
        }
        explicit tensor_shape_variable(
            fplus::maybe<std::size_t> depth)
            : size_dim_5_(1)
            , size_dim_4_(1)
            , height_(1)
            , width_(1)
            , depth_(depth)
            , rank_(1)
        {
        }
        std::size_t minimal_volume() const
        {
            return fplus::just_with_default<std::size_t>(1, size_dim_5_)
                * fplus::just_with_default<std::size_t>(1, size_dim_4_)
                * fplus::just_with_default<std::size_t>(1, height_)
                * fplus::just_with_default<std::size_t>(1, width_)
                * fplus::just_with_default<std::size_t>(1, depth_);
        }
        std::size_t rank() const
        {
            return rank_;
        }
        fplus::maybe<std::size_t> size_dim_5_;
        fplus::maybe<std::size_t> size_dim_4_;
        fplus::maybe<std::size_t> height_;
        fplus::maybe<std::size_t> width_;
        fplus::maybe<std::size_t> depth_;

    private:
        std::size_t rank_;
    };

    inline bool operator==(const tensor_shape_variable& lhs, const tensor_shape_variable& rhs)
    {
        return lhs.size_dim_5_ == rhs.size_dim_5_
            && lhs.size_dim_4_ == rhs.size_dim_4_
            && lhs.height_ == rhs.height_
            && lhs.width_ == rhs.width_
            && lhs.depth_ == rhs.depth_;
    }

    inline bool operator!=(const tensor_shape_variable& lhs, const tensor_shape_variable& rhs)
    {
        return !(lhs == rhs);
    }
}

using tensor_shape_variable = internal::tensor_shape_variable;

inline std::string show_tensor_shape_variable(const tensor_shape_variable& s)
{
    const std::vector<fplus::maybe<std::size_t>> dimensions = {
        s.size_dim_5_,
        s.size_dim_4_,
        s.height_,
        s.width_,
        s.depth_
    };
    const auto dimensions_repr = fplus::transform(
        fplus::show_maybe<std::size_t>, fplus::drop(5 - s.rank(), dimensions));
    return std::to_string(s.rank()) + fplus::show_cont_with_frame(", ", "(", ")", dimensions_repr);
}

inline std::string show_tensor_shapes_variable(
    const std::vector<tensor_shape_variable>& shapes)
{
    return fplus::show_cont(fplus::transform(show_tensor_shape_variable, shapes));
}
}
frugally-deep-0.17.1/keras_export/000077500000000000000000000000001476372554500170705ustar00rootroot00000000000000
frugally-deep-0.17.1/keras_export/convert_model.py000077500000000000000000000677051476372554500223110ustar00rootroot00000000000000#!/usr/bin/env python3
"""Convert a Keras model to frugally-deep format.
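
Usage sketch (assuming the script's CLI takes an input model path and an
output JSON path; the file names here are placeholders):

    python3 keras_export/convert_model.py my_model.keras my_model.json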
""" import argparse import base64 import datetime import hashlib import json from typing import Tuple, Union, Mapping, List, Callable, Set, Any, TypeVar import numpy as np import numpy.typing # pylint: disable=unused-import from keras import backend as K, Layer from keras.layers import Input, Embedding, CategoryEncoding from keras.models import Model, load_model from keras.src import Functional __author__ = "Tobias Hermann" __copyright__ = "Copyright 2017, Tobias Hermann" __license__ = "MIT" __maintainer__ = "Tobias Hermann, https://github.com/Dobiasd/frugally-deep" __email__ = "editgym@gmail.com" NDFloat32Array = np.typing.NDArray[np.float32] NDUInt32Array = np.typing.NDArray[np.int32] Shape = Tuple[int, ...] LayerConfig = Union[None, Mapping[str, Union[float, list[str], list[list[str]]]]] TensorRepr = Mapping[str, Union[Shape, List[str]]] TypeT = TypeVar('TypeT') def as_list(value_or_values: Union[TypeT, List[TypeT]]) -> List[TypeT]: """Leave lists untouched, convert non-list types to a singleton list""" if isinstance(value_or_values, list): return value_or_values return [value_or_values] def keras_shape_to_fdeep_tensor_shape(raw_shape: Shape) -> Shape: """Convert a keras shape to an fdeep shape""" return raw_shape[1:] def get_layer_input_shape(layer: Layer) -> Shape: """It is stored in a different property depending on the situation.""" if hasattr(layer, 'batch_shape'): return tuple(layer.batch_shape) return tuple(layer.input.shape) def get_layer_input_shape_tensor_shape(layer: Layer) -> Shape: """Convert layer input shape to an fdeep shape""" return keras_shape_to_fdeep_tensor_shape(get_layer_input_shape(layer)) def show_tensor(tens: NDFloat32Array) -> TensorRepr: """Serialize 3-tensor to a dict""" return { 'shape': tens.shape[1:], 'values': encode_floats(tens.flatten()) } def get_model_input_layers(model: Model) -> List[Layer]: """Gets the input layers from model.layers in the correct input order.""" if len(model.inputs) == 1: from keras.src.layers.core.input_layer import InputLayer input_layers = [] for layer in model.layers: if isinstance(layer, InputLayer): input_layers.append(layer) return input_layers input_layer_names = [model_input.name for model_input in model.inputs] model_layers = {layer.name: layer for layer in model.layers} return [model_layers[layer_names] for layer_names in input_layer_names] def measure_predict(model: Model, data_in: List[NDFloat32Array]) -> Tuple[List[NDFloat32Array], float]: """Returns output and duration in seconds""" start_time = datetime.datetime.now() data_out = model.predict(data_in) end_time = datetime.datetime.now() duration = end_time - start_time print('Forward pass took {} s.'.format(duration.total_seconds())) return data_out, duration.total_seconds() def replace_none_with(value: int, shape: Shape) -> Shape: """Replace every None with a fixed value.""" return tuple(list(map(lambda x: x if x is not None else value, shape))) def get_first_outbound_op(layer: Layer) -> Functional: """Determine primary outbound operation""" return layer._outbound_nodes[0].operation def are_embedding_and_category_encoding_layer_positions_ok_for_testing(model: Model) -> bool: """ Test data can only be generated if all Embedding layers and CategoryEncoding layers are positioned directly behind the input nodes. 
""" def embedding_layer_names(model: Model) -> Set[str]: layers = model.layers result = set() for layer in layers: if isinstance(layer, Embedding): result.add(layer.name) layer_type = type(layer).__name__ if layer_type in ['Model', 'Sequential', 'Functional']: result.union(embedding_layer_names(layer)) return result def embedding_layer_names_at_input_nodes(model: Model) -> Set[str]: result = set() for input_layer in get_model_input_layers(model): if input_layer._outbound_nodes and ( isinstance(get_first_outbound_op(input_layer), Embedding) or isinstance(get_first_outbound_op(input_layer), CategoryEncoding)): result.add(get_first_outbound_op(input_layer).name) return result return embedding_layer_names(model) == embedding_layer_names_at_input_nodes(model) def gen_test_data(model: Model) -> Mapping[str, List[TensorRepr]]: """Generate data for model verification test.""" def set_shape_idx_0_to_1_if_none(shape: Shape) -> Shape: """Change first element in tuple to 1.""" if shape[0] is not None: return shape shape_lst = list(shape) shape_lst[0] = 1 shape = tuple(shape_lst) return shape def generate_input_data(input_layer: Layer) -> NDFloat32Array: """Random data fitting the input shape of a layer.""" random_fn: Callable[[Shape], Union[NDFloat32Array, NDUInt32Array]] if input_layer._outbound_nodes and isinstance( get_first_outbound_op(input_layer), Embedding): random_fn = lambda size: np.random.randint( 0, get_first_outbound_op(input_layer).input_dim, size) elif input_layer._outbound_nodes and isinstance( get_first_outbound_op(input_layer), CategoryEncoding): random_fn = lambda size: np.random.randint( 0, get_first_outbound_op(input_layer).num_tokens, size) else: random_fn = lambda size: np.random.normal(size=size).astype(np.float32) shape = get_layer_input_shape(input_layer) data = random_fn(replace_none_with(32, set_shape_idx_0_to_1_if_none(shape))).astype(np.float32) return data assert are_embedding_and_category_encoding_layer_positions_ok_for_testing( model), 'Test data can only be generated if embedding layers are positioned directly after input nodes.' 
data_in: List[NDFloat32Array] = list(map(generate_input_data, get_model_input_layers(model))) data_out_test: List[NDFloat32Array] warm_up_runs = 3 test_runs = 5 for i in range(warm_up_runs): if i == 0: data_out_test_raw, _ = measure_predict(model, data_in) data_out_test = as_list(data_out_test_raw) else: measure_predict(model, data_in) duration_sum = 0.0 print('Starting performance measurements.') for _ in range(test_runs): data_out, duration = measure_predict(model, data_in) duration_sum = duration_sum + duration duration_avg = duration_sum / test_runs print('Forward pass took {} s on average.'.format(duration_avg)) return { 'inputs': list(map(show_tensor, data_in)), 'outputs': list(map(show_tensor, data_out_test)) } def split_every(size: int, seq: str) -> List[str]: """Split a sequence into chunks of the given size.""" return [seq[pos:pos + size] for pos in range(0, len(seq), size)] def encode_floats(arr: NDFloat32Array) -> List[str]: """Serialize a sequence of floats.""" return list(split_every(1024, base64.b64encode(arr).decode('ascii'))) def prepare_filter_weights_conv_2d(weights: NDFloat32Array) -> NDFloat32Array: """Change dimension order of 2d filter weights to the one used in fdeep""" assert len(weights.shape) == 4 return np.moveaxis(weights, [0, 1, 2, 3], [1, 2, 3, 0]).flatten() def prepare_filter_weights_slice_conv_2d(weights: NDFloat32Array) -> NDFloat32Array: """Change dimension order of 2d filter weights to the one used in fdeep""" assert len(weights.shape) == 4 return np.moveaxis(weights, [0, 1, 2, 3], [1, 2, 0, 3]).flatten() def prepare_filter_weights_conv_1d(weights: NDFloat32Array) -> NDFloat32Array: """Change dimension order of 1d filter weights to the one used in fdeep""" assert len(weights.shape) == 3 return np.moveaxis(weights, [0, 1, 2], [1, 2, 0]).flatten() def prepare_filter_weights_conv_1d_transpose(weights: NDFloat32Array) -> NDFloat32Array: """Change dimension order of 1d filter weights to the one used in fdeep""" assert len(weights.shape) == 3 return np.moveaxis(weights, [0, 1, 2], [1, 0, 2]).flatten() def prepare_filter_weights_conv_2d_transpose(weights: NDFloat32Array) -> NDFloat32Array: """Change dimension order of 2d filter weights to the one used in fdeep""" assert len(weights.shape) == 4 return np.moveaxis(weights, [0, 1, 2, 3], [1, 2, 0, 3]).flatten() def show_conv_1d_layer(layer: Layer) -> Mapping[str, list[str]]: """Serialize Conv1D layer to dict""" weights = layer.get_weights() assert len(weights) == 1 or len(weights) == 2 assert len(weights[0].shape) == 3 weights_flat = prepare_filter_weights_conv_1d(weights[0]) assert layer.padding in ['valid', 'same', 'causal'] assert layer.groups == 1 assert len(get_layer_input_shape(layer)) == 3 assert get_layer_input_shape(layer)[0] in {None, 1} result = { 'weights': encode_floats(weights_flat) } if len(weights) == 2: bias = weights[1] result['bias'] = encode_floats(bias) return result def show_conv_2d_layer(layer: Layer) -> Mapping[str, list[str]]: """Serialize Conv2D layer to dict""" weights = layer.get_weights() assert len(weights) == 1 or len(weights) == 2 assert len(weights[0].shape) == 4 weights_flat = prepare_filter_weights_conv_2d(weights[0]) assert layer.padding in ['valid', 'same'] assert layer.groups == 1 assert len(get_layer_input_shape(layer)) == 4 assert get_layer_input_shape(layer)[0] in {None, 1} result = { 'weights': encode_floats(weights_flat) } if len(weights) == 2: bias = weights[1] result['bias'] = encode_floats(bias) return result def show_separable_conv_2d_layer(layer: Layer) -> Mapping[str,
list[str]]: """Serialize SeparableConv2D layer to dict""" weights = layer.get_weights() assert layer.depth_multiplier == 1 assert len(weights) == 2 or len(weights) == 3 assert len(weights[0].shape) == 4 assert len(weights[1].shape) == 4 # probably incorrect for depth_multiplier > 1? slice_weights = prepare_filter_weights_slice_conv_2d(weights[0]) stack_weights = prepare_filter_weights_conv_2d(weights[1]) assert layer.padding in ['valid', 'same'] assert len(get_layer_input_shape(layer)) == 4 assert get_layer_input_shape(layer)[0] in {None, 1} result = { 'slice_weights': encode_floats(slice_weights), 'stack_weights': encode_floats(stack_weights), } if len(weights) == 3: bias = weights[2] result['bias'] = encode_floats(bias) return result def show_depthwise_conv_2d_layer(layer: Layer) -> Mapping[str, list[str]]: """Serialize DepthwiseConv2D layer to dict""" weights = layer.get_weights() assert layer.depth_multiplier == 1 assert len(weights) in [1, 2] assert len(weights[0].shape) == 4 # probably incorrect for depth_multiplier > 1? slice_weights = prepare_filter_weights_slice_conv_2d(weights[0]) assert layer.padding in ['valid', 'same'] assert len(get_layer_input_shape(layer)) == 4 assert get_layer_input_shape(layer)[0] in {None, 1} result = { 'slice_weights': encode_floats(slice_weights), } if len(weights) == 2: bias = weights[1] result['bias'] = encode_floats(bias) return result def show_conv_1d_transpose_layer(layer: Layer) -> Mapping[str, list[str]]: """Serialize Conv1D transpose layer to dict""" weights = layer.get_weights() assert len(weights) == 1 or len(weights) == 2 assert len(weights[0].shape) == 3 weights_flat = prepare_filter_weights_conv_1d_transpose(weights[0]) assert layer.padding in ['valid', 'same', 'causal'] assert layer.strides[0] <= layer.kernel_size[0] assert len(get_layer_input_shape(layer)) == 3 assert get_layer_input_shape(layer)[0] in {None, 1} result = { 'weights': encode_floats(weights_flat) } if len(weights) == 2: bias = weights[1] result['bias'] = encode_floats(bias) return result def show_conv_2d_transpose_layer(layer: Layer) -> Mapping[str, list[str]]: """Serialize Conv2D transpose layer to dict""" weights = layer.get_weights() assert len(weights) == 1 or len(weights) == 2 assert len(weights[0].shape) == 4 weights_flat = prepare_filter_weights_conv_2d_transpose(weights[0]) assert layer.padding in ['valid', 'same'] assert layer.strides[0] <= layer.kernel_size[0] assert layer.strides[1] <= layer.kernel_size[1] assert sum([ layer.dilation_rate[0] == layer.dilation_rate[1], layer.strides[0] == layer.strides[1], layer.kernel_size[0] == layer.kernel_size[1]]) >= 2 assert len(get_layer_input_shape(layer)) == 4 assert get_layer_input_shape(layer)[0] in {None, 1} result = { 'weights': encode_floats(weights_flat) } if len(weights) == 2: bias = weights[1] result['bias'] = encode_floats(bias) return result def show_batch_normalization_layer(layer: Layer) -> Mapping[str, list[str]]: """Serialize batch normalization layer to dict""" moving_mean = layer.moving_mean.numpy() moving_variance = layer.moving_variance.numpy() result = {} result['moving_mean'] = encode_floats(moving_mean) result['moving_variance'] = encode_floats(moving_variance) if layer.center: beta = layer.beta.numpy() result['beta'] = encode_floats(beta) if layer.scale: gamma = layer.gamma.numpy() result['gamma'] = encode_floats(gamma) return result def show_layer_normalization_layer(layer: Layer) -> Mapping[str, list[str]]: """Serialize layer normalization layer to dict""" result = {} if layer.center: beta = 
layer.beta.numpy() result['beta'] = encode_floats(beta) if layer.scale: gamma = layer.gamma.numpy() result['gamma'] = encode_floats(gamma) return result def show_dense_layer(layer: Layer) -> Mapping[str, list[str]]: """Serialize dense layer to dict""" weights = layer.get_weights() assert len(weights) == 1 or len(weights) == 2 assert len(weights[0].shape) == 2 weights_flat = weights[0].flatten() result = { 'weights': encode_floats(weights_flat) } if len(weights) == 2: bias = weights[1] result['bias'] = encode_floats(bias) return result def show_dot_layer(layer: Layer) -> None: """Check valid configuration of Dot layer""" assert len(get_layer_input_shape(layer)) == 2 assert isinstance(layer.axes, int) or (isinstance(layer.axes, list) and len(layer.axes) == 2) assert layer.input.shape[0][0] is None assert layer.input.shape[1][0] is None assert len(layer.output_shape) <= 5 def show_prelu_layer(layer: Layer) -> Mapping[str, list[str]]: """Serialize prelu layer to dict""" weights = layer.get_weights() assert len(weights) == 1 weights_flat = weights[0].flatten() result = { 'alpha': encode_floats(weights_flat) } return result def show_embedding_layer(layer: Layer) -> Mapping[str, list[str]]: """Serialize Embedding layer to dict""" weights = layer.get_weights() assert len(weights) == 1 result = { 'weights': encode_floats(weights[0]) } return result def show_input_layer(layer: Layer) -> None: """Serialize input layer to dict""" assert not layer.sparse def show_softmax_layer(layer: Layer) -> None: """Serialize softmax layer to dict""" assert layer.axis == -1 def show_normalization_layer(layer: Layer) -> Mapping[str, list[str]]: """Serialize normalization layer to dict""" assert len(layer.axis) <= 1, 'Multiple normalization axes are not supported' if len(layer.axis) == 1: assert layer.axis[0] in (-1, 1, 2, 3, 4, 5), 'Invalid axis for Normalization layer.' 
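    # Keras stores scalar mean/variance for axis=None and one value per
    # position along the chosen axis otherwise; both variants are serialized
    # below, while multiple normalization axes were rejected above.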
return { 'mean': encode_floats(layer.mean), 'variance': encode_floats(layer.variance) } def show_upsampling2d_layer(layer: Layer) -> None: """Serialize UpSampling2D layer to dict""" assert layer.interpolation in ['nearest', 'bilinear'] def show_resizing_layer(layer: Layer) -> None: """Serialize Resizing layer to dict""" assert layer.interpolation in ['nearest', 'bilinear', 'area'] def show_rescaling_layer(layer: Layer) -> None: """Serialize Rescaling layer to dict""" assert isinstance(layer.scale, float) def show_category_encoding_layer(layer: Layer) -> None: """Serialize CategoryEncoding layer to dict""" assert layer.output_mode in ['multi_hot', 'count', 'one_hot'] def show_attention_layer(layer: Layer) -> Mapping[str, float]: """Serialize Attention layer to dict""" assert layer.score_mode in ['dot', 'concat'] data = {} if layer.scale is not None: data['scale'] = float(layer.scale.numpy()) if layer.score_mode == 'concat': data['concat_score_weight'] = float(layer.concat_score_weight.numpy()) return data def show_additive_attention_layer(layer: Layer) -> Mapping[str, List[str]]: """Serialize AdditiveAttention layer to dict""" data = {} if layer.scale is not None: data['scale'] = encode_floats(layer.scale.numpy()) return data def show_multi_head_attention_layer(layer: Layer) -> Mapping[str, List[list[str]]]: """Serialize MultiHeadAttention layer to dict""" assert layer._output_shape is None assert layer._attention_axes == (1,), 'MultiHeadAttention supported only with attention_axes=None' return { 'weight_shapes': list(map(lambda w: list(w.shape), layer.weights)), 'weights': list(map(lambda w: encode_floats(w.numpy()), layer.weights)), } def get_layer_functions_dict() -> Mapping[str, Callable[[Layer], LayerConfig]]: return { 'Conv1D': show_conv_1d_layer, 'Conv2D': show_conv_2d_layer, 'Conv1DTranspose': show_conv_1d_transpose_layer, 'Conv2DTranspose': show_conv_2d_transpose_layer, 'SeparableConv2D': show_separable_conv_2d_layer, 'DepthwiseConv2D': show_depthwise_conv_2d_layer, 'BatchNormalization': show_batch_normalization_layer, 'Dense': show_dense_layer, 'Dot': show_dot_layer, 'PReLU': show_prelu_layer, 'Embedding': show_embedding_layer, 'LayerNormalization': show_layer_normalization_layer, 'TimeDistributed': show_time_distributed_layer, 'Input': show_input_layer, 'Softmax': show_softmax_layer, 'Normalization': show_normalization_layer, 'UpSampling2D': show_upsampling2d_layer, 'Resizing': show_resizing_layer, 'Rescaling': show_rescaling_layer, 'CategoryEncoding': show_category_encoding_layer, 'Attention': show_attention_layer, 'AdditiveAttention': show_additive_attention_layer, 'MultiHeadAttention': show_multi_head_attention_layer, } def show_time_distributed_layer(layer: Layer) -> Union[None, LayerConfig]: show_layer_functions = get_layer_functions_dict() config = layer.get_config() class_name = str(config['layer']['class_name']) if show_layer_functions and class_name in show_layer_functions: input_shape_new: Shape if len(get_layer_input_shape(layer)) == 3: input_shape_new = (get_layer_input_shape(layer)[0], get_layer_input_shape(layer)[2]) elif len(get_layer_input_shape(layer)) == 4: input_shape_new = ( get_layer_input_shape(layer)[0], get_layer_input_shape(layer)[2], get_layer_input_shape(layer)[3]) elif len(get_layer_input_shape(layer)) == 5: input_shape_new = ( get_layer_input_shape(layer)[0], get_layer_input_shape(layer)[2], get_layer_input_shape(layer)[3], get_layer_input_shape(layer)[4]) elif len(get_layer_input_shape(layer)) == 6: input_shape_new = ( 
get_layer_input_shape(layer)[0], get_layer_input_shape(layer)[2], get_layer_input_shape(layer)[3], get_layer_input_shape(layer)[4], get_layer_input_shape(layer)[5]) else: raise Exception('Wrong input shape') layer_function = show_layer_functions[class_name] attributes = dir(layer.layer) class CopiedLayer: pass copied_layer = CopiedLayer() for attr in attributes: try: if attr not in ['batch_shape', '__class__']: setattr(copied_layer, attr, getattr(layer.layer, attr)) except Exception: continue setattr(copied_layer, 'batch_shape', input_shape_new) setattr(copied_layer, 'output_shape', layer.output.shape) return layer_function(copied_layer) else: return None def get_dict_keys(d: Mapping[str, LayerConfig]) -> list[str]: """Return the keys of a dictionary as a list""" return list(d) def merge_two_disjunct_dicts(x: Mapping[str, LayerConfig], y: Mapping[str, LayerConfig]) -> Mapping[str, LayerConfig]: """Given two dicts, merge them into a new dict as a shallow copy. No key is allowed to be present in both dictionaries. """ assert set(get_dict_keys(x)).isdisjoint(get_dict_keys(y)) assert isinstance(x, dict) and isinstance(y, dict) z = x.copy() z.update(y) return z def is_ascii(some_string: str) -> bool: """Check if a string only contains ascii characters""" try: some_string.encode('ascii') except UnicodeEncodeError: return False else: return True def get_layer_weights(layer: Layer, name: str) -> Mapping[str, LayerConfig]: """Serialize all weights of a single normal layer""" result: dict[str, LayerConfig] = {} layer_type = type(layer).__name__ if hasattr(layer, 'data_format'): assert layer.data_format == 'channels_last' show_func = get_layer_functions_dict().get(layer_type, None) shown_layer = None if show_func: shown_layer = show_func(layer) if shown_layer: result[name] = shown_layer if show_func and layer_type == 'TimeDistributed': result[name] = {'td_input_len': encode_floats( np.array([len(get_layer_input_shape(layer)) - 1], dtype=np.float32)), 'td_output_len': encode_floats(np.array([len(layer.output.shape) - 1], dtype=np.float32))} return result def get_all_weights(model: Model, prefix: str) -> Mapping[str, LayerConfig]: """Serialize all weights of the model's layers""" result: dict[str, LayerConfig] = {} layers = model.layers assert K.image_data_format() == 'channels_last' for layer in layers: layer_type = type(layer).__name__ for node in layer._inbound_nodes: if 'training' in node.arguments.kwargs: is_layer_with_accidental_training_flag = layer_type in ('CenterCrop', 'Resizing') has_training = node.arguments.kwargs['training'] is True assert not has_training or is_layer_with_accidental_training_flag, \ 'training=true is not supported, see https://github.com/Dobiasd/frugally-deep/issues/284' name = prefix + layer.name assert is_ascii(name) if name in result: raise ValueError('duplicate layer name ' + name) if layer_type in ['Model', 'Sequential', 'Functional']: result = dict(merge_two_disjunct_dicts(result, get_all_weights(layer, name + '_'))) elif layer_type in ['TimeDistributed'] and type(layer.layer).__name__ in ['Model', 'Sequential', 'Functional']: inner_layer = layer.layer result = dict(merge_two_disjunct_dicts(result, get_layer_weights(layer, name))) result = dict(merge_two_disjunct_dicts(result, get_all_weights(inner_layer, name + '_'))) else: result = dict(merge_two_disjunct_dicts(result, get_layer_weights(layer, name))) return result def get_model_name(model: Model) -> str: """Return .name or ._name""" if hasattr(model, 'name'): return str(model.name) return str(model._name) def
convert_sequential_to_model(model: Model) -> Model: """Convert a sequential model to the underlying functional format""" if type(model).__name__ in ['Sequential']: name = get_model_name(model) inbound_nodes = model._inbound_nodes input_layer = Input(batch_shape=get_layer_input_shape(model.layers[0])) prev_layer = input_layer for layer in model.layers: layer._inbound_nodes = [] prev_layer = layer(prev_layer) funcmodel = Model([input_layer], [prev_layer], name=name) model = funcmodel model._inbound_nodes = inbound_nodes if type(model).__name__ == 'TimeDistributed': model.layer = convert_sequential_to_model(model.layer) if type(model).__name__ in ['Model', 'Functional']: for i in range(len(model.layers)): new_layer = convert_sequential_to_model(model.layers[i]) if new_layer == model.layers[i]: continue # https://stackoverflow.com/questions/78297541/how-to-replace-a-model-layer-using-tensorflow-2-16 model._operations[i] = new_layer assert model.layers[i] == new_layer return model def get_shapes(tensors: List[Mapping[str, Shape]]) -> List[Shape]: """Return shapes of a list of tensors""" return [t['shape'] for t in tensors] def calculate_hash(model: Model) -> str: layers = model.layers hash_m = hashlib.sha256() for layer in layers: for weights in layer.get_weights(): if isinstance(weights, np.ndarray): hash_m.update(weights.tobytes()) hash_m.update(layer.name.encode('ascii')) return hash_m.hexdigest() def model_to_fdeep_json(model: Model, no_tests: bool = False) -> Mapping[str, Any]: """Convert any Keras model to the frugally-deep model format.""" # Force creation of underlying functional model. # see: https://github.com/fchollet/keras/issues/8136 # Loss and optimizer type do not matter, since we do not train the model. model.compile(loss='mse', optimizer='sgd') model = convert_sequential_to_model(model) test_data = None if no_tests else gen_test_data(model) json_output = {} print('Converting model architecture.') json_output['architecture'] = json.loads(model.to_json()) json_output['image_data_format'] = K.image_data_format() json_output['input_shapes'] = list(map(get_layer_input_shape_tensor_shape, get_model_input_layers(model))) json_output['output_shapes'] = list(map(keras_shape_to_fdeep_tensor_shape, as_list(model.output_shape))) if test_data: json_output['tests'] = [test_data] print('Converting model weights.') json_output['trainable_params'] = get_all_weights(model, '') print('Done converting model weights.') print('Calculating model hash.') json_output['hash'] = calculate_hash(model) print('Model conversion finished.') return json_output def assert_model_type(model: Model) -> None: import keras assert type(model) in [keras.src.models.sequential.Sequential, keras.src.models.functional.Functional] def convert(in_path: str, out_path: str, no_tests: bool = False) -> None: """Convert any (h5-)stored Keras model to the frugally-deep model format.""" print('loading {}'.format(in_path)) model = load_model(in_path, compile=False) json_output = model_to_fdeep_json(model, no_tests) print('writing {}'.format(out_path)) with open(out_path, 'w') as f: json.dump(json_output, f, allow_nan=False, separators=(',', ':')) def main() -> None: """Parse command line and convert model.""" parser = argparse.ArgumentParser( prog='frugally-deep model converter', description='Converts models from Keras\' .keras format to frugally-deep\'s .json format.') parser.add_argument('input_path', type=str) parser.add_argument('output_path', type=str) parser.add_argument('--no-tests', action='store_true') args = 
parser.parse_args() convert(args.input_path, args.output_path, args.no_tests) if __name__ == '__main__': main() frugally-deep-0.17.1/keras_export/generate_test_models.py000066400000000000000000001071151476372554500236430ustar00rootroot00000000000000#!/usr/bin/env python3 """Generate a test model for frugally-deep. """ import sys from typing import Tuple, List, Union import numpy as np from keras.layers import ActivityRegularization from keras.layers import AdditiveAttention from keras.layers import Attention from keras.layers import BatchNormalization, Concatenate, LayerNormalization, UnitNormalization from keras.layers import CategoryEncoding, Embedding from keras.layers import Conv1D, ZeroPadding1D, Cropping1D from keras.layers import Conv2D, ZeroPadding2D, Cropping2D, CenterCrop from keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D from keras.layers import GlobalAveragePooling3D, GlobalMaxPooling3D from keras.layers import Identity, Conv2DTranspose, Conv1DTranspose from keras.layers import Input, Dense, Dropout, Flatten, Activation from keras.layers import LeakyReLU, ELU, PReLU, ReLU from keras.layers import MaxPooling1D, AveragePooling1D, UpSampling1D from keras.layers import MaxPooling2D, AveragePooling2D, UpSampling2D from keras.layers import MaxPooling3D, AveragePooling3D from keras.layers import MultiHeadAttention from keras.layers import Multiply, Add, Subtract, Average, Maximum, Minimum from keras.layers import Normalization, Rescaling, Resizing from keras.layers import Permute, Reshape, RepeatVector from keras.layers import SeparableConv2D, DepthwiseConv2D from keras.layers import ZeroPadding3D, Cropping3D from keras.models import Model, load_model, Sequential __author__ = "Tobias Hermann" __copyright__ = "Copyright 2017, Tobias Hermann" __license__ = "MIT" __maintainer__ = "Tobias Hermann, https://github.com/Dobiasd/frugally-deep" __email__ = "editgym@gmail.com" NDFloat32Array = np.typing.NDArray[np.float32] NDUInt32Array = np.typing.NDArray[np.int32] Shape = Tuple[int, ...] VariableShape = Tuple[Union[None, int], ...] 
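# Illustrative behavior of the shape helpers defined below:
#   replace_none_with(42, (None, 3))      -> (42, 3)
#   get_shape_for_random_data(2, (42, 3)) -> (2, 42, 3)
# i.e. variable dimensions are fixed to 42 and the number of training
# samples is prepended as batch dimension before random data is drawn.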
def replace_none_with(value: int, shape: VariableShape) -> Shape: """Replace every None with a fixed value.""" return tuple(list(map(lambda x: x if x is not None else value, shape))) def get_shape_for_random_data(data_size: int, shape: Shape) -> Shape: """Prepend the size of the data set to the shape.""" if len(shape) == 5: return (data_size, shape[0], shape[1], shape[2], shape[3], shape[4]) if len(shape) == 4: return (data_size, shape[0], shape[1], shape[2], shape[3]) if len(shape) == 3: return (data_size, shape[0], shape[1], shape[2]) if len(shape) == 2: return (data_size, shape[0], shape[1]) if len(shape) == 1: return (data_size, shape[0]) raise ValueError('cannot use shape for random data:', shape) def generate_random_data(data_size: int, shape: VariableShape) -> NDFloat32Array: """Random data for training.""" return np.random.random( size=get_shape_for_random_data(data_size, replace_none_with(42, shape))).astype(np.float32) def generate_input_data(data_size: int, input_shapes: List[VariableShape]) -> List[NDFloat32Array]: """Random input data for training.""" return [generate_random_data(data_size, input_shape) for input_shape in input_shapes] def generate_integer_random_data(data_size: int, low: int, high: int, shape: Shape) -> NDUInt32Array: """Random integer data for training.""" return np.random.randint( low=low, high=high, size=get_shape_for_random_data(data_size, replace_none_with(42, shape))) def generate_integer_input_data(data_size: int, low: int, highs: List[int], input_shapes: List[Shape]) -> List[ NDUInt32Array]: """Random integer input data for training.""" return [generate_integer_random_data(data_size, low, high, input_shape) for high, input_shape in zip(highs, input_shapes)] def as_list(value_or_values: Union[NDFloat32Array, List[NDFloat32Array]]) -> List[NDFloat32Array]: """Leave lists untouched, convert non-list types to a singleton list""" if isinstance(value_or_values, list): return value_or_values return [value_or_values] def generate_output_data(data_size: int, outputs: List[NDFloat32Array]) -> List[NDFloat32Array]: """Random output data for training.""" return [generate_random_data(data_size, output.shape[1:]) for output in as_list(outputs)] def get_test_model_exhaustive() -> Model: """Returns an exhaustive test model.""" input_shapes: List[VariableShape] = [ (2, 3, 4, 5, 6), # 0 (2, 3, 4, 5, 6), (7, 8, 9, 10), (7, 8, 9, 10), (11, 12, 13), (11, 12, 13), (14, 15), (14, 15), (16,), (16,), (2,), # 10 (1,), (2,), (1,), (1, 3), (1, 4), (1, 1, 3), (1, 1, 4), (1, 1, 1, 3), (1, 1, 1, 4), (1, 1, 1, 1, 3), # 20 (1, 1, 1, 1, 4), (26, 28, 3), (4, 4, 3), (4, 4, 3), (4,), (2, 3), (1,), (1,), (1,), (2, 3), # 30 (9, 16, 1), (1, 9, 16), (6, 1, 1), (1, 1, 1, 1, 6), (1, 1, 1, 10), (1, 1, 13), (1, 15), (1, 1, 1, 1, 6), (1, 1, 1, 5, 1), (1, 1, 4, 1, 1), # 40 (1, 3, 1, 1, 1), (2, 1, 1, 1, 1), (1, 1, 4, 1, 6), (1, 3, 1, 5, 1), (2, 1, 4, 1, 1), # 45 (1,), (3, 1), (6, 5, 4, 3, 2), (5, 4), (7, 4), # 50 (7, 4), ] inputs = [Input(shape=s) for s in input_shapes] outputs = [] outputs.append(Conv1DTranspose(4, 3, padding='valid', use_bias=False)(inputs[6])) outputs.append(Conv2DTranspose(4, (5, 3), padding='valid', use_bias=False)(inputs[4])) outputs.append(Conv1DTranspose(4, 1, padding='valid', use_bias=False)(inputs[6])) outputs.append(Conv1DTranspose(4, 1, padding='same', use_bias=False)(inputs[6])) outputs.append(Conv1DTranspose(1, 3, padding='valid', use_bias=False)(inputs[6])) outputs.append(Conv1DTranspose(2, 3, padding='same')(inputs[6])) outputs.append(Conv1DTranspose(2, 5, padding='same',
strides=2)(inputs[6])) outputs.append(Conv1DTranspose(2, 5, padding='valid', strides=2)(inputs[6])) # Current CPU implementations do not yet support dilation rates larger than 1 # https://github.com/keras-team/keras/issues/20408 # https://github.com/keras-team/keras/pull/20737 #outputs.append(Conv1DTranspose(3, 5, padding='same', dilation_rate=2)(inputs[6])) #outputs.append(Conv1DTranspose(3, 5, padding='valid', dilation_rate=2)(inputs[6])) outputs.append(Conv2DTranspose(4, (1, 1), padding='valid', use_bias=False)(inputs[4])) outputs.append(Conv2DTranspose(4, (1, 1), padding='same', use_bias=False)(inputs[4])) outputs.append(Conv2DTranspose(1, (3, 3))(inputs[4])) outputs.append(Conv2DTranspose(4, (3, 3), padding='valid', use_bias=False)(inputs[4])) outputs.append(Conv2DTranspose(4, (3, 3), padding='same')(inputs[4])) outputs.append(Conv2DTranspose(4, (5, 5), padding='same', strides=(2, 3))(inputs[4])) outputs.append(Conv2DTranspose(4, (5, 5), padding='valid', strides=(2, 3))(inputs[4])) #outputs.append(Conv2DTranspose(4, (5, 5), padding='same', dilation_rate=(2, 3))(inputs[4])) #outputs.append(Conv2DTranspose(4, (5, 5), padding='valid', dilation_rate=(2, 3))(inputs[4])) outputs.append(Conv1DTranspose(1, 3, padding='valid')(inputs[6])) outputs.append(Conv1DTranspose(2, 1, padding='same')(inputs[6])) #outputs.append(Conv1DTranspose(3, 4, padding='same', dilation_rate=2)(inputs[6])) outputs.append(Conv2DTranspose(4, (3, 3))(inputs[4])) outputs.append(Conv2DTranspose(4, (3, 3), use_bias=False)(inputs[4])) outputs.append(Conv2DTranspose(4, (2, 4), strides=(2, 2), padding='same')(inputs[4])) #outputs.append(Conv2DTranspose(4, (2, 4), padding='same', dilation_rate=(2, 2))(inputs[4])) outputs.append(Conv1D(1, 3, padding='valid')(inputs[6])) outputs.append(Conv1D(2, 1, padding='same')(inputs[6])) outputs.append(Conv1D(2, 1, padding='same', strides=2)(inputs[6])) outputs.append(Conv1D(3, 4, padding='causal', dilation_rate=2)(inputs[6])) outputs.append(ZeroPadding1D(2)(inputs[6])) outputs.append(Cropping1D((2, 3))(inputs[6])) outputs.append(MaxPooling1D(2)(inputs[6])) outputs.append(MaxPooling1D(2, strides=2, padding='same')(inputs[6])) outputs.append(AveragePooling1D(2)(inputs[6])) outputs.append(AveragePooling1D(2, strides=2, padding='same')(inputs[6])) outputs.append(GlobalMaxPooling1D()(inputs[6])) outputs.append(GlobalAveragePooling1D()(inputs[6])) outputs.append(GlobalMaxPooling1D(keepdims=True)(inputs[6])) outputs.append(GlobalAveragePooling1D(keepdims=True)(inputs[6])) outputs.append(Normalization(axis=None, mean=2.1, variance=2.2)(inputs[4])) # outputs.append(Normalization(axis=-1, mean=2.1, variance=2.2)(inputs[6])) # No longer supported in TensorFlow 2.16 outputs.append(Normalization(axis=-1, mean=2.1, variance=2.2)(inputs[46])) outputs.append(Normalization(axis=1, mean=2.1, variance=2.2)(inputs[46])) outputs.append(Normalization(axis=-1, mean=2.1, variance=2.2)(inputs[47])) # outputs.append(Normalization(axis=1, mean=2.1, variance=2.2)(inputs[47])) # No longer supported in TensorFlow 2.16 outputs.append(Normalization(axis=2, mean=2.1, variance=2.2)(inputs[47])) for axis in range(1, 6): outputs.append(Normalization(axis=axis, mean=4.2, variance=2.3)(inputs[0])) outputs.append(Rescaling(23.5, 42.1)(inputs[0])) outputs.append(Conv2D(4, (3, 3))(inputs[4])) outputs.append(Conv2D(4, (3, 3), use_bias=False, padding='valid')(inputs[4])) outputs.append(Conv2D(4, (2, 4), strides=(2, 3), padding='same')(inputs[4])) outputs.append(Conv2D(4, (2, 4), padding='same', dilation_rate=(2, 3))(inputs[4])) 
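    # SeparableConv2D factors the convolution into a depthwise ('slice') and
    # a pointwise ('stack') step; convert_model.py serializes the two kernels
    # separately as slice_weights and stack_weights.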
outputs.append(SeparableConv2D(3, (3, 3))(inputs[4])) outputs.append(DepthwiseConv2D((3, 3))(inputs[4])) outputs.append(DepthwiseConv2D((1, 2))(inputs[4])) outputs.append(MaxPooling2D((2, 2))(inputs[4])) outputs.append(MaxPooling3D((2, 2, 2))(inputs[2])) outputs.append(MaxPooling2D((1, 3), strides=(2, 3), padding='same')(inputs[4])) outputs.append(MaxPooling3D((1, 3, 5), strides=(2, 3, 4), padding='same')(inputs[2])) outputs.append(AveragePooling2D((2, 2))(inputs[4])) outputs.append(AveragePooling3D((2, 2, 2))(inputs[2])) outputs.append(AveragePooling2D((1, 3), strides=(2, 3), padding='same')(inputs[4])) outputs.append(AveragePooling3D((1, 3, 5), strides=(2, 3, 4), padding='same')(inputs[2])) outputs.append(GlobalAveragePooling2D()(inputs[4])) outputs.append(GlobalAveragePooling3D()(inputs[2])) outputs.append(GlobalMaxPooling2D()(inputs[4])) outputs.append(GlobalMaxPooling3D()(inputs[2])) outputs.append(GlobalAveragePooling2D(keepdims=True)(inputs[4])) outputs.append(GlobalAveragePooling3D(keepdims=True)(inputs[2])) outputs.append(GlobalMaxPooling2D(keepdims=True)(inputs[4])) outputs.append(GlobalMaxPooling3D(keepdims=True)(inputs[2])) outputs.append(CenterCrop(4, 5)(inputs[4])) outputs.append(CenterCrop(5, 6)(inputs[4])) outputs.append(CenterCrop(19, 53)(inputs[23])) outputs.append(UpSampling2D(size=(1, 2), interpolation='nearest')(inputs[4])) outputs.append(UpSampling2D(size=(5, 3), interpolation='nearest')(inputs[4])) outputs.append(UpSampling2D(size=(1, 2), interpolation='bilinear')(inputs[4])) outputs.append(UpSampling2D(size=(5, 3), interpolation='bilinear')(inputs[4])) outputs.append(Resizing(4, 5)(inputs[4])) outputs.append(Resizing(5, 6)(inputs[4])) outputs.append(Resizing(19, 53, interpolation="bilinear")(inputs[23])) outputs.append(Resizing(19, 53, interpolation="nearest")(inputs[23])) # outputs.append(Resizing(7, 9, interpolation="area")(inputs[22])) # No longer supported in TensorFlow 2.16 # outputs.append(Resizing(19, 53, interpolation="area")(inputs[23])) # No longer supported in TensorFlow 2.16 outputs.append(Resizing(19, 53, crop_to_aspect_ratio=True)(inputs[23])) outputs.append(Permute((3, 4, 1, 5, 2))(inputs[0])) outputs.append(Permute((1, 5, 3, 2, 4))(inputs[0])) outputs.append(Permute((3, 4, 1, 2))(inputs[2])) outputs.append(Permute((2, 1, 3))(inputs[4])) outputs.append(Permute((2, 1))(inputs[6])) outputs.append(Permute((1,))(inputs[8])) outputs.append(Permute((3, 1, 2))(inputs[31])) outputs.append(Permute((3, 1, 2))(inputs[32])) outputs.append(BatchNormalization(center=False, scale=False)(inputs[11])) outputs.append(BatchNormalization()(inputs[11])) outputs.append(BatchNormalization()(inputs[10])) outputs.append(BatchNormalization()(inputs[14])) outputs.append(BatchNormalization()(inputs[26])) outputs.append(BatchNormalization()(inputs[23])) outputs.append(BatchNormalization()(inputs[0])) outputs.append(BatchNormalization(center=False)(inputs[0])) outputs.append(BatchNormalization(scale=False)(inputs[0])) outputs.append(BatchNormalization(center=False, scale=False)(inputs[0])) outputs.append(BatchNormalization()(inputs[0])) outputs.append(BatchNormalization(axis=1)(inputs[0])) outputs.append(BatchNormalization(axis=2)(inputs[0])) outputs.append(BatchNormalization(axis=3)(inputs[0])) outputs.append(BatchNormalization(axis=4)(inputs[0])) outputs.append(BatchNormalization(axis=5)(inputs[0])) outputs.append(BatchNormalization()(inputs[2])) outputs.append(BatchNormalization(axis=1)(inputs[2])) outputs.append(BatchNormalization(axis=2)(inputs[2])) 
outputs.append(BatchNormalization(axis=3)(inputs[2])) outputs.append(BatchNormalization(axis=4)(inputs[2])) outputs.append(BatchNormalization()(inputs[4])) # todo: check if TensorFlow >= 2.1 supports this # outputs.append(BatchNormalization(axis=1)(inputs[4])) # tensorflow.python.framework.errors_impl.InternalError: The CPU implementation of FusedBatchNorm only supports NHWC tensor format for now. outputs.append(BatchNormalization(axis=2)(inputs[4])) outputs.append(BatchNormalization(axis=3)(inputs[4])) outputs.append(BatchNormalization()(inputs[6])) outputs.append(BatchNormalization(axis=1)(inputs[6])) outputs.append(BatchNormalization(axis=2)(inputs[6])) outputs.append(BatchNormalization()(inputs[8])) outputs.append(BatchNormalization(axis=1)(inputs[8])) outputs.append(BatchNormalization()(inputs[27])) outputs.append(BatchNormalization(axis=1)(inputs[27])) outputs.append(BatchNormalization()(inputs[14])) outputs.append(BatchNormalization(axis=1)(inputs[14])) outputs.append(BatchNormalization(axis=2)(inputs[14])) outputs.append(BatchNormalization()(inputs[16])) # todo: check if TensorFlow >= 2.1 supports this # outputs.append(BatchNormalization(axis=1)(inputs[16])) # tensorflow.python.framework.errors_impl.InternalError: The CPU implementation of FusedBatchNorm only supports NHWC tensor format for now. outputs.append(BatchNormalization(axis=2)(inputs[16])) outputs.append(BatchNormalization(axis=3)(inputs[16])) outputs.append(BatchNormalization()(inputs[18])) outputs.append(BatchNormalization(axis=1)(inputs[18])) outputs.append(BatchNormalization(axis=2)(inputs[18])) outputs.append(BatchNormalization(axis=3)(inputs[18])) outputs.append(BatchNormalization(axis=4)(inputs[18])) outputs.append(BatchNormalization()(inputs[20])) outputs.append(BatchNormalization(axis=1)(inputs[20])) outputs.append(BatchNormalization(axis=2)(inputs[20])) outputs.append(BatchNormalization(axis=3)(inputs[20])) outputs.append(BatchNormalization(axis=4)(inputs[20])) outputs.append(BatchNormalization(axis=5)(inputs[20])) outputs.append(LayerNormalization()(inputs[11])) outputs.append(LayerNormalization()(inputs[10])) outputs.append(LayerNormalization()(inputs[26])) outputs.append(LayerNormalization()(inputs[24])) outputs.append(LayerNormalization()(inputs[0])) outputs.append(LayerNormalization(axis=1)(inputs[0])) outputs.append(LayerNormalization(axis=2)(inputs[0])) outputs.append(LayerNormalization(axis=3)(inputs[0])) outputs.append(LayerNormalization(axis=4)(inputs[0])) outputs.append(LayerNormalization(axis=5)(inputs[0])) outputs.append(LayerNormalization(axis=[1, 2])(inputs[0])) outputs.append(LayerNormalization(axis=[2, 3, 5])(inputs[0])) outputs.append(UnitNormalization()(inputs[11])) outputs.append(UnitNormalization()(inputs[10])) outputs.append(UnitNormalization()(inputs[26])) outputs.append(UnitNormalization()(inputs[24])) outputs.append(UnitNormalization()(inputs[0])) outputs.append(UnitNormalization(axis=1)(inputs[0])) outputs.append(UnitNormalization(axis=2)(inputs[0])) outputs.append(UnitNormalization(axis=3)(inputs[0])) outputs.append(UnitNormalization(axis=4)(inputs[0])) outputs.append(UnitNormalization(axis=5)(inputs[0])) outputs.append(UnitNormalization(axis=[1, 2])(inputs[0])) outputs.append(UnitNormalization(axis=[2, 3, 5])(inputs[0])) outputs.append(Dropout(0.5)(inputs[4])) outputs.append(ActivityRegularization(0.3, 0.4)(inputs[4])) outputs.append(ZeroPadding2D(2)(inputs[4])) outputs.append(ZeroPadding2D((2, 3))(inputs[4])) outputs.append(ZeroPadding2D(((1, 2), (3, 4)))(inputs[4])) 
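    # The Cropping layers accept the same argument forms as the ZeroPadding
    # layers above: an int, a per-dimension tuple, or ((before, after), ...)
    # pairs.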
outputs.append(Cropping2D(2)(inputs[4])) outputs.append(Cropping2D((2, 3))(inputs[4])) outputs.append(Cropping2D(((1, 2), (3, 4)))(inputs[4])) outputs.append(ZeroPadding3D(2)(inputs[2])) outputs.append(ZeroPadding3D((2, 3, 4))(inputs[2])) outputs.append(ZeroPadding3D(((1, 2), (3, 4), (5, 6)))(inputs[2])) outputs.append(Cropping3D(2)(inputs[2])) outputs.append(Cropping3D((2, 3, 4))(inputs[2])) outputs.append(Cropping3D(((1, 2), (3, 4), (2, 1)))(inputs[2])) outputs.append(Dense(3, use_bias=True)(inputs[0])) outputs.append(Dense(3, use_bias=True)(inputs[2])) outputs.append(Dense(3, use_bias=True)(inputs[4])) outputs.append(Dense(3, use_bias=True)(inputs[6])) outputs.append(Dense(3, use_bias=True)(inputs[8])) outputs.append(Dense(3, use_bias=True)(inputs[13])) outputs.append(Dense(3, use_bias=True)(inputs[14])) outputs.append(Dense(4, use_bias=False)(inputs[16])) outputs.append(Dense(4, use_bias=False, activation='tanh')(inputs[18])) outputs.append(Dense(4, use_bias=False)(inputs[20])) outputs.append(Reshape(((2 * 3 * 4 * 5 * 6),))(inputs[0])) outputs.append(Reshape((2, 3 * 4 * 5 * 6))(inputs[0])) outputs.append(Reshape((2, 3, 4 * 5 * 6))(inputs[0])) outputs.append(Reshape((2, 3, 4, 5 * 6))(inputs[0])) outputs.append(Reshape((2, 3, 4, 5, 6))(inputs[0])) outputs.append(Maximum()([inputs[0], inputs[1]])) outputs.append(Maximum()([inputs[2], inputs[3]])) outputs.append(Maximum()([inputs[4], inputs[5]])) outputs.append(Maximum()([inputs[6], inputs[7]])) outputs.append(Maximum()([inputs[8], inputs[9]])) outputs.append(Minimum()([inputs[0], inputs[1]])) outputs.append(Minimum()([inputs[2], inputs[3]])) outputs.append(Minimum()([inputs[4], inputs[5]])) outputs.append(Minimum()([inputs[6], inputs[7]])) outputs.append(Minimum()([inputs[8], inputs[9]])) # No longer works in TensorFlow 2.16, see: https://github.com/tensorflow/tensorflow/issues/65056 # for normalize in [True, False]: # outputs.append(Dot(axes=(1, 1), normalize=normalize)([inputs[8], inputs[9]])) # outputs.append(Dot(axes=(1, 1), normalize=normalize)([inputs[0], inputs[10]])) # outputs.append(Dot(axes=1, normalize=normalize)([inputs[0], inputs[10]])) # outputs.append(Dot(axes=(3, 1), normalize=normalize)([inputs[31], inputs[32]])) # outputs.append(Dot(axes=(2, 3), normalize=normalize)([inputs[31], inputs[32]])) # outputs.append(Dot(axes=(2, 3), normalize=normalize)([inputs[14], inputs[16]])) # outputs.append(Dot(axes=(3, 2), normalize=normalize)([inputs[24], inputs[26]])) outputs.append(Reshape((16,))(inputs[8])) outputs.append(Reshape((2, 8))(inputs[8])) outputs.append(Reshape((2, 2, 4))(inputs[8])) outputs.append(Reshape((2, 2, 2, 2))(inputs[8])) outputs.append(Reshape((2, 2, 1, 2, 2))(inputs[8])) outputs.append(RepeatVector(3)(inputs[8])) outputs.append(ReLU()(inputs[0])) for axis in [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5]: outputs.append(Concatenate(axis=axis)([inputs[0], inputs[1]])) for axis in [-4, -3, -2, -1, 1, 2, 3, 4]: outputs.append(Concatenate(axis=axis)([inputs[2], inputs[3]])) for axis in [-3, -2, -1, 1, 2, 3]: outputs.append(Concatenate(axis=axis)([inputs[4], inputs[5]])) for axis in [-2, -1, 1, 2]: outputs.append(Concatenate(axis=axis)([inputs[6], inputs[7]])) for axis in [-1, 1]: outputs.append(Concatenate(axis=axis)([inputs[8], inputs[9]])) for axis in [-1]: # [-1, 2] no longer supported in TensorFlow 2.16 outputs.append(Concatenate(axis=axis)([inputs[14], inputs[15]])) for axis in [-1, 3]: outputs.append(Concatenate(axis=axis)([inputs[16], inputs[17]])) # for axis in [-1, 4]: # 
outputs.append(Concatenate(axis=axis)([inputs[18], inputs[19]])) # no longer supported in TensorFlow 2.16 # for axis in [-1, 5]: # outputs.append(Concatenate(axis=axis)([inputs[20], inputs[21]])) # no longer supported in TensorFlow 2.16 outputs.append(UpSampling1D(size=2)(inputs[6])) # outputs.append(UpSampling1D(size=2)(inputs[8])) # ValueError: Input 0 of layer up_sampling1d_1 is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: [None, 16] outputs.append(Multiply()([inputs[10], inputs[11]])) outputs.append(Multiply()([inputs[11], inputs[10]])) outputs.append(Multiply()([inputs[11], inputs[13]])) outputs.append(Multiply()([inputs[10], inputs[11], inputs[12]])) outputs.append(Multiply()([inputs[11], inputs[12], inputs[13]])) outputs.append(Multiply()([inputs[14], inputs[16], inputs[18], inputs[20]])) outputs.append(Multiply()([inputs[14], inputs[16]])) outputs.append(Multiply()([inputs[16], inputs[18]])) outputs.append(Multiply()([inputs[18], inputs[20]])) outputs.append(Multiply()([inputs[30], inputs[33]])) outputs.append(Multiply()([inputs[34], inputs[0]])) outputs.append(Multiply()([inputs[35], inputs[2]])) outputs.append(Multiply()([inputs[36], inputs[4]])) outputs.append(Multiply()([inputs[37], inputs[6]])) outputs.append(Multiply()([inputs[0], inputs[38]])) outputs.append(Multiply()([inputs[0], inputs[39]])) outputs.append(Multiply()([inputs[0], inputs[40]])) outputs.append(Multiply()([inputs[0], inputs[41]])) outputs.append(Multiply()([inputs[0], inputs[42]])) outputs.append(Multiply()([inputs[43], inputs[44]])) outputs.append(Multiply()([inputs[44], inputs[45]])) outputs.append(Attention(use_scale=False, score_mode='dot')([inputs[49], inputs[50]])) outputs.append(Attention(use_scale=False, score_mode='dot')([inputs[49], inputs[50], inputs[51]])) outputs.append(Attention(use_scale=True, score_mode='dot')([inputs[49], inputs[50]])) outputs.append(Attention(use_scale=False, score_mode='concat')([inputs[49], inputs[50]])) outputs.append(AdditiveAttention(use_scale=False)([inputs[49], inputs[50]])) outputs.append(AdditiveAttention(use_scale=False)([inputs[49], inputs[50], inputs[51]])) outputs.append(AdditiveAttention(use_scale=True)([inputs[49], inputs[50]])) outputs.append(AdditiveAttention(use_scale=True)([inputs[49], inputs[50], inputs[51]])) outputs.append(MultiHeadAttention( num_heads=1, key_dim=1, value_dim=None, use_bias=False, output_shape=None, attention_axes=None)(inputs[49], inputs[50])) outputs.append(MultiHeadAttention( num_heads=1, key_dim=1, value_dim=None, use_bias=True, output_shape=None, attention_axes=None)(inputs[49], inputs[50])) outputs.append(MultiHeadAttention( num_heads=1, key_dim=2, value_dim=None, use_bias=False, output_shape=None, attention_axes=None)(inputs[49], inputs[50])) outputs.append(MultiHeadAttention( num_heads=1, key_dim=2, value_dim=None, use_bias=True, output_shape=None, attention_axes=None)(inputs[49], inputs[50])) outputs.append(MultiHeadAttention( num_heads=1, key_dim=1, value_dim=2, use_bias=False, output_shape=None, attention_axes=None)(inputs[49], inputs[50])) outputs.append(MultiHeadAttention( num_heads=1, key_dim=1, value_dim=2, use_bias=True, output_shape=None, attention_axes=None)(inputs[49], inputs[50])) outputs.append(MultiHeadAttention( num_heads=3, key_dim=1, value_dim=None, use_bias=False, output_shape=None, attention_axes=None)(inputs[49], inputs[50])) outputs.append(MultiHeadAttention( num_heads=3, key_dim=1, value_dim=None, use_bias=True, output_shape=None, attention_axes=None)(inputs[49], 
inputs[50])) outputs.append(MultiHeadAttention( num_heads=1, key_dim=1, value_dim=None, use_bias=False, output_shape=None, attention_axes=None)(inputs[49], inputs[50], inputs[51])) outputs.append(MultiHeadAttention( num_heads=2, key_dim=3, value_dim=5, use_bias=False, output_shape=None, attention_axes=None)(inputs[49], inputs[50], inputs[51])) outputs.append(MultiHeadAttention( num_heads=2, key_dim=3, value_dim=5, use_bias=True, output_shape=None, attention_axes=None)(inputs[49], inputs[50], inputs[51])) shared_conv = Conv2D(1, (1, 1), padding='valid', name='shared_conv', activation='relu') up_scale_2 = UpSampling2D((2, 2)) x1 = shared_conv(up_scale_2(inputs[23])) # (1, 8, 8) x2 = shared_conv(up_scale_2(inputs[24])) # (1, 8, 8) x3 = Conv2D(1, (1, 1), padding='valid')(up_scale_2(inputs[24])) # (1, 8, 8) x = Concatenate()([x1, x2, x3]) # (3, 8, 8) outputs.append(x) x = Conv2D(3, (1, 1), padding='same', use_bias=False)(x) # (3, 8, 8) outputs.append(x) x = Dropout(0.5)(x) outputs.append(x) x = Concatenate()([ MaxPooling2D((2, 2))(x), AveragePooling2D((2, 2))(x)]) # (6, 4, 4) outputs.append(x) x = Flatten()(x) # (1, 1, 96) x = Dense(4, use_bias=False)(x) outputs.append(x) x = Dense(3)(x) # (1, 1, 3) outputs.append(x) outputs.append(Add()([inputs[26], inputs[30], inputs[30]])) outputs.append(Subtract()([inputs[26], inputs[30]])) outputs.append(Multiply()([inputs[26], inputs[30], inputs[30]])) outputs.append(Average()([inputs[26], inputs[30], inputs[30]])) outputs.append(Maximum()([inputs[26], inputs[30], inputs[30]])) outputs.append(Concatenate()([inputs[26], inputs[30], inputs[30]])) # TensorFlow 2.16 no longer puts # "inbound_nodes": [] # for such nested models. # todo: Check if the situation resolved with later versions. if False: intermediate_input_shape = (3,) intermediate_in = Input(intermediate_input_shape) intermediate_x = intermediate_in intermediate_x = Dense(8)(intermediate_x) intermediate_x = Dense(5, name='duplicate_layer_name')(intermediate_x) intermediate_model = Model( inputs=[intermediate_in], outputs=[intermediate_x], name='intermediate_model') intermediate_model.compile(loss='mse', optimizer='nadam') x = intermediate_model(x)[0] # (1, 1, 5) intermediate_model_2 = Sequential(name="intermediate_model_2") intermediate_model_2.add(Dense(7, input_shape=(5,))) intermediate_model_2.add(Dense(5, name='duplicate_layer_name')) intermediate_model_2.compile(optimizer='rmsprop', loss='categorical_crossentropy') x = intermediate_model_2(x) # (1, 1, 5) intermediate_model_3_nested = Sequential(name="intermediate_model_3_nested") intermediate_model_3_nested.add(Dense(7, input_shape=(6,))) intermediate_model_3_nested.compile(optimizer='rmsprop', loss='categorical_crossentropy') intermediate_model_3 = Sequential(name="intermediate_model_3") intermediate_model_3.add(Dense(6, input_shape=(5,))) intermediate_model_3.add(intermediate_model_3_nested) intermediate_model_3.add(Dense(8)) intermediate_model_3.compile(optimizer='rmsprop', loss='categorical_crossentropy') x = intermediate_model_3(x) # (1, 1, 8) x = Dense(3)(x) # (1, 1, 3) shared_activation = Activation('tanh') outputs = outputs + [ Activation('tanh')(inputs[25]), Activation('hard_sigmoid')(inputs[25]), Activation('selu')(inputs[25]), Activation('sigmoid')(inputs[25]), Activation('softplus')(inputs[25]), Activation('softmax')(inputs[25]), Activation('relu')(inputs[25]), Activation('relu6')(inputs[25]), Activation('swish')(inputs[25]), Activation('exponential')(inputs[25]), Activation('gelu')(inputs[25]), Activation('softsign')(inputs[25]), 
LeakyReLU(negative_slope=0.5)(inputs[25]), ReLU()(inputs[25]), ReLU(max_value=0.4, negative_slope=1.1, threshold=0.3)(inputs[25]), ELU()(inputs[25]), PReLU()(inputs[24]), PReLU()(inputs[25]), PReLU()(inputs[26]), shared_activation(inputs[25]), Activation('linear')(inputs[26]), Activation('linear')(inputs[23]), x, shared_activation(x), ] model = Model(inputs=inputs, outputs=outputs, name='test_model_exhaustive') model.compile(loss='mse', optimizer='nadam') # fit to dummy data training_data_size = 2 data_in = generate_input_data(training_data_size, input_shapes) initial_data_out = model.predict(data_in) data_out = generate_output_data(training_data_size, initial_data_out) model.fit(data_in, data_out, epochs=10) return model def get_test_model_embedding() -> Model: """Returns a minimalistic test model for the Embedding and CategoryEncoding layers.""" input_dims = [ 1023, # maximum integer value in input data 255, 15, ] input_shapes: list[tuple[int, ...]] = [ (100,), # must be single-element tuple (for sequence length) (1000,), (1,), ] assert len(input_dims) == len(input_shapes) output_dims = [8, 3] # embedding dimension inputs = [Input(shape=s) for s in input_shapes] outputs = [] for k in range(2): embedding = Embedding(input_dim=input_dims[k], output_dim=output_dims[k])(inputs[k]) outputs.append(embedding) outputs.append(CategoryEncoding(1024, output_mode='multi_hot', sparse=False)(inputs[0])) # No longer working since TF 2.16: https://github.com/tensorflow/tensorflow/issues/65390 # Error: Value passed to parameter 'values' has DataType float32 not in list of allowed values: int32, int64 # outputs.append(CategoryEncoding(1024, output_mode='count', sparse=False)(inputs[0])) # outputs.append(CategoryEncoding(16, output_mode='one_hot', sparse=False)(inputs[2])) # Error: Value passed to parameter 'values' has DataType float32 not in list of allowed values: int32, int64 # outputs.append(CategoryEncoding(1023, output_mode='multi_hot', sparse=True)(inputs[0])) model = Model(inputs=inputs, outputs=outputs, name='test_model_embedding') model.compile(loss='mse', optimizer='adam') # fit to dummy data training_data_size = 2 data_in = generate_integer_input_data(training_data_size, 0, input_dims, input_shapes) initial_data_out = model.predict(data_in) data_out = generate_output_data(training_data_size, initial_data_out) model.fit(data_in, data_out, epochs=1) return model def get_test_model_variable() -> Model: """Returns a model with variably shaped input tensors.""" input_shapes = [ (None, None, 1), (None, None, 3), (None, 4), ] inputs = [Input(shape=s) for s in input_shapes] outputs = [] # same as axis=-1 outputs.append(Concatenate()([inputs[0], inputs[1]])) outputs.append(Conv2D(8, (3, 3), padding='same', activation='elu')(inputs[0])) outputs.append(Conv2D(8, (3, 3), padding='same', activation='relu')(inputs[1])) outputs.append(GlobalMaxPooling2D()(inputs[0])) outputs.append(Reshape((2, -1))(inputs[2])) outputs.append(Reshape((-1, 2))(inputs[2])) outputs.append(MaxPooling2D()(inputs[1])) outputs.append(AveragePooling1D(2)(inputs[2])) outputs.append(PReLU(shared_axes=[1, 2])(inputs[0])) outputs.append(PReLU(shared_axes=[1, 2])(inputs[1])) outputs.append(PReLU(shared_axes=[1, 2, 3])(inputs[1])) outputs.append(PReLU(shared_axes=[1])(inputs[2])) model = Model(inputs=inputs, outputs=outputs, name='test_model_variable') model.compile(loss='mse', optimizer='nadam') # fit to dummy data training_data_size = 2 data_in = generate_input_data(training_data_size, input_shapes) initial_data_out = 
model.predict(data_in) data_out = generate_output_data(training_data_size, initial_data_out) model.fit(data_in, data_out, epochs=10) return model def get_test_model_autoencoder() -> Model: """Returns a minimal autoencoder test model.""" input_img = Input(shape=(1,), name='input_img') encoded = Identity()(input_img) # Since it's about testing node connections, this suffices. encoder = Model(input_img, encoded, name="encoder") input_encoded = Input(shape=(1,), name='input_encoded') decoded = Identity()(input_encoded) decoder = Model(input_encoded, decoded, name="decoder") autoencoder_input = Input(shape=(1,), name='input_autoencoder') x = encoder(autoencoder_input) autoencodedanddecoded = decoder(x) autoencoder = Model(inputs=autoencoder_input, outputs=autoencodedanddecoded, name="autoencoder") autoencoder.compile(optimizer='sgd', loss='mse') return autoencoder def get_test_model_sequential() -> Model: """Returns a typical (VGG-like) sequential test model.""" model = Sequential() model.add(Conv2D(8, (3, 3), activation='relu', input_shape=(32, 32, 3))) model.add(Conv2D(8, (3, 3), activation='relu')) model.add(Permute((3, 1, 2))) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Permute((2, 3, 1))) model.add(Dropout(0.25)) model.add(Conv2D(16, (3, 3), activation='elu')) model.add(Conv2D(16, (3, 3))) model.add(ELU()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(64, activation='sigmoid')) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='sgd') # fit to dummy data training_data_size = 2 data_in = [np.random.random(size=(training_data_size, 32, 32, 3))] data_out = [np.random.random(size=(training_data_size, 10))] model.fit(data_in, data_out, epochs=10) return model def main() -> None: """Generate different test models and save them to the given directory.""" if len(sys.argv) != 3: print('usage: [model name] [destination file path]', flush=True) sys.exit(1) else: model_name = sys.argv[1] dest_path = sys.argv[2] get_model_functions = { 'exhaustive': get_test_model_exhaustive, 'embedding': get_test_model_embedding, 'variable': get_test_model_variable, 'autoencoder': get_test_model_autoencoder, 'sequential': get_test_model_sequential, } if not model_name in get_model_functions: print('unknown model name: ', model_name) sys.exit(2) np.random.seed(0) model_func = get_model_functions[model_name] model = model_func() model.save(dest_path) # Make sure models can be loaded again, # see https://github.com/fchollet/keras/issues/7682 model = load_model(dest_path) model.summary() # plot_model(model, to_file= str(model_name) + '.png', show_shapes=True, show_layer_names=True) if __name__ == "__main__": main() frugally-deep-0.17.1/keras_export/save_application_examples.py000077500000000000000000000102371476372554500246670ustar00rootroot00000000000000#!/usr/bin/env python3 """Save application models mentioned in Keras documentation """ import keras from keras.models import Model import convert_model __author__ = "Tobias Hermann" __copyright__ = "Copyright 2017, Tobias Hermann" __license__ = "MIT" __maintainer__ = "Tobias Hermann, https://github.com/Dobiasd/frugally-deep" __email__ = "editgym@gmail.com" def save_model(file_name_base: str, model: Model) -> None: """Save and convert Keras model""" keras_file = f'{file_name_base}.keras' fdeep_file = f'{file_name_base}.json' print(f'Saving {keras_file}') model.save(keras_file) print(f'Converting {keras_file} to {fdeep_file}.') 
convert_model.convert(keras_file, fdeep_file) print(f'Conversion of model {keras_file} to {fdeep_file} done.') def main() -> None: """Save famous example models in Keras (.keras) and fdeep (.json) format.""" print('Saving application examples') # save_model('convnextbase', keras.applications.convnext.ConvNeXtBase()) # custom object LayerScale # save_model('convnextlarge', keras.applications.convnext.ConvNeXtLarge()) # custom object LayerScale # save_model('convnextsmall', keras.applications.convnext.ConvNeXtSmall()) # custom object LayerScale # save_model('convnexttiny', keras.applications.convnext.ConvNeXtTiny()) # custom object LayerScale # save_model('convnextxlarge', keras.applications.convnext.ConvNeXtXLarge()) # custom object LayerScale save_model('densenet121', keras.applications.densenet.DenseNet121()) save_model('densenet169', keras.applications.densenet.DenseNet169()) save_model('densenet201', keras.applications.densenet.DenseNet201()) save_model('efficientnetb0', keras.applications.efficientnet.EfficientNetB0(weights=None)) save_model('efficientnetb1', keras.applications.efficientnet.EfficientNetB1(weights=None)) save_model('efficientnetb2', keras.applications.efficientnet.EfficientNetB2(weights=None)) save_model('efficientnetb3', keras.applications.efficientnet.EfficientNetB3(weights=None)) save_model('efficientnetb4', keras.applications.efficientnet.EfficientNetB4(weights=None)) save_model('efficientnetb5', keras.applications.efficientnet.EfficientNetB5(weights=None)) save_model('efficientnetb6', keras.applications.efficientnet.EfficientNetB6(weights=None)) save_model('efficientnetb7', keras.applications.efficientnet.EfficientNetB7(weights=None)) save_model('efficientnetv2b0', keras.applications.efficientnet_v2.EfficientNetV2B0()) save_model('efficientnetv2b1', keras.applications.efficientnet_v2.EfficientNetV2B1()) save_model('efficientnetv2b2', keras.applications.efficientnet_v2.EfficientNetV2B2()) save_model('efficientnetv2b3', keras.applications.efficientnet_v2.EfficientNetV2B3()) save_model('efficientnetv2l', keras.applications.efficientnet_v2.EfficientNetV2L()) save_model('efficientnetv2m', keras.applications.efficientnet_v2.EfficientNetV2M()) save_model('efficientnetv2s', keras.applications.efficientnet_v2.EfficientNetV2S()) # save_model('inceptionresnetv2', keras.applications.inception_resnet_v2.InceptionResNetV2(input_shape=(299, 299, 3))) # CustomScaleLayer save_model('inceptionv3', keras.applications.inception_v3.InceptionV3(input_shape=(299, 299, 3))) save_model('mobilenet', keras.applications.mobilenet.MobileNet()) save_model('mobilenetv2', keras.applications.mobilenet_v2.MobileNetV2()) save_model('nasnetlarge', keras.applications.nasnet.NASNetLarge(input_shape=(331, 331, 3))) save_model('nasnetmobile', keras.applications.nasnet.NASNetMobile(input_shape=(224, 224, 3))) save_model('resnet101', keras.applications.ResNet101()) save_model('resnet101v2', keras.applications.ResNet101V2()) save_model('resnet152', keras.applications.ResNet152()) save_model('resnet152v2', keras.applications.ResNet152V2()) save_model('resnet50', keras.applications.ResNet50()) save_model('resnet50v2', keras.applications.ResNet50V2()) save_model('vgg16', keras.applications.vgg16.VGG16()) save_model('vgg19', keras.applications.vgg19.VGG19()) save_model('xception', keras.applications.xception.Xception(input_shape=(299, 299, 3))) if __name__ == "__main__": main()
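# Example invocation (assumes a Python environment with the matching
# TensorFlow/Keras versions and network access for pretrained weights):
#   python3 keras_export/save_application_examples.py
# Each model is written as <name>.keras and converted to <name>.json in the
# current working directory.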
frugally-deep-0.17.1/logo/fdeep.png
[binary PNG image data (project logo) omitted]
frugally-deep-0.17.1/logo/fdeep.xcf
[binary GIMP XCF image data (logo source) omitted]
frugally-deep-0.17.1/script/000077500000000000000000000000001476372554500156665ustar00rootroot00000000000000frugally-deep-0.17.1/script/auto_format.sh000077500000000000000000000001471476372554500205470ustar00rootroot00000000000000
#!/usr/bin/env bash
(find include -name "*.hpp" && find test -name "*.cpp") | xargs clang-format -i {}
frugally-deep-0.17.1/test/000077500000000000000000000000001476372554500153415ustar00rootroot00000000000000frugally-deep-0.17.1/test/CMakeLists.txt000066400000000000000000000114551476372554500201070ustar00rootroot00000000000000
message(STATUS "Building Unit Tests ${UNITTEST}")

# look for Python3
find_package(Python3 COMPONENTS Interpreter Development REQUIRED)

# Please make sure that Python3 and Tensorflow have been installed correctly.
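# The add_custom_command calls below generate each test model with Keras
# (generate_test_models.py / readme_example_generate.py) and then convert it
# to frugally-deep's JSON format (convert_model.py); the test executables
# load these JSON files at runtime.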
message(STATUS "Please make sure Tensorflow is installed.") add_custom_command ( OUTPUT test_model_exhaustive.keras COMMAND bash -c "${Python3_EXECUTABLE} ${FDEEP_TOP_DIR}/keras_export/generate_test_models.py exhaustive test_model_exhaustive.keras" WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/) add_custom_command ( OUTPUT test_model_embedding.keras COMMAND bash -c "${Python3_EXECUTABLE} ${FDEEP_TOP_DIR}/keras_export/generate_test_models.py embedding test_model_embedding.keras" WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/) add_custom_command ( OUTPUT test_model_variable.keras COMMAND bash -c "${Python3_EXECUTABLE} ${FDEEP_TOP_DIR}/keras_export/generate_test_models.py variable test_model_variable.keras" WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/) add_custom_command ( OUTPUT test_model_autoencoder.keras COMMAND bash -c "${Python3_EXECUTABLE} ${FDEEP_TOP_DIR}/keras_export/generate_test_models.py autoencoder test_model_autoencoder.keras" WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/) add_custom_command ( OUTPUT test_model_sequential.keras COMMAND bash -c "${Python3_EXECUTABLE} ${FDEEP_TOP_DIR}/keras_export/generate_test_models.py sequential test_model_sequential.keras" WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/) add_custom_command ( OUTPUT readme_example_model.keras COMMAND bash -c "${Python3_EXECUTABLE} ${FDEEP_TOP_DIR}/test/readme_example_generate.py" WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/) add_custom_command ( OUTPUT test_model_exhaustive.json DEPENDS test_model_exhaustive.keras COMMAND bash -c "${Python3_EXECUTABLE} ${FDEEP_TOP_DIR}/keras_export/convert_model.py test_model_exhaustive.keras test_model_exhaustive.json" WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/) add_custom_command ( OUTPUT test_model_embedding.json DEPENDS test_model_embedding.keras COMMAND bash -c "${Python3_EXECUTABLE} ${FDEEP_TOP_DIR}/keras_export/convert_model.py test_model_embedding.keras test_model_embedding.json" WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/) add_custom_command ( OUTPUT test_model_variable.json DEPENDS test_model_variable.keras COMMAND bash -c "${Python3_EXECUTABLE} ${FDEEP_TOP_DIR}/keras_export/convert_model.py test_model_variable.keras test_model_variable.json" WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/) add_custom_command ( OUTPUT test_model_autoencoder.json DEPENDS test_model_autoencoder.keras COMMAND bash -c "${Python3_EXECUTABLE} ${FDEEP_TOP_DIR}/keras_export/convert_model.py test_model_autoencoder.keras test_model_autoencoder.json" WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/) add_custom_command ( OUTPUT test_model_sequential.json DEPENDS test_model_sequential.keras COMMAND bash -c "${Python3_EXECUTABLE} ${FDEEP_TOP_DIR}/keras_export/convert_model.py test_model_sequential.keras test_model_sequential.json" WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/) add_custom_command ( OUTPUT readme_example_model.json DEPENDS readme_example_model.keras COMMAND bash -c "${Python3_EXECUTABLE} ${FDEEP_TOP_DIR}/keras_export/convert_model.py readme_example_model.keras readme_example_model.json" WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/) hunter_add_package(doctest) find_package(doctest CONFIG REQUIRED) macro(_add_test _NAME _DEPENDS) add_custom_target(${_NAME}_data DEPENDS ${_DEPENDS}) add_executable(${_NAME} ${_NAME}.cpp) add_dependencies(${_NAME} ${_NAME}_data) add_test(NAME ${_NAME} COMMAND ${_NAME}) target_link_libraries(${_NAME} fdeep Threads::Threads doctest::doctest) endmacro() _add_test(test_model_exhaustive_test test_model_exhaustive.json) _add_test(test_model_embedding_test test_model_embedding.json) _add_test(test_model_variable_test test_model_variable.json) 
_add_test(test_model_autoencoder_test test_model_autoencoder.json)
_add_test(test_model_sequential_test test_model_sequential.json)
_add_test(readme_example_main readme_example_model.json)

add_custom_target(unittest
    COMMAND test_model_exhaustive_test
    COMMAND test_model_embedding_test
    COMMAND test_model_variable_test
    COMMAND test_model_autoencoder_test
    COMMAND test_model_sequential_test
    COMMAND readme_example_main
    COMMENT "Running unittests\n\n"
    VERBATIM
)
frugally-deep-0.17.1/test/Dockerfile000066400000000000000000000050551476372554500173400ustar00rootroot00000000000000
# For performance tests on application models run the following from the main directory:
# docker build --rm --progress=plain -f test/Dockerfile .
FROM ubuntu:24.04

RUN apt-get update -y --fix-missing
ENV DEBIAN_FRONTEND=noninteractive
RUN TZ=Etc/UTC apt-get -y install tzdata
RUN apt-get install -y build-essential apt-utils cmake python3 python3-pip git llvm-17 clang-17 apt-transport-https curl gnupg patchelf

RUN curl -fsSL https://bazel.build/bazel-release.pub.gpg | gpg --dearmor >bazel-archive-keyring.gpg
RUN mv bazel-archive-keyring.gpg /usr/share/keyrings
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/bazel-archive-keyring.gpg] https://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list
RUN apt-get update -y && apt-get install -y bazel-6.5.0
RUN ln -s /usr/bin/bazel-6.5.0 /usr/bin/bazel

RUN git clone -b 'v2.18.0' --single-branch --depth 1 https://github.com/tensorflow/tensorflow.git
WORKDIR /tensorflow
RUN ./configure
RUN bazel build -c opt --copt=-march=native --jobs 16 --local_ram_resources=HOST_RAM*.3 -c opt //tensorflow/tools/pip_package:wheel --repo_env=WHEEL_NAME=tensorflow_cpu
RUN pip install --break-system-packages bazel-bin/tensorflow/tools/pip_package/wheel_house/tensorflow_cpu-2.18.0-cp312-cp312-linux_x86_64.whl
WORKDIR /

RUN git clone -b 'v0.2.24' --single-branch --depth 1 https://github.com/Dobiasd/FunctionalPlus && cd FunctionalPlus && mkdir -p build && cd build && cmake .. && make && make install
RUN git clone -b '3.4.0' --single-branch --depth 1 https://gitlab.com/libeigen/eigen.git && cd eigen && mkdir -p build && cd build && cmake .. && make && make install && ln -s /usr/local/include/eigen3/Eigen /usr/local/include/Eigen
RUN git clone -b 'v3.11.3' --single-branch --depth 1 https://github.com/nlohmann/json && cd json && mkdir -p build && cd build && cmake -DJSON_BuildTests=OFF .. && make && make install

ADD include frugally-deep/include
ADD keras_export frugally-deep/keras_export
ADD test frugally-deep/test

ADD "https://www.random.org/cgi-bin/randbyte?nbytes=10&format=h" skipcache
RUN g++ -std=c++14 -O3 -DNDEBUG -march=native frugally-deep/test/applications_performance.cpp -I frugally-deep/include -o applications_performance
RUN cat /proc/cpuinfo | grep 'name' | uniq
RUN g++ --version
RUN CUDA_VISIBLE_DEVICES='' PYTHONUNBUFFERED=TRUE taskset --cpu-list 1 python3 ./frugally-deep/keras_export/save_application_examples.py | grep --line-buffered -e 'on average' -e 'Conversion of model'
RUN ./applications_performance | grep --line-buffered -e 'on average' -e 'Loading, constructing, testing'
frugally-deep-0.17.1/test/applications_performance.cpp000066400000000000000000000060271476372554500231210ustar00rootroot00000000000000
// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#include "fdeep/fdeep.hpp"

int main()
{
    std::vector<std::string> model_paths = {
        // "convnextbase.json",
        // "convnextlarge.json",
        // "convnextsmall.json",
        // "convnexttiny.json",
        // "convnextxlarge.json",
        "densenet121.json",
        "densenet169.json",
        "densenet201.json",
        "efficientnetb0.json",
        "efficientnetb1.json",
        "efficientnetb2.json",
        "efficientnetb3.json",
        "efficientnetb4.json",
        "efficientnetb5.json",
        "efficientnetb6.json",
        "efficientnetb7.json",
        "efficientnetv2b0.json",
        "efficientnetv2b1.json",
        "efficientnetv2b2.json",
        "efficientnetv2b3.json",
        "efficientnetv2l.json",
        "efficientnetv2m.json",
        "efficientnetv2s.json",
        // "inceptionresnetv2.json",
        "inceptionv3.json",
        "mobilenet.json",
        "mobilenetv2.json",
        "nasnetlarge.json",
        "nasnetmobile.json",
        "resnet101.json",
        "resnet101v2.json",
        "resnet152.json",
        "resnet152v2.json",
        "resnet50.json",
        "resnet50v2.json",
        "vgg16.json",
        "vgg19.json",
        "xception.json"
    };

    bool error = false;

    for (const auto& model_path : model_paths) {
        std::cout << "----" << std::endl;
        std::cout << model_path << std::endl;
#ifdef NDEBUG
        try {
            const auto model = fdeep::load_model(model_path, true);
            const std::size_t warm_up_runs = 3;
            const std::size_t test_runs = 5;
            for (std::size_t i = 0; i < warm_up_runs; ++i) {
                const double duration = model.test_speed();
                std::cout << "Forward pass took " << duration << " s." << std::endl;
            }
            double duration_sum = 0;
            std::cout << "Starting performance measurements." << std::endl;
            for (std::size_t i = 0; i < test_runs; ++i) {
                const double duration = model.test_speed();
                duration_sum += duration;
                std::cout << "Forward pass took " << duration << " s." << std::endl;
            }
            const double duration_avg = duration_sum / static_cast<double>(test_runs);
            std::cout << "Forward pass took " << duration_avg << " s on average." << std::endl;
        } catch (const std::exception& e) {
            std::cerr << "ERROR: " << e.what() << std::endl;
            error = true;
        }
#else
        const auto model = fdeep::load_model(model_path, true);
#endif
    }

    if (error) {
        std::cout << "There were errors." << std::endl;
        return 1;
    }
    std::cout << "All imports and test OK." << std::endl;
}
frugally-deep-0.17.1/test/readme_example_generate.py000066400000000000000000000007571476372554500225460ustar00rootroot00000000000000
#!/usr/bin/env python3

import numpy as np
from keras.layers import Input, Dense
from keras.models import Model

inputs = Input(shape=(4,))
x = Dense(5, activation='relu')(inputs)
predictions = Dense(3, activation='softmax')(x)

model = Model(inputs=inputs, outputs=predictions)
model.compile(loss='categorical_crossentropy', optimizer='nadam')

model.fit(
    np.asarray([[1, 2, 3, 4], [2, 3, 4, 5]]),
    np.asarray([[1, 0, 0], [0, 0, 1]]),
    epochs=10)

model.save('readme_example_model.keras')
frugally-deep-0.17.1/test/readme_example_main.cpp000066400000000000000000000011751476372554500220250ustar00rootroot00000000000000
// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "doctest/doctest.h"

#include <fdeep/fdeep.hpp>

TEST_CASE("readme_example_main, main")
{
    const auto model = fdeep::load_model("../readme_example_model.json");
    const auto result = model.predict(
        { fdeep::tensor(fdeep::tensor_shape(static_cast<std::size_t>(4)),
            fdeep::float_vec { 1, 2, 3, 4 }) });
    std::cout << fdeep::show_tensors(result) << std::endl;
}
frugally-deep-0.17.1/test/test_model_autoencoder_test.cpp000066400000000000000000000013721476372554500236360ustar00rootroot00000000000000
// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "doctest/doctest.h"

#include <fdeep/fdeep.hpp>

TEST_CASE("test_model_autoencoder_test, load_model")
{
    const auto model = fdeep::load_model("../test_model_autoencoder.json",
        true, fdeep::cout_logger, static_cast<fdeep::float_type>(0.00001));
    const auto multi_inputs = fplus::generate<std::vector<fdeep::tensors>>(
        [&]() -> fdeep::tensors { return model.generate_dummy_inputs(); },
        10);
    model.predict_multi(multi_inputs, false);
    model.predict_multi(multi_inputs, true);
}
frugally-deep-0.17.1/test/test_model_embedding_test.cpp000066400000000000000000000014261476372554500232440ustar00rootroot00000000000000
// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "doctest/doctest.h"

#define FDEEP_FLOAT_TYPE double
#include <fdeep/fdeep.hpp>

TEST_CASE("test_model_embedding_test, load_model")
{
    const auto model = fdeep::load_model("../test_model_embedding.json",
        true, fdeep::cout_logger, static_cast<fdeep::float_type>(0.00001));
    const auto multi_inputs = fplus::generate<std::vector<fdeep::tensors>>(
        [&]() -> fdeep::tensors { return model.generate_dummy_inputs(); },
        10);
    model.predict_multi(multi_inputs, false);
    model.predict_multi(multi_inputs, true);
}
frugally-deep-0.17.1/test/test_model_exhaustive_test.cpp000066400000000000000000000013701476372554500235110ustar00rootroot00000000000000
// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "doctest/doctest.h"

#include <fdeep/fdeep.hpp>

TEST_CASE("test_model_exhaustive_test, load_model")
{
    const auto model = fdeep::load_model("../test_model_exhaustive.json",
        true, fdeep::cout_logger, static_cast<fdeep::float_type>(0.00001));
    const auto multi_inputs = fplus::generate<std::vector<fdeep::tensors>>(
        [&]() -> fdeep::tensors { return model.generate_dummy_inputs(); },
        10);
    model.predict_multi(multi_inputs, false);
    model.predict_multi(multi_inputs, true);
}
frugally-deep-0.17.1/test/test_model_sequential_test.cpp000066400000000000000000000013701476372554500234760ustar00rootroot00000000000000
// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "doctest/doctest.h"

#include <fdeep/fdeep.hpp>

TEST_CASE("test_model_sequential_test, load_model")
{
    const auto model = fdeep::load_model("../test_model_sequential.json",
        true, fdeep::cout_logger, static_cast<fdeep::float_type>(0.00001));
    const auto multi_inputs = fplus::generate<std::vector<fdeep::tensors>>(
        [&]() -> fdeep::tensors { return model.generate_dummy_inputs(); },
        10);
    model.predict_multi(multi_inputs, false);
    model.predict_multi(multi_inputs, true);
}
frugally-deep-0.17.1/test/test_model_variable_test.cpp000066400000000000000000000013641476372554500231140ustar00rootroot00000000000000
// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
//  https://opensource.org/licenses/MIT)

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "doctest/doctest.h"

#include <fdeep/fdeep.hpp>

TEST_CASE("test_model_variable_test, load_model")
{
    const auto model = fdeep::load_model("../test_model_variable.json",
        true, fdeep::cout_logger, static_cast<fdeep::float_type>(0.00001));
    const auto multi_inputs = fplus::generate<std::vector<fdeep::tensors>>(
        [&]() -> fdeep::tensors { return model.generate_dummy_inputs(); },
        10);
    model.predict_multi(multi_inputs, false);
    model.predict_multi(multi_inputs, true);
}