tiny_dnn 1.0.0
A header-only, dependency-free deep learning framework in C++11
program_manager.h
/*
    COPYRIGHT

    All contributions by Taiga Nomi
    Copyright (c) 2013, Taiga Nomi
    All rights reserved.

    All other contributions:
    Copyright (c) 2013-2016, the respective contributors.
    All rights reserved.

    Each contributor holds copyright over their respective contributions.
    The project versioning (Git) records all such contribution source information.

    LICENSE

    The BSD 3-Clause License


    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    * Neither the name of tiny-dnn nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once

#include <unordered_map>

#include "tiny_dnn/layers/layer.h"

#include "tiny_dnn/core/framework/program.h"
#include "tiny_dnn/core/framework/device.fwd.h"

#if defined(USE_OPENCL) || defined(USE_CUDA)
#ifdef USE_OPENCL
#include "third_party/CLCudaAPI/clpp11.h"
#else
#include "third_party/CLCudaAPI/cupp11.h"
#endif
#endif

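/* Build-configuration note (a sketch, not canonical build flags):
 * defining USE_OPENCL selects CLCudaAPI's OpenCL back end (clpp11.h),
 * while USE_CUDA without USE_OPENCL selects the CUDA back end (cupp11.h).
 * An OpenCL build might look like:
 *
 *   g++ -std=c++11 -DUSE_OPENCL -I. main.cpp -lOpenCL
 */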
namespace tiny_dnn {

/* The class models a general manager that handles compiled OpenCL programs.
 * Since compiled programs need to be retained per layer type, it is
 * implemented as a singleton.
 */
class ProgramManager {
 public:
  /* Returns the single instance of the class, creating it on first use.
   * Calling the constructor publicly is not allowed: the constructor
   * is protected and is only invoked from within this function.
   */
  static ProgramManager& getInstance() {
    static ProgramManager instance;
    return instance;
  }

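  /* A minimal access sketch (illustrative, not part of the header itself):
   *
   *   ProgramManager& manager = ProgramManager::getInstance();
   *   // Every call site receives the same instance:
   *   assert(&manager == &ProgramManager::getInstance());
   */
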
  /* Registers and compiles kernel source code.
   *
   * Creates a new program based on the kernel string.
   * Note that the kernel string is moved out when constructing the
   * program to avoid a copy: it should no longer be used in the
   * remainder of this function.
   */
  void registerOp(const Device& device, layer& layer) {
#if defined(USE_OPENCL) || defined(USE_CUDA)
    // Register the device with the layer
    layer.setDevice(device);
    layer.createOp();

/*
    // retrieve the incoming device and layer
    CLCudaAPI::Device device_ = device.device();
    CLCudaAPI::Context context_ = device.context();

    // check whether a previous program was built with this
    // Device and Layer.
    Program key_program(&device, &layer);

    auto iter = programs_.find(key_program);
    if (iter != programs_.end()) {
      nn_warn("Program already registered.");
      return;
    }

    // Define the op kernel string and instantiate the program
    // TODO(edgar): load from `cl_kernels` dir.
    // std::ifstream cl_file("opencl_hello_world.cl");
    std::ifstream cl_file(layer.kernel_file());
    std::string program_tail{std::istreambuf_iterator<char>(cl_file),
                             std::istreambuf_iterator<char>()};
    // fixed kernel params
    std::string program_head =
      std::string("#define Dtype float\n") +
      std::string("#define Dtype4 float4\n") +
      std::string("#define int_tp int\n") +
      std::string("#define CONCAT(A,B) A##_##B\n") +
      std::string("#define TEMPLATE(name,type) CONCAT(name,type)\n");

    // per layer params
    program_head += layer.kernel_header();

    std::cout << layer.kernel_header() << std::endl;

    std::string program_string = std::string{program_head} + std::string{program_tail};
    auto program = CLCudaAPI::Program(context_, std::move(program_string));
*/
    /*
     * Builds this program and checks for any compilation errors.
     * If there are any, they are printed and registration is aborted.
     */
/*  nn_info("Compiling the kernel ...");
    auto compiler_options = std::vector<std::string>{};
    auto build_status = program.Build(device_, compiler_options);

    if (build_status != CLCudaAPI::BuildStatus::kSuccess) {
      auto message = program.GetBuildInfo(device_);
      //throw nn_error("Compiler error(s)/warning(s) found: " +
      //               to_string(message.c_str()));
      nn_warn("Compiler error(s)/warning(s) found: " +
              to_string(message.c_str()));
      return;
    }
    nn_info("Compiling the kernel ... OK");

    // Kernel compilation succeeded: register the program.
    programs_.insert({ key_program, program });
*/
#endif  // USE_OPENCL OR USE_CUDA
  }
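
  /* A minimal usage sketch (illustrative; the Device construction and
   * the layer type are placeholders, not canonical API calls):
   *
   *   Device device = ...;              // a previously created device
   *   fully_connected_layer fc(10, 2);  // any layer providing a device op
   *   ProgramManager::getInstance().registerOp(device, fc);
   */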

  // Returns the number of registered programs
  serial_size_t num_programs() const {
#if defined(USE_OPENCL) || defined(USE_CUDA)
    return static_cast<serial_size_t>(programs_.size());
#else
    return serial_size_t(0);
#endif
  }

  // Returns the compiled CLCudaAPI::Program for the given key Program,
  // which encodes the device and the op it was built for.
#if defined(USE_OPENCL) || defined(USE_CUDA)
  CLCudaAPI::Program program(const Program& program) {
    auto p = programs_.find(program);
    if (p == programs_.end()) {
      throw nn_error("Cannot retrieve program.");
    }
    return p->second;
  }
#endif
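
  /* A minimal retrieval sketch (illustrative; assumes registerOp already
   * compiled and stored a program for this device/layer pair):
   *
   *   Program key(&device, &layer);
   *   CLCudaAPI::Program prog = ProgramManager::getInstance().program(key);
   */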

  // Removes all registered programs from the manager's state
  void reset() {
#if defined(USE_OPENCL) || defined(USE_CUDA)
    programs_.clear();
#endif
  }

 protected:
  ProgramManager() = default;
  ProgramManager(const ProgramManager&) = delete;
  ProgramManager& operator=(const ProgramManager&) = delete;

#if defined(USE_OPENCL) || defined(USE_CUDA)
  /* Container holding the compiled kernels, keyed by (device, op) pairs */
  std::unordered_map<Program, CLCudaAPI::Program, ProgramHash> programs_;
#endif
};

}  // namespace tiny_dnn