33#include "tiny_dnn/core/backend_tiny.h"
34#include "tiny_dnn/core/backend_nnp.h"
35#include "tiny_dnn/core/backend_dnn.h"
37#include "tiny_dnn/core/backend_avx.h"
40#include "tiny_dnn/util/util.h"
41#include "tiny_dnn/util/image.h"
42#include "tiny_dnn/activations/activation_function.h"
44using namespace tiny_dnn::core;
53template<
typename Activation = activation::
identity>
57 CNN_USE_LAYER_MEMBERS;
78 serial_size_t out_channels,
79 padding pad_type = padding::valid,
81 serial_size_t w_stride = 1,
82 serial_size_t h_stride = 1,
83 backend_t backend_type = core::backend_t::internal)
84 :
Base(std_input_order(has_bias)) {
87 out_channels, pad_type, has_bias,
89 init_backend(backend_type);
113 serial_size_t out_channels,
114 padding pad_type = padding::valid,
115 bool has_bias =
true,
116 serial_size_t w_stride = 1,
117 serial_size_t h_stride = 1,
118 backend_t backend_type = core::backend_t::internal)
119 :
Base(std_input_order(has_bias)) {
122 out_channels, pad_type, has_bias,
124 init_backend(backend_type);
147 serial_size_t out_channels,
149 padding pad_type = padding::valid,
150 bool has_bias =
true,
151 serial_size_t w_stride = 1,
152 serial_size_t h_stride = 1,
153 backend_t backend_type = core::backend_t::internal)
154 :
Base(std_input_order(has_bias)) {
157 out_channels, pad_type, has_bias,
160 init_backend(backend_type);
185 serial_size_t out_channels,
187 padding pad_type = padding::valid,
188 bool has_bias =
true,
189 serial_size_t w_stride = 1,
190 serial_size_t h_stride = 1,
191 backend_t backend_type = core::backend_t::internal)
192 :
Base(has_bias ? 3 : 2, 1, std_input_order(has_bias)) {
195 out_channels, pad_type, has_bias,
198 init_backend(backend_type);
204 , params_(std::
move(other.params_))
205 , cws_(std::
move(other.cws_)) {
206 init_backend(core::backend_t::internal);
211 return params_.weight.width_ *
212 params_.weight.height_ * params_.in.depth_;
217 return (params_.weight.width_ / params_.w_stride) *
218 (params_.weight.height_ / params_.h_stride) *
227 std::vector<tensor_t*>&
out_data)
override {
234 }
else if (
in_data.size() == 6) {
247 const std::vector<tensor_t*>&
out_data,
249 std::vector<tensor_t*>&
in_grad)
override {
253 std::vector<index3d<serial_size_t>>
in_shape()
const override {
254 if (params_.has_bias) {
255 return { params_.in, params_.weight,
258 return { params_.in, params_.weight };
262 std::vector<index3d<serial_size_t>>
263 out_shape()
const override {
return { params_.out, params_.out }; }
267 image<> weight_to_image()
const {
276 img.resize(width, height);
279 auto minmax = std::minmax_element(
W.begin(),
W.end());
281 for (serial_size_t
r = 0;
r < params_.in.depth_; ++
r) {
282 for (serial_size_t
c = 0;
c < params_.out.depth_; ++
c) {
283 if (!params_.tbl.is_connected(
c,
r))
continue;
288 serial_size_t
idx = 0;
290 for (serial_size_t
y = 0;
y < params_.weight.height_; ++
y) {
291 for (serial_size_t
x = 0;
x < params_.weight.width_; ++
x) {
292 idx =
c * params_.in.depth_ +
r;
293 idx = params_.weight.get_index(
x,
y,
idx);
308 void conv_set_params(
const shape3d& in,
314 serial_size_t w_stride,
315 serial_size_t h_stride,
326 params_.has_bias = has_bias;
327 params_.pad_type =
ptype;
328 params_.w_stride = w_stride;
329 params_.h_stride = h_stride;
334 if (params_.pad_type == padding::same) {
335 cws_.prev_out_buf_.resize(1, vec_t(params_.in_padded.size(),
float_t(0)));
336 cws_.prev_delta_padded_.resize(1, vec_t(params_.in_padded.size(),
float_t(0)));
339 cws_.prev_out_buf_.clear();
343 serial_size_t in_length(serial_size_t in_length,
344 serial_size_t
window_size, padding pad_type)
const {
345 return pad_type == padding::same ?
349 static serial_size_t conv_out_length(serial_size_t in_length,
351 serial_size_t
stride, padding pad_type) {
353 if (pad_type == padding::same) {
355 }
else if (pad_type == padding::valid) {
358 throw nn_error(
"Not recognized pad_type.");
360 return static_cast<serial_size_t
>(
ceil(
tmp));
363 static serial_size_t conv_out_dim(serial_size_t
in_width,
366 serial_size_t w_stride,
367 serial_size_t h_stride, padding pad_type) {
372 serial_size_t conv_out_dim(serial_size_t
in_width,
376 serial_size_t w_stride,
377 serial_size_t h_stride, padding pad_type)
const {
382 void copy_and_pad_input(
const tensor_t& in) {
385 serial_size_t
sample_count =
static_cast<serial_size_t
>(in.size());
389 if (params_.pad_type == padding::same) {
395 if (params_.pad_type == padding::valid) {
402 for (serial_size_t
c = 0;
c < params_.in.depth_;
c++) {
403 float_t *
pimg = &(*dst)[params_.in_padded.get_index(params_.weight.width_ / 2, params_.weight.height_ / 2,
c)];
406 for (serial_size_t
y = 0;
y < params_.in.height_;
y++,
pin += params_.in.width_,
pimg += params_.in_padded.width_) {
407 std::copy(
pin,
pin + params_.in.width_,
pimg);
417 if (params_.pad_type == padding::valid) {
422 serial_size_t
idx = 0;
426 for (serial_size_t
c = 0;
c < params_.in.depth_;
c++) {
428 idx = params_.in_padded.get_index(params_.weight.width_ / 2,
429 params_.weight.height_ / 2,
c);
432 for (serial_size_t
y = 0;
y < params_.in.height_;
y++) {
433 std::copy(
pin,
pin + params_.in.width_,
pdst);
434 pdst += params_.in.width_;
435 pin += params_.in_padded.width_;
442 void init_backend(
const backend_t backend_type) {
443 std::shared_ptr<core::backend>
backend =
nullptr;
446 if (backend_type == backend_t::internal) {
447 backend = std::make_shared<core::tiny_backend>(¶ms_,
448 [
this](
const tensor_t& in) {
449 return copy_and_pad_input(in);
451 [
this](
const tensor_t&
delta, tensor_t&
dst) {
452 return copy_and_unpad_delta(
delta,
dst);
454 [
this](
const tensor_t&
p_delta,
455 const tensor_t& out, tensor_t&
c_delta) {
459 }
else if (backend_type == backend_t::nnpack) {
460 backend = std::make_shared<core::nnp_backend>(¶ms_,
461 [
this](
const tensor_t& in) {
462 return copy_and_pad_input(in);
465 }
else if (backend_type == backend_t::libdnn) {
466 backend = std::make_shared<core::dnn_backend>();
468 }
else if (backend_type == backend_t::avx) {
469 backend = std::make_shared<core::avx_backend>(¶ms_,
470 [
this](
const tensor_t& in) {
471 return copy_and_pad_input(in);
473 [
this](
const tensor_t&
delta, tensor_t&
dst) {
474 return copy_and_unpad_delta(
delta,
dst);
476 [
this](
const tensor_t&
p_delta,
477 const tensor_t& out, tensor_t&
c_delta) {
483 throw nn_error(
"Not supported backend type.");
488 Base::backend_->set_layer(
this);
490 throw nn_error(
"Could not allocate the backend.");
Definition conv_params.h:92
single-input, single-output network with activation function
Definition feedforward_layer.h:37
Simple image utility class.
Definition image.h:94
serial_size_t in_channels() const
number of outgoing edges in this layer
Definition layer.h:146
error exception class for tiny-dnn
Definition nn_error.h:37
2D convolution layer
Definition quantized_convolutional_layer.h:54
quantized_convolutional_layer(quantized_convolutional_layer &&other)
move constructor — transfers the other layer's parameters and workspace, then re-initializes the internal backend
Definition quantized_convolutional_layer.h:202
quantized_convolutional_layer(serial_size_t in_width, serial_size_t in_height, serial_size_t window_size, serial_size_t in_channels, serial_size_t out_channels, const connection_table &connection_table, padding pad_type=padding::valid, bool has_bias=true, serial_size_t w_stride=1, serial_size_t h_stride=1, backend_t backend_type=core::backend_t::internal)
constructing convolutional layer
Definition quantized_convolutional_layer.h:143
void back_propagation(const std::vector< tensor_t * > &in_data, const std::vector< tensor_t * > &out_data, std::vector< tensor_t * > &out_grad, std::vector< tensor_t * > &in_grad) override
return delta of previous layer (delta=\frac{dE}{da}, a=wx in fully-connected layer)
Definition quantized_convolutional_layer.h:246
serial_size_t fan_out_size() const override
number of outgoing connections for each input unit used only for weight/bias initialization methods w...
Definition quantized_convolutional_layer.h:216
quantized_convolutional_layer(serial_size_t in_width, serial_size_t in_height, serial_size_t window_size, serial_size_t in_channels, serial_size_t out_channels, padding pad_type=padding::valid, bool has_bias=true, serial_size_t w_stride=1, serial_size_t h_stride=1, backend_t backend_type=core::backend_t::internal)
constructing convolutional layer
Definition quantized_convolutional_layer.h:74
quantized_convolutional_layer(serial_size_t in_width, serial_size_t in_height, serial_size_t window_width, serial_size_t window_height, serial_size_t in_channels, serial_size_t out_channels, padding pad_type=padding::valid, bool has_bias=true, serial_size_t w_stride=1, serial_size_t h_stride=1, backend_t backend_type=core::backend_t::internal)
constructing convolutional layer
Definition quantized_convolutional_layer.h:108
std::string layer_type() const override
name of layer, should be unique for each concrete class
Definition quantized_convolutional_layer.h:265
void forward_propagation(const std::vector< tensor_t * > &in_data, std::vector< tensor_t * > &out_data) override
Definition quantized_convolutional_layer.h:226
std::vector< index3d< serial_size_t > > out_shape() const override
array of output shapes (width x height x depth)
Definition quantized_convolutional_layer.h:263
quantized_convolutional_layer(serial_size_t in_width, serial_size_t in_height, serial_size_t window_width, serial_size_t window_height, serial_size_t in_channels, serial_size_t out_channels, const connection_table &connection_table, padding pad_type=padding::valid, bool has_bias=true, serial_size_t w_stride=1, serial_size_t h_stride=1, backend_t backend_type=core::backend_t::internal)
constructing convolutional layer
Definition quantized_convolutional_layer.h:180
serial_size_t fan_in_size() const override
number of incoming connections for each output unit
Definition quantized_convolutional_layer.h:210
std::vector< index3d< serial_size_t > > in_shape() const override
array of input shapes (width x height x depth)
Definition quantized_convolutional_layer.h:253
Definition conv_params.h:40
Definition conv_params.h:34