Commit 904b59fd by Ting PAN

Add Contrib ops

1 parent 36f27485
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------
#ifndef DRAGON_OPERATORS_MISC_PROPOSAL_OP_H_
#define DRAGON_OPERATORS_MISC_PROPOSAL_OP_H_
#include "core/operator.h"
namespace dragon {

//  Generates regional proposals (Faster R-CNN style) from objectness
//  scores and bbox deltas; parameters are read from the OperatorDef args.
template <class Context>
class ProposalOp final : public Operator<Context> {
 public:
    ProposalOp(const OperatorDef& op_def, Workspace* ws)
        //  Base class first, then members in declaration order, so the
        //  mem-initializer list matches the actual initialization order
        //  (avoids -Wreorder and reader confusion).
        : Operator<Context>(op_def, ws),
          min_size_(OperatorBase::GetSingleArg<int>("min_size", 16)),
          base_size_(OperatorBase::GetSingleArg<int>("base_size", 16)),
          feat_stride_(OperatorBase::GetSingleArg<int>("feat_stride", -1)),
          pre_nms_topn_(OperatorBase::GetSingleArg<int>("pre_nms_topn", 12000)),
          post_nms_topn_(OperatorBase::GetSingleArg<int>("post_nms_topn", 2000)),
          nms_thresh_(OperatorBase::GetSingleArg<float>("nms_thresh", 0.7f)) {
        Setup();
    }

    //  One-time initialization (e.g. anchor generation); defined in the .cc/.cu.
    void Setup();
    void RunOnDevice() override;
    template <typename T> void RunWithType();

 protected:
    int min_size_, base_size_, feat_stride_;
    int pre_nms_topn_, post_nms_topn_;
    float nms_thresh_;
    //  Scratch tensors reused across runs.
    Tensor anchors_, roi_indices_, proposals_, nms_mask_;
};

}    // namespace dragon

#endif    // DRAGON_OPERATORS_MISC_PROPOSAL_OP_H_
\ No newline at end of file
...@@ -64,6 +64,7 @@ Custom ...@@ -64,6 +64,7 @@ Custom
operators/custom/data_process operators/custom/data_process
operators/custom/vec_mult operators/custom/vec_mult
========================================= ===================================================================== ========================================= =====================================================================
List Brief List Brief
========================================= ===================================================================== ========================================= =====================================================================
...@@ -73,6 +74,19 @@ List Brief ...@@ -73,6 +74,19 @@ List Brief
========================================= ===================================================================== ========================================= =====================================================================
Contrib
-------
.. toctree::
:hidden:
operators/contrib/rcnn
========================================= =====================================================================
List Brief
========================================= =====================================================================
`dragon.operators.contrib.rcnn`_ Contrib ops for R-CNN.
========================================= =====================================================================
.. _dragon.operators.data: operators/data.html .. _dragon.operators.data: operators/data.html
...@@ -91,4 +105,6 @@ List Brief ...@@ -91,4 +105,6 @@ List Brief
.. _dragon.io: io.html .. _dragon.io: io.html
.. _dragon.operators.custom.minibatch: operators/custom/minibatch.html .. _dragon.operators.custom.minibatch: operators/custom/minibatch.html
.. _dragon.operators.custom.data_process: operators/custom/data_process.html .. _dragon.operators.custom.data_process: operators/custom/data_process.html
.. _dragon.operators.custom.vec_mult: operators/custom/vec_mult.html .. _dragon.operators.custom.vec_mult: operators/custom/vec_mult.html
\ No newline at end of file .. _dragon.operators.contrib.rcnn: operators/contrib/rcnn.html
============
:mod:`R-CNN`
============
.. toctree::
:hidden:
.. automodule:: dragon.operators.contrib.rcnn.ops
:members:
\ No newline at end of file
...@@ -35,8 +35,8 @@ List Brief ...@@ -35,8 +35,8 @@ List Brief
`Conv2d`_ 2d Convolution. `Conv2d`_ 2d Convolution.
`Conv2dTranspose`_ 2d Deconvolution. `Conv2dTranspose`_ 2d Deconvolution.
`Pool2d`_ 2d Pooling, MAX or AVG. `Pool2d`_ 2d Pooling, MAX or AVG.
`ROIPooling`_ ROIPoolin(MAX), introduced by `[Girshick, 2015] <https://arxiv.org/abs/1504.08083>`_. `ROIPooling`_ ROIPooling(MAX), introduced by `[Girshick, 2015] <https://arxiv.org/abs/1504.08083>`_.
`ROIAlign`_ ROIAlign(MAX), introduced by `[He et.al, 2017] <https://arxiv.org/abs/1703.06870>`_. `ROIAlign`_ ROIAlign(AVG), introduced by `[He et.al, 2017] <https://arxiv.org/abs/1703.06870>`_.
`LRN`_ Local Response Normalization, introduced by `[Krizhevsky et.al, 2012] <http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks>`_. `LRN`_ Local Response Normalization, introduced by `[Krizhevsky et.al, 2012] <http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks>`_.
`NNResize`_ Resize the image with Nearest-Neighbor method. `NNResize`_ Resize the image with Nearest-Neighbor method.
`BilinearResize`_ Resize the image with Bi-linear method. `BilinearResize`_ Resize the image with Bi-linear method.
...@@ -167,6 +167,14 @@ List Brief ...@@ -167,6 +167,14 @@ List Brief
`Proposal`_ Generate Regional Proposals, introduced by `[Ren et.al, 2015] <https://arxiv.org/abs/1506.01497>`_. `Proposal`_ Generate Regional Proposals, introduced by `[Ren et.al, 2015] <https://arxiv.org/abs/1506.01497>`_.
================= ====================================================================== ================= ======================================================================
Contrib
-------
================= ======================================================================
List Brief
================= ======================================================================
`Proposal`_ Generate Regional Proposals, introduced by `[Ren et.al, 2015] <https://arxiv.org/abs/1506.01497>`_.
================= ======================================================================
Cast Cast
---- ----
================= ====================================================================== ================= ======================================================================
...@@ -279,7 +287,8 @@ List Brief ...@@ -279,7 +287,8 @@ List Brief
.. _Accuracy: operators/misc.html#dragon.operators.misc.Accuracy .. _Accuracy: operators/misc.html#dragon.operators.misc.Accuracy
.. _StopGradient: operators/misc.html#dragon.operators.misc.StopGradient .. _StopGradient: operators/misc.html#dragon.operators.misc.StopGradient
.. _MovingAverage: operators/misc.html#dragon.operators.misc.MovingAverage .. _MovingAverage: operators/misc.html#dragon.operators.misc.MovingAverage
.. _Proposal: operators/misc.html#dragon.operators.misc.Proposal
.. _Proposal: operators/contrib/rcnn.html#dragon.operators.contrib.rcnn.ops.Proposal
.. _FloatToHalf: operators/cast.html#dragon.operators.misc.FloatToHalf .. _FloatToHalf: operators/cast.html#dragon.operators.misc.FloatToHalf
......
...@@ -282,6 +282,8 @@ API Reference ...@@ -282,6 +282,8 @@ API Reference
.. _NormalizeParameter.scale_filler: https://github.com/weiliu89/caffe/blob/f5eac041aafbc8b86954bd161710f65e70042ce6/src/caffe/proto/caffe.proto#L1332 .. _NormalizeParameter.scale_filler: https://github.com/weiliu89/caffe/blob/f5eac041aafbc8b86954bd161710f65e70042ce6/src/caffe/proto/caffe.proto#L1332
.. _NormalizeParameter.channel_shared: https://github.com/weiliu89/caffe/blob/f5eac041aafbc8b86954bd161710f65e70042ce6/src/caffe/proto/caffe.proto#L1334 .. _NormalizeParameter.channel_shared: https://github.com/weiliu89/caffe/blob/f5eac041aafbc8b86954bd161710f65e70042ce6/src/caffe/proto/caffe.proto#L1334
.. _NormalizeParameter.eps: https://github.com/weiliu89/caffe/blob/f5eac041aafbc8b86954bd161710f65e70042ce6/src/caffe/proto/caffe.proto#L1336 .. _NormalizeParameter.eps: https://github.com/weiliu89/caffe/blob/f5eac041aafbc8b86954bd161710f65e70042ce6/src/caffe/proto/caffe.proto#L1336
.. _ReductionParameter.operation: https://github.com/BVLC/caffe/blob/effcdb0b62410b2a6a54f18f23cf90733a115673/src/caffe/proto/caffe.proto#L973
.. _ReductionParameter.axis: https://github.com/BVLC/caffe/blob/effcdb0b62410b2a6a54f18f23cf90733a115673/src/caffe/proto/caffe.proto#L988
.. _TileParameter.multiples: https://github.com/neopenx/Dragon/blob/6eeac5fec58ed3d0d79f0b4003471e4a641c72f4/Dragon/python/dragon/vm/caffe/proto/caffe.proto#L1173 .. _TileParameter.multiples: https://github.com/neopenx/Dragon/blob/6eeac5fec58ed3d0d79f0b4003471e4a641c72f4/Dragon/python/dragon/vm/caffe/proto/caffe.proto#L1173
.. _ExpandDimsParameter.axis: https://github.com/neopenx/Dragon/blob/6eeac5fec58ed3d0d79f0b4003471e4a641c72f4/Dragon/python/dragon/vm/caffe/proto/caffe.proto#L1480 .. _ExpandDimsParameter.axis: https://github.com/neopenx/Dragon/blob/6eeac5fec58ed3d0d79f0b4003471e4a641c72f4/Dragon/python/dragon/vm/caffe/proto/caffe.proto#L1480
.. _ProposalParameter.feat_stride: https://github.com/sanghoon/caffe/blob/6068dd04ea93cca9fcee036628fdb3ea95b4ebcd/src/caffe/proto/caffe.proto#L431 .. _ProposalParameter.feat_stride: https://github.com/sanghoon/caffe/blob/6068dd04ea93cca9fcee036628fdb3ea95b4ebcd/src/caffe/proto/caffe.proto#L431
......
# --------------------------------------------------------
# Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# R-CNN ops
from dragon.operators.contrib.rcnn.ops import *
# --------------------------------------------------------
# Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
\ No newline at end of file
# --------------------------------------------------------
# Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dragon.operators import *
def Proposal(inputs, strides, ratios, scales,
             pre_nms_top_n=6000, post_nms_top_n=300,
             nms_thresh=0.7, min_size=16,
             min_level=2, max_level=5,
             canonical_scale=224, canonical_level=4, **kwargs):
    """Generate Regional Proposals, introduced by `[Ren et.al, 2015] <https://arxiv.org/abs/1506.01497>`_.

    Multi-Level proposals was introduced by `[Lin et.al, 2017] <https://arxiv.org/abs/1612.03144>`_.

    For single level proposals(e.g. C4), the inputs should be: [cls_probs, bbox_deltas, im_info].

    For multiple level proposals(e.g. FPN), the inputs should be: [cls_score/Px, ...] + [cls_probs, bbox_deltas, im_info].

    Parameters
    ----------
    inputs : list of Tensor
        The inputs.
    strides : list of int
        The strides of anchors.
    ratios : list of float
        The ratios of anchors.
    scales : list of float
        The scales of anchors.
    pre_nms_top_n : int
        The number of anchors before nms.
    post_nms_top_n : int
        The number of anchors after nms.
    nms_thresh : float
        The threshold of nms.
    min_size : int
        The min size of anchors.
    min_level : int
        Finest level of the FPN pyramid.
    max_level : int
        Coarsest level of the FPN pyramid.
    canonical_scale : int
        The baseline scale of mapping policy.
    canonical_level : int
        Heuristic level of the canonical scale.

    Returns
    -------
    Tensor
        The proposals.

    """
    # At least 3 inputs (single-level case); FPN passes extra per-level scores.
    CheckInputs(inputs, 3, INT_MAX)
    # NOTE: ParseArguments(locals()) snapshots every local name defined so far
    # as an op argument -- do not introduce new locals before this call.
    arguments = ParseArguments(locals())
    # One output per pyramid level for FPN; exactly 3 inputs means single level.
    num_levels = (max_level - min_level) + 1
    num_levels = 1 if len(inputs) == 3 else num_levels
    outputs = Tensor.CreateOperator(nout=num_levels, op_type='Proposal', **arguments)
    return outputs
\ No newline at end of file
...@@ -157,44 +157,4 @@ def MovingAverage(inputs, decay, **kwargs): ...@@ -157,44 +157,4 @@ def MovingAverage(inputs, decay, **kwargs):
output = Tensor.CreateOperator(op_type='MovingAverage', output = Tensor.CreateOperator(op_type='MovingAverage',
existing_outputs=variable, **arguments) existing_outputs=variable, **arguments)
return output
def Proposal(inputs, ratios, scales,
base_size=16, min_size=16, feat_stride=16,
pre_nms_topn=12000, post_nms_topn=2000, nms_thresh=0.7, **kwargs):
"""Generate Regional Proposals, introduced by `[Ren et.al, 2015] <https://arxiv.org/abs/1506.01497>`_.
Parameters
----------
inputs : list of Tensor
The inputs, represent [input, anchors, im_info].
ratios : list of float
The ratios of anchors.
scales : list of float
The scales of anchors.
base_size : int
The base size of anchors.
min_size : int
The min size of anchors.
feat_stride : int
The stride of input. Default is ``16`` (The 4th down-samples).
pre_nms_topn : int
The number of anchors before nms.
post_nms_topn : int
The number of anchors after nms.
nms_thresh : float
The threshold of nms.
Returns
-------
Tensor
The proposals.
"""
CheckInputs(inputs, 3)
arguments = ParseArguments(locals())
output = Tensor.CreateOperator(nout=1, op_type='Proposal', **arguments)
return output return output
\ No newline at end of file
...@@ -272,9 +272,14 @@ def Reduce(inputs, axis=-1, operation='NONE', keep_dims=False, **kwargs): ...@@ -272,9 +272,14 @@ def Reduce(inputs, axis=-1, operation='NONE', keep_dims=False, **kwargs):
output = Tensor.CreateOperator(nout=1, op_type='Reduce', **arguments) output = Tensor.CreateOperator(nout=1, op_type='Reduce', **arguments)
if inputs.shape is not None: if inputs.shape is not None:
if axis == -1: output.shape = [1] output.shape = inputs.shape[:]
if axis == -1:
if keep_dims:
for i in xrange(len(output.shape)):
output.shape[i] = 1
else: output.shape = [1]
else: else:
output.shape = inputs.shape[:]
if keep_dims: output.shape[axis] = 1 if keep_dims: output.shape[axis] = 1
else: del output.shape[axis] else: del output.shape[axis]
......
...@@ -329,7 +329,7 @@ def ROIPooling(inputs, pool_h, pool_w, spatial_scale, **kwargs): ...@@ -329,7 +329,7 @@ def ROIPooling(inputs, pool_h, pool_w, spatial_scale, **kwargs):
def ROIAlign(inputs, pool_h=0, pool_w=0, spatial_scale=1.0, sampling_ratio=2, **kwargs): def ROIAlign(inputs, pool_h=0, pool_w=0, spatial_scale=1.0, sampling_ratio=2, **kwargs):
"""Max ROIAlign, introduced by `[He et.al, 2017] <https://arxiv.org/abs/1703.06870>`_. """AVG ROIAlign, introduced by `[He et.al, 2017] <https://arxiv.org/abs/1703.06870>`_.
The first dimension of input must be ``1``. The first dimension of input must be ``1``.
......
...@@ -21,6 +21,7 @@ from .operators import mpi ...@@ -21,6 +21,7 @@ from .operators import mpi
from .operators import ndarray from .operators import ndarray
from .operators import norm from .operators import norm
from .operators import recurrent from .operators import recurrent
from .operators import contrib
# data # data
LMDBData = data.LMDBData LMDBData = data.LMDBData
...@@ -128,11 +129,13 @@ Template = misc.Template ...@@ -128,11 +129,13 @@ Template = misc.Template
Accuracy = misc.Accuracy Accuracy = misc.Accuracy
StopGradient = misc.StopGradient StopGradient = misc.StopGradient
MovingAverage = misc.MovingAverage MovingAverage = misc.MovingAverage
Proposal = misc.Proposal
# cast # cast
FloatToHalf = cast.FloatToHalf FloatToHalf = cast.FloatToHalf
# mpi # mpi
MPIBroadcast = mpi.MPIBroadcast MPIBroadcast = mpi.MPIBroadcast
MPIGather = mpi.MPIGather MPIGather = mpi.MPIGather
\ No newline at end of file
# contrib
Proposal = contrib.Proposal # R-CNN
\ No newline at end of file
...@@ -272,7 +272,7 @@ class GatherLayer(Layer): ...@@ -272,7 +272,7 @@ class GatherLayer(Layer):
Parameters Parameters
---------- ----------
axis : int axis : int
The axis for gathering. Refer `GatherParameter.axis`_. The axis for gathering. Refer ``GatherParameter.axis``.
""" """
def __init__(self, LayerParameter): def __init__(self, LayerParameter):
...@@ -623,35 +623,44 @@ class ProposalLayer(Layer): ...@@ -623,35 +623,44 @@ class ProposalLayer(Layer):
Parameters Parameters
---------- ----------
feat_stride : int stride : list of int
The stride of input. Refer `ProposalParameter.feat_stride`_. The stride of anchors. Refer ``ProposalParameter.stride``.
base_size : int
The base size of anchors. Refer `ProposalParameter.base_size`_.
min_size : int
The min size of anchors. Refer `ProposalParameter.min_size`_.
ratio : list of float
The ratios of anchors. Refer `ProposalParameter.ratio`_.
scale : list of float scale : list of float
The scales of anchors. Refer `ProposalParameter.scale`_. The scales of anchors. Refer `ProposalParameter.scale`_.
pre_nms_topn : int ratio : list of float
The num of anchors before nms. Refer `ProposalParameter.pre_nms_topn`_. The ratios of anchors. Refer `ProposalParameter.ratio`_.
post_nms_topn : int pre_nms_top_n : int
The num of anchors before nms. Refer `ProposalParameter.pre_nms_topn`_.
post_nms_top_n : int
The num of anchors after nms. Refer `ProposalParameter.post_nms_topn`_. The num of anchors after nms. Refer `ProposalParameter.post_nms_topn`_.
nms_thresh : float nms_thresh : float
The threshold of nms. Refer `ProposalParameter.nms_thresh`_. The threshold of nms. Refer `ProposalParameter.nms_thresh`_.
min_size : int
The min size of anchors. Refer `ProposalParameter.min_size`_.
min_level : int
Finest level of the FPN pyramid. Refer ``ProposalParameter.min_level``.
max_level : int
Coarsest level of the FPN pyramid. Refer ``ProposalParameter.max_level``.
canonical_scale : int
The baseline scale of mapping policy. Refer ``ProposalParameter.canonical_scale``.
canonical_level : int
Heuristic level of the canonical scale. Refer ``ProposalParameter.canonical_level``.
""" """
def __init__(self, LayerParameter): def __init__(self, LayerParameter):
super(ProposalLayer, self).__init__(LayerParameter) super(ProposalLayer, self).__init__(LayerParameter)
param = LayerParameter.proposal_param param = LayerParameter.proposal_param
self._param = {'base_size': param.base_size, self._param = {'strides': param.stride,
'min_size': param.min_size,
'feat_stride': param.feat_stride,
'pre_nms_topn': param.pre_nms_topn,
'post_nms_topn': param.post_nms_topn,
'nms_thresh': param.nms_thresh,
'ratios': param.ratio, 'ratios': param.ratio,
'scales': param.scale} 'scales': param.scale,
'pre_nms_top_n': param.pre_nms_top_n,
'post_nms_top_n': param.post_nms_top_n,
'nms_thresh': param.nms_thresh,
'min_size': param.min_size,
'min_level': param.min_level,
'max_level': param.max_level,
'canonical_scale': param.canonical_scale,
'canonical_level': param.canonical_level}
def Setup(self, bottom): def Setup(self, bottom):
super(ProposalLayer, self).Setup(bottom) super(ProposalLayer, self).Setup(bottom)
......
...@@ -1474,14 +1474,17 @@ message ExpandDimsParameter { ...@@ -1474,14 +1474,17 @@ message ExpandDimsParameter {
} }
message ProposalParameter { message ProposalParameter {
optional uint32 feat_stride = 1 [default = 16]; repeated int32 stride = 1;
optional uint32 base_size = 2 [default = 16]; repeated float ratio = 2;
optional uint32 min_size = 3 [default = 16]; repeated float scale = 3;
repeated float ratio = 4; optional uint32 pre_nms_top_n = 4 [default = 6000];
repeated float scale = 5; optional uint32 post_nms_top_n = 5 [default = 300];
optional uint32 pre_nms_topn = 6 [default = 6000]; optional float nms_thresh = 6 [default = 0.7];
optional uint32 post_nms_topn = 7 [default = 300]; optional uint32 min_size = 7 [default = 16];
optional float nms_thresh = 8 [default = 0.7]; optional int32 min_level = 8 [default = 2];
optional int32 max_level = 9 [default = 5];
optional int32 canonical_scale = 10 [default = 224];
optional int32 canonical_level = 11 [default = 4];
} }
message BatchRenormParameter { message BatchRenormParameter {
......
...@@ -19,7 +19,7 @@ _sym_db = _symbol_database.Default() ...@@ -19,7 +19,7 @@ _sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor( DESCRIPTOR = _descriptor.FileDescriptor(
name='caffe.proto', name='caffe.proto',
package='caffe', package='caffe',
serialized_pb=_b('\n\x0b\x63\x61\x66\x66\x65.proto\x12\x05\x63\x61\x66\x66\x65\"\x1c\n\tBlobShape\x12\x0f\n\x03\x64im\x18\x01 \x03(\x03\x42\x02\x10\x01\"\xcc\x01\n\tBlobProto\x12\x1f\n\x05shape\x18\x07 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x10\n\x04\x64\x61ta\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x10\n\x04\x64iff\x18\x06 \x03(\x02\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_data\x18\x08 \x03(\x01\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_diff\x18\t \x03(\x01\x42\x02\x10\x01\x12\x0e\n\x03num\x18\x01 \x01(\x05:\x01\x30\x12\x13\n\x08\x63hannels\x18\x02 \x01(\x05:\x01\x30\x12\x11\n\x06height\x18\x03 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\x05:\x01\x30\"2\n\x0f\x42lobProtoVector\x12\x1f\n\x05\x62lobs\x18\x01 \x03(\x0b\x32\x10.caffe.BlobProto\"\x81\x01\n\x05\x44\x61tum\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x12\n\nfloat_data\x18\x06 \x03(\x02\x12\x16\n\x07\x65ncoded\x18\x07 \x01(\x08:\x05\x66\x61lse\"\x8a\x02\n\x0f\x46illerParameter\x12\x16\n\x04type\x18\x01 \x01(\t:\x08\x63onstant\x12\x10\n\x05value\x18\x02 \x01(\x02:\x01\x30\x12\x0e\n\x03min\x18\x03 \x01(\x02:\x01\x30\x12\x0e\n\x03max\x18\x04 \x01(\x02:\x01\x31\x12\x0f\n\x04mean\x18\x05 \x01(\x02:\x01\x30\x12\x0e\n\x03std\x18\x06 \x01(\x02:\x01\x31\x12\x12\n\x06sparse\x18\x07 \x01(\x05:\x02-1\x12\x42\n\rvariance_norm\x18\x08 \x01(\x0e\x32#.caffe.FillerParameter.VarianceNorm:\x06\x46\x41N_IN\"4\n\x0cVarianceNorm\x12\n\n\x06\x46\x41N_IN\x10\x00\x12\x0b\n\x07\x46\x41N_OUT\x10\x01\x12\x0b\n\x07\x41VERAGE\x10\x02\"\x8e\x02\n\x0cNetParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x03 \x03(\t\x12%\n\x0binput_shape\x18\x08 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x11\n\tinput_dim\x18\x04 \x03(\x05\x12\x1d\n\x0e\x66orce_backward\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x05state\x18\x06 \x01(\x0b\x32\x0f.caffe.NetState\x12\x19\n\ndebug_info\x18\x07 
\x01(\x08:\x05\x66\x61lse\x12$\n\x05layer\x18\x64 \x03(\x0b\x32\x15.caffe.LayerParameter\x12\'\n\x06layers\x18\x02 \x03(\x0b\x32\x17.caffe.V1LayerParameter\"\xc9\n\n\x0fSolverParameter\x12\x0b\n\x03net\x18\x18 \x01(\t\x12&\n\tnet_param\x18\x19 \x01(\x0b\x32\x13.caffe.NetParameter\x12\x11\n\ttrain_net\x18\x01 \x01(\t\x12\x10\n\x08test_net\x18\x02 \x03(\t\x12,\n\x0ftrain_net_param\x18\x15 \x01(\x0b\x32\x13.caffe.NetParameter\x12+\n\x0etest_net_param\x18\x16 \x03(\x0b\x32\x13.caffe.NetParameter\x12$\n\x0btrain_state\x18\x1a \x01(\x0b\x32\x0f.caffe.NetState\x12#\n\ntest_state\x18\x1b \x03(\x0b\x32\x0f.caffe.NetState\x12\x11\n\ttest_iter\x18\x03 \x03(\x05\x12\x18\n\rtest_interval\x18\x04 \x01(\x05:\x01\x30\x12 \n\x11test_compute_loss\x18\x13 \x01(\x08:\x05\x66\x61lse\x12!\n\x13test_initialization\x18 \x01(\x08:\x04true\x12\x0f\n\x07\x62\x61se_lr\x18\x05 \x01(\x02\x12\x10\n\x08stage_lr\x18\x32 \x03(\x02\x12\x12\n\nstage_iter\x18\x33 \x03(\x05\x12\x0f\n\x07\x64isplay\x18\x06 \x01(\x05\x12\x17\n\x0c\x61verage_loss\x18! 
\x01(\x05:\x01\x31\x12\x10\n\x08max_iter\x18\x07 \x01(\x05\x12\x14\n\titer_size\x18$ \x01(\x05:\x01\x31\x12\x11\n\tlr_policy\x18\x08 \x01(\t\x12\r\n\x05gamma\x18\t \x01(\x02\x12\r\n\x05power\x18\n \x01(\x02\x12\x10\n\x08momentum\x18\x0b \x01(\x02\x12\x14\n\x0cweight_decay\x18\x0c \x01(\x02\x12\x1f\n\x13regularization_type\x18\x1d \x01(\t:\x02L2\x12\x10\n\x08stepsize\x18\r \x01(\x05\x12\x11\n\tstepvalue\x18\" \x03(\x05\x12\x1a\n\x0e\x63lip_gradients\x18# \x01(\x02:\x02-1\x12\x13\n\x08snapshot\x18\x0e \x01(\x05:\x01\x30\x12\x17\n\x0fsnapshot_prefix\x18\x0f \x01(\t\x12\x1c\n\rsnapshot_diff\x18\x10 \x01(\x08:\x05\x66\x61lse\x12K\n\x0fsnapshot_format\x18% \x01(\x0e\x32%.caffe.SolverParameter.SnapshotFormat:\x0b\x42INARYPROTO\x12;\n\x0bsolver_mode\x18\x11 \x01(\x0e\x32!.caffe.SolverParameter.SolverMode:\x03GPU\x12\x14\n\tdevice_id\x18\x12 \x01(\x05:\x01\x30\x12\x17\n\x0brandom_seed\x18\x14 \x01(\x03:\x02-1\x12\x11\n\x04type\x18( \x01(\t:\x03SGD\x12\x15\n\x05\x64\x65lta\x18\x1f \x01(\x02:\x06\x31\x65-008\x12\x18\n\tmomentum2\x18\' \x01(\x02:\x05\x30.999\x12\x17\n\trms_decay\x18& \x01(\x02:\x04\x30.99\x12\x19\n\ndebug_info\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\"\n\x14snapshot_after_train\x18\x1c \x01(\x08:\x04true\x12;\n\x0bsolver_type\x18\x1e \x01(\x0e\x32!.caffe.SolverParameter.SolverType:\x03SGD\"+\n\x0eSnapshotFormat\x12\x08\n\x04HDF5\x10\x00\x12\x0f\n\x0b\x42INARYPROTO\x10\x01\"\x1e\n\nSolverMode\x12\x07\n\x03\x43PU\x10\x00\x12\x07\n\x03GPU\x10\x01\"U\n\nSolverType\x12\x07\n\x03SGD\x10\x00\x12\x0c\n\x08NESTEROV\x10\x01\x12\x0b\n\x07\x41\x44\x41GRAD\x10\x02\x12\x0b\n\x07RMSPROP\x10\x03\x12\x0c\n\x08\x41\x44\x41\x44\x45LTA\x10\x04\x12\x08\n\x04\x41\x44\x41M\x10\x05\"l\n\x0bSolverState\x12\x0c\n\x04iter\x18\x01 \x01(\x05\x12\x13\n\x0blearned_net\x18\x02 \x01(\t\x12!\n\x07history\x18\x03 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x17\n\x0c\x63urrent_step\x18\x04 \x01(\x05:\x01\x30\"N\n\x08NetState\x12!\n\x05phase\x18\x01 
\x01(\x0e\x32\x0c.caffe.Phase:\x04TEST\x12\x10\n\x05level\x18\x02 \x01(\x05:\x01\x30\x12\r\n\x05stage\x18\x03 \x03(\t\"\x85\x01\n\x0cNetStateRule\x12\x1b\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase\x12\x11\n\tmin_level\x18\x02 \x01(\x05\x12\x11\n\tmax_level\x18\x03 \x01(\x05\x12\r\n\x05stage\x18\x04 \x03(\t\x12\x11\n\tnot_stage\x18\x05 \x03(\t\x12\x10\n\x08mpi_rank\x18\x06 \x03(\r\"\xa3\x01\n\tParamSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\nshare_mode\x18\x02 \x01(\x0e\x32\x1d.caffe.ParamSpec.DimCheckMode\x12\x12\n\x07lr_mult\x18\x03 \x01(\x02:\x01\x31\x12\x15\n\ndecay_mult\x18\x04 \x01(\x02:\x01\x31\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\x95\x19\n\x0eLayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0e\n\x06\x62ottom\x18\x03 \x03(\t\x12\x0b\n\x03top\x18\x04 \x03(\t\x12\x1c\n\x0cmirror_stage\x18\xa2\x01 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x05phase\x18\n \x01(\x0e\x32\x0c.caffe.Phase\x12\x13\n\x0bloss_weight\x18\x05 \x03(\x02\x12\x1f\n\x05param\x18\x06 \x03(\x0b\x32\x10.caffe.ParamSpec\x12\x1f\n\x05\x62lobs\x18\x07 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x16\n\x0epropagate_down\x18\x0b \x03(\x08\x12$\n\x07include\x18\x08 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18\t \x03(\x0b\x32\x13.caffe.NetStateRule\x12\x37\n\x0ftransform_param\x18\x64 \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18\x65 \x01(\x0b\x32\x14.caffe.LossParameter\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x66 \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18g \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12\x34\n\x10\x62\x61tch_norm_param\x18\x8b\x01 \x01(\x0b\x32\x19.caffe.BatchNormParameter\x12)\n\nbias_param\x18\x8d\x01 \x01(\x0b\x32\x14.caffe.BiasParameter\x12,\n\x0c\x63oncat_param\x18h \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18i \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18j 
\x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12)\n\ncrop_param\x18\x90\x01 \x01(\x0b\x32\x14.caffe.CropParameter\x12(\n\ndata_param\x18k \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18l \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18m \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18n \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12\'\n\telu_param\x18\x8c\x01 \x01(\x0b\x32\x13.caffe.ELUParameter\x12+\n\x0b\x65mbed_param\x18\x89\x01 \x01(\x0b\x32\x15.caffe.EmbedParameter\x12&\n\texp_param\x18o \x01(\x0b\x32\x13.caffe.ExpParameter\x12/\n\rflatten_param\x18\x87\x01 \x01(\x0b\x32\x17.caffe.FlattenParameter\x12\x31\n\x0fhdf5_data_param\x18p \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18q \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18r \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18s \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18t \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18u \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12+\n\x0binput_param\x18\x8f\x01 \x01(\x0b\x32\x15.caffe.InputParameter\x12\'\n\tlog_param\x18\x86\x01 \x01(\x0b\x32\x13.caffe.LogParameter\x12&\n\tlrn_param\x18v \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18w \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18x \x01(\x0b\x32\x13.caffe.MVNParameter\x12\x33\n\x0fparameter_param\x18\x91\x01 \x01(\x0b\x32\x19.caffe.ParameterParameter\x12.\n\rpooling_param\x18y \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18z \x01(\x0b\x32\x15.caffe.PowerParameter\x12+\n\x0bprelu_param\x18\x83\x01 \x01(\x0b\x32\x15.caffe.PReLUParameter\x12-\n\x0cpython_param\x18\x82\x01 \x01(\x0b\x32\x16.caffe.PythonParameter\x12\x33\n\x0freduction_param\x18\x88\x01 \x01(\x0b\x32\x19.caffe.ReductionParameter\x12(\n\nrelu_param\x18{ 
\x01(\x0b\x32\x14.caffe.ReLUParameter\x12/\n\rreshape_param\x18\x85\x01 \x01(\x0b\x32\x17.caffe.ReshapeParameter\x12+\n\x0bscale_param\x18\x8e\x01 \x01(\x0b\x32\x15.caffe.ScaleParameter\x12.\n\rsigmoid_param\x18| \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18} \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12\'\n\tspp_param\x18\x84\x01 \x01(\x0b\x32\x13.caffe.SPPParameter\x12*\n\x0bslice_param\x18~ \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18\x7f \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x33\n\x0fthreshold_param\x18\x80\x01 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12)\n\ntile_param\x18\x8a\x01 \x01(\x0b\x32\x14.caffe.TileParameter\x12\x36\n\x11window_data_param\x18\x81\x01 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\x12\x36\n\x11roi_pooling_param\x18\x97\x01 \x01(\x0b\x32\x1a.caffe.ROIPoolingParameter\x12;\n\x14smooth_l1_loss_param\x18\x98\x01 \x01(\x0b\x32\x1c.caffe.SmoothL1LossParameter\x12\'\n\tmpi_param\x18\x99\x01 \x01(\x0b\x32\x13.caffe.MPIParameter\x12/\n\rpermute_param\x18\x9a\x01 \x01(\x0b\x32\x17.caffe.PermuteParameter\x12\x33\n\x0fnormalize_param\x18\x9b\x01 \x01(\x0b\x32\x19.caffe.NormalizeParameter\x12\x31\n\x0eparallel_param\x18\x9d\x01 \x01(\x0b\x32\x18.caffe.ParallelParameter\x12-\n\x0cresize_param\x18\x9e\x01 \x01(\x0b\x32\x16.caffe.ResizeParameter\x12\x36\n\x11\x65xpand_dims_param\x18\x9f\x01 \x01(\x0b\x32\x1a.caffe.ExpandDimsParameter\x12\x31\n\x0eproposal_param\x18\xa0\x01 \x01(\x0b\x32\x18.caffe.ProposalParameter\x12\x38\n\x12\x62\x61tch_renorm_param\x18\xa1\x01 \x01(\x0b\x32\x1b.caffe.BatchRenormParameter\x12\x38\n\x12\x64\x65nse_concat_param\x18\xa3\x01 \x01(\x0b\x32\x1b.caffe.DenseConcatParameter\x12\x34\n\x10\x66ocal_loss_param\x18\xa4\x01 \x01(\x0b\x32\x19.caffe.FocalLossParameter\x12-\n\x0cgather_param\x18\xa5\x01 \x01(\x0b\x32\x16.caffe.GatherParameter\"\xa7\x02\n\x17TransformationParameter\x12\x10\n\x05scale\x18\x01 \x01(\x02:\x01\x31\x12\x15\n\x06mirror\x18\x02 
\x01(\x08:\x05\x66\x61lse\x12\x14\n\tcrop_size\x18\x03 \x01(\r:\x01\x30\x12\x12\n\x07padding\x18\x0b \x01(\r:\x01\x30\x12\x11\n\tmean_file\x18\x04 \x01(\t\x12\x12\n\nmean_value\x18\x05 \x03(\x02\x12\x1a\n\x0b\x66orce_color\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\nforce_gray\x18\x07 \x01(\x08:\x05\x66\x61lse\x12!\n\x12\x63olor_augmentation\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x10min_random_scale\x18\t \x01(\x02:\x01\x31\x12\x1b\n\x10max_random_scale\x18\n \x01(\x02:\x01\x31\"\xf5\x01\n\rLossParameter\x12\x14\n\x0cignore_label\x18\x01 \x01(\x05\x12\x44\n\rnormalization\x18\x03 \x01(\x0e\x32&.caffe.LossParameter.NormalizationMode:\x05VALID\x12\x11\n\tnormalize\x18\x02 \x01(\x08\x1a\'\n\x13\x45xpandDimsParameter\x12\x10\n\x04\x61xis\x18\x01 \x01(\x05:\x02-1\"L\n\x11NormalizationMode\x12\x08\n\x04\x46ULL\x10\x00\x12\t\n\x05VALID\x10\x01\x12\x0e\n\nBATCH_SIZE\x10\x02\x12\x08\n\x04NONE\x10\x03\x12\x08\n\x04UNIT\x10\x04\"L\n\x11\x41\x63\x63uracyParameter\x12\x10\n\x05top_k\x18\x01 \x01(\r:\x01\x31\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x14\n\x0cignore_label\x18\x03 \x01(\x05\"M\n\x0f\x41rgMaxParameter\x12\x1a\n\x0bout_max_val\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x05top_k\x18\x02 \x01(\r:\x01\x31\x12\x0c\n\x04\x61xis\x18\x03 \x01(\x05\"9\n\x0f\x43oncatParameter\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x15\n\nconcat_dim\x18\x01 \x01(\r:\x01\x31\"h\n\x12\x42\x61tchNormParameter\x12\x18\n\x10use_global_stats\x18\x01 \x01(\x08\x12$\n\x17moving_average_fraction\x18\x02 \x01(\x02:\x03\x30.9\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x30.001\"]\n\rBiasParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\"L\n\x18\x43ontrastiveLossParameter\x12\x11\n\x06margin\x18\x01 \x01(\x02:\x01\x31\x12\x1d\n\x0elegacy_version\x18\x02 \x01(\x08:\x05\x66\x61lse\"\xfc\x03\n\x14\x43onvolutionParameter\x12\x12\n\nnum_output\x18\x01 
\x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12\x0b\n\x03pad\x18\x03 \x03(\r\x12\x13\n\x0bkernel_size\x18\x04 \x03(\r\x12\x0e\n\x06stride\x18\x06 \x03(\r\x12\x10\n\x08\x64ilation\x18\x12 \x03(\r\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x10\n\x08kernel_h\x18\x0b \x01(\r\x12\x10\n\x08kernel_w\x18\x0c \x01(\r\x12\x10\n\x08stride_h\x18\r \x01(\r\x12\x10\n\x08stride_w\x18\x0e \x01(\r\x12\x10\n\x05group\x18\x05 \x01(\r:\x01\x31\x12-\n\rweight_filler\x18\x07 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x08 \x01(\x0b\x32\x16.caffe.FillerParameter\x12;\n\x06\x65ngine\x18\x0f \x01(\x0e\x32\".caffe.ConvolutionParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x10 \x01(\x05:\x01\x31\x12\x1e\n\x0f\x66orce_nd_im2col\x18\x11 \x01(\x08:\x05\x66\x61lse\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"0\n\rCropParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x32\x12\x0e\n\x06offset\x18\x02 \x03(\r\"\xa4\x02\n\rDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x31\n\x07\x62\x61\x63kend\x18\x08 \x01(\x0e\x32\x17.caffe.DataParameter.DB:\x07LEVELDB\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x66orce_encoded_color\x18\t \x01(\x08:\x05\x66\x61lse\x12\x13\n\x08prefetch\x18\n \x01(\r:\x01\x35\"\x1b\n\x02\x44\x42\x12\x0b\n\x07LEVELDB\x10\x00\x12\x08\n\x04LMDB\x10\x01\"I\n\x10\x44ropoutParameter\x12\x1a\n\rdropout_ratio\x18\x01 \x01(\x02:\x03\x30.5\x12\x19\n\x0bscale_train\x18\x02 \x01(\x08:\x04true\"\xa0\x01\n\x12\x44ummyDataParameter\x12+\n\x0b\x64\x61ta_filler\x18\x01 \x03(\x0b\x32\x16.caffe.FillerParameter\x12\x1f\n\x05shape\x18\x06 
\x03(\x0b\x32\x10.caffe.BlobShape\x12\x0b\n\x03num\x18\x02 \x03(\r\x12\x10\n\x08\x63hannels\x18\x03 \x03(\r\x12\x0e\n\x06height\x18\x04 \x03(\r\x12\r\n\x05width\x18\x05 \x03(\r\"\xa5\x01\n\x10\x45ltwiseParameter\x12\x39\n\toperation\x18\x01 \x01(\x0e\x32!.caffe.EltwiseParameter.EltwiseOp:\x03SUM\x12\r\n\x05\x63oeff\x18\x02 \x03(\x02\x12\x1e\n\x10stable_prod_grad\x18\x03 \x01(\x08:\x04true\"\'\n\tEltwiseOp\x12\x08\n\x04PROD\x10\x00\x12\x07\n\x03SUM\x10\x01\x12\x07\n\x03MAX\x10\x02\" \n\x0c\x45LUParameter\x12\x10\n\x05\x61lpha\x18\x01 \x01(\x02:\x01\x31\"\xac\x01\n\x0e\x45mbedParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x11\n\tinput_dim\x18\x02 \x01(\r\x12\x17\n\tbias_term\x18\x03 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"D\n\x0c\x45xpParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"9\n\x10\x46lattenParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x14\n\x08\x65nd_axis\x18\x02 \x01(\x05:\x02-1\"O\n\x11HDF5DataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x02 \x01(\r\x12\x16\n\x07shuffle\x18\x03 \x01(\x08:\x05\x66\x61lse\"(\n\x13HDF5OutputParameter\x12\x11\n\tfile_name\x18\x01 \x01(\t\"^\n\x12HingeLossParameter\x12\x30\n\x04norm\x18\x01 \x01(\x0e\x32\x1e.caffe.HingeLossParameter.Norm:\x02L1\"\x16\n\x04Norm\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\"\x97\x02\n\x12ImageDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x15\n\nbatch_size\x18\x04 \x01(\r:\x01\x31\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x16\n\x07shuffle\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nnew_height\x18\t \x01(\r:\x01\x30\x12\x14\n\tnew_width\x18\n \x01(\r:\x01\x30\x12\x16\n\x08is_color\x18\x0b \x01(\x08:\x04true\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 
\x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\x0c \x01(\t:\x00\"\'\n\x15InfogainLossParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\"\xcb\x01\n\x15InnerProductParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0f\n\x04\x61xis\x18\x05 \x01(\x05:\x01\x31\x12\x18\n\ttranspose\x18\x06 \x01(\x08:\x05\x66\x61lse\"1\n\x0eInputParameter\x12\x1f\n\x05shape\x18\x01 \x03(\x0b\x32\x10.caffe.BlobShape\"D\n\x0cLogParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"\xb8\x02\n\x0cLRNParameter\x12\x15\n\nlocal_size\x18\x01 \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x02 \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x03 \x01(\x02:\x04\x30.75\x12\x44\n\x0bnorm_region\x18\x04 \x01(\x0e\x32\x1e.caffe.LRNParameter.NormRegion:\x0f\x41\x43ROSS_CHANNELS\x12\x0c\n\x01k\x18\x05 \x01(\x02:\x01\x31\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.LRNParameter.Engine:\x07\x44\x45\x46\x41ULT\"5\n\nNormRegion\x12\x13\n\x0f\x41\x43ROSS_CHANNELS\x10\x00\x12\x12\n\x0eWITHIN_CHANNEL\x10\x01\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xbd\x01\n\x13MemoryDataParameter\x12\x12\n\nbatch_size\x18\x01 \x01(\r\x12\x10\n\x08\x63hannels\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\r\n\x05width\x18\x04 \x01(\r\x12;\n\x05\x64type\x18\x05 \x01(\x0e\x32#.caffe.MemoryDataParameter.DataType:\x07\x46LOAT32\"$\n\x08\x44\x61taType\x12\x0b\n\x07\x46LOAT32\x10\x00\x12\x0b\n\x07\x46LOAT16\x10\x01\"e\n\x0cMVNParameter\x12 \n\x12normalize_variance\x18\x01 \x01(\x08:\x04true\x12\x1e\n\x0f\x61\x63ross_channels\x18\x02 
\x01(\x08:\x05\x66\x61lse\x12\x13\n\x03\x65ps\x18\x03 \x01(\x02:\x06\x31\x65-009\"5\n\x12ParameterParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\"\xa2\x03\n\x10PoolingParameter\x12\x35\n\x04pool\x18\x01 \x01(\x0e\x32\".caffe.PoolingParameter.PoolMethod:\x03MAX\x12\x0e\n\x03pad\x18\x04 \x01(\r:\x01\x30\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x02 \x01(\r\x12\x10\n\x08kernel_h\x18\x05 \x01(\r\x12\x10\n\x08kernel_w\x18\x06 \x01(\r\x12\x11\n\x06stride\x18\x03 \x01(\r:\x01\x31\x12\x10\n\x08stride_h\x18\x07 \x01(\r\x12\x10\n\x08stride_w\x18\x08 \x01(\r\x12\x37\n\x06\x65ngine\x18\x0b \x01(\x0e\x32\x1e.caffe.PoolingParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x1d\n\x0eglobal_pooling\x18\x0c \x01(\x08:\x05\x66\x61lse\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Y\n\x13ROIPoolingParameter\x12\x13\n\x08pooled_h\x18\x01 \x01(\r:\x01\x30\x12\x13\n\x08pooled_w\x18\x02 \x01(\r:\x01\x30\x12\x18\n\rspatial_scale\x18\x03 \x01(\x02:\x01\x31\"F\n\x0ePowerParameter\x12\x10\n\x05power\x18\x01 \x01(\x02:\x01\x31\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"g\n\x0fPythonParameter\x12\x0e\n\x06module\x18\x01 \x01(\t\x12\r\n\x05layer\x18\x02 \x01(\t\x12\x13\n\tparam_str\x18\x03 \x01(\t:\x00\x12 \n\x11share_in_parallel\x18\x04 \x01(\x08:\x05\x66\x61lse\"\xad\x01\n\x12ReductionParameter\x12=\n\toperation\x18\x01 \x01(\x0e\x32%.caffe.ReductionParameter.ReductionOp:\x03SUM\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x10\n\x05\x63oeff\x18\x03 \x01(\x02:\x01\x31\"5\n\x0bReductionOp\x12\x07\n\x03SUM\x10\x01\x12\x08\n\x04\x41SUM\x10\x02\x12\t\n\x05SUMSQ\x10\x03\x12\x08\n\x04MEAN\x10\x04\"\x8d\x01\n\rReLUParameter\x12\x19\n\x0enegative_slope\x18\x01 
\x01(\x02:\x01\x30\x12\x34\n\x06\x65ngine\x18\x02 \x01(\x0e\x32\x1b.caffe.ReLUParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Z\n\x10ReshapeParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\x08num_axes\x18\x03 \x01(\x05:\x02-1\"\xa5\x01\n\x0eScaleParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x18\n\tbias_term\x18\x04 \x01(\x08:\x05\x66\x61lse\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"x\n\x10SigmoidParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SigmoidParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"L\n\x0eSliceParameter\x12\x0f\n\x04\x61xis\x18\x03 \x01(\x05:\x01\x31\x12\x13\n\x0bslice_point\x18\x02 \x03(\r\x12\x14\n\tslice_dim\x18\x01 \x01(\r:\x01\x31\"\x89\x01\n\x10SoftmaxParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SoftmaxParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"r\n\rTanHParameter\x12\x34\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1b.caffe.TanHParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"T\n\rTileParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\r\n\x05tiles\x18\x02 \x01(\x05\x12#\n\tmultiples\x18\x03 \x01(\x0b\x32\x10.caffe.BlobShape\"*\n\x12ThresholdParameter\x12\x14\n\tthreshold\x18\x01 
\x01(\x02:\x01\x30\"\xc1\x02\n\x13WindowDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0c\x66g_threshold\x18\x07 \x01(\x02:\x03\x30.5\x12\x19\n\x0c\x62g_threshold\x18\x08 \x01(\x02:\x03\x30.5\x12\x19\n\x0b\x66g_fraction\x18\t \x01(\x02:\x04\x30.25\x12\x16\n\x0b\x63ontext_pad\x18\n \x01(\r:\x01\x30\x12\x17\n\tcrop_mode\x18\x0b \x01(\t:\x04warp\x12\x1b\n\x0c\x63\x61\x63he_images\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\r \x01(\t:\x00\"\xeb\x01\n\x0cSPPParameter\x12\x16\n\x0epyramid_height\x18\x01 \x01(\r\x12\x31\n\x04pool\x18\x02 \x01(\x0e\x32\x1e.caffe.SPPParameter.PoolMethod:\x03MAX\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.SPPParameter.Engine:\x07\x44\x45\x46\x41ULT\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xe0\x13\n\x10V1LayerParameter\x12\x0e\n\x06\x62ottom\x18\x02 \x03(\t\x12\x0b\n\x03top\x18\x03 \x03(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12$\n\x07include\x18 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18! 
\x03(\x0b\x32\x13.caffe.NetStateRule\x12/\n\x04type\x18\x05 \x01(\x0e\x32!.caffe.V1LayerParameter.LayerType\x12\x1f\n\x05\x62lobs\x18\x06 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x0e\n\x05param\x18\xe9\x07 \x03(\t\x12>\n\x0f\x62lob_share_mode\x18\xea\x07 \x03(\x0e\x32$.caffe.V1LayerParameter.DimCheckMode\x12\x10\n\x08\x62lobs_lr\x18\x07 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x08 \x03(\x02\x12\x13\n\x0bloss_weight\x18# \x03(\x02\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x1b \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18\x17 \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12,\n\x0c\x63oncat_param\x18\t \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18( \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18\n \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12(\n\ndata_param\x18\x0b \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18\x0c \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18\x1a \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18\x18 \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12&\n\texp_param\x18) \x01(\x0b\x32\x13.caffe.ExpParameter\x12\x31\n\x0fhdf5_data_param\x18\r \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18\x0e \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18\x1d \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18\x0f \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18\x10 \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18\x11 \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12&\n\tlrn_param\x18\x12 \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18\x16 \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18\" \x01(\x0b\x32\x13.caffe.MVNParameter\x12.\n\rpooling_param\x18\x13 \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18\x15 
\x01(\x0b\x32\x15.caffe.PowerParameter\x12(\n\nrelu_param\x18\x1e \x01(\x0b\x32\x14.caffe.ReLUParameter\x12.\n\rsigmoid_param\x18& \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18\' \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12*\n\x0bslice_param\x18\x1f \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18% \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x32\n\x0fthreshold_param\x18\x19 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12\x35\n\x11window_data_param\x18\x14 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\x12\x37\n\x0ftransform_param\x18$ \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18* \x01(\x0b\x32\x14.caffe.LossParameter\x12&\n\x05layer\x18\x01 \x01(\x0b\x32\x17.caffe.V0LayerParameter\"\xd8\x04\n\tLayerType\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06\x41\x42SVAL\x10#\x12\x0c\n\x08\x41\x43\x43URACY\x10\x01\x12\n\n\x06\x41RGMAX\x10\x1e\x12\x08\n\x04\x42NLL\x10\x02\x12\n\n\x06\x43ONCAT\x10\x03\x12\x14\n\x10\x43ONTRASTIVE_LOSS\x10%\x12\x0f\n\x0b\x43ONVOLUTION\x10\x04\x12\x08\n\x04\x44\x41TA\x10\x05\x12\x11\n\rDECONVOLUTION\x10\'\x12\x0b\n\x07\x44ROPOUT\x10\x06\x12\x0e\n\nDUMMY_DATA\x10 
\x12\x12\n\x0e\x45UCLIDEAN_LOSS\x10\x07\x12\x0b\n\x07\x45LTWISE\x10\x19\x12\x07\n\x03\x45XP\x10&\x12\x0b\n\x07\x46LATTEN\x10\x08\x12\r\n\tHDF5_DATA\x10\t\x12\x0f\n\x0bHDF5_OUTPUT\x10\n\x12\x0e\n\nHINGE_LOSS\x10\x1c\x12\n\n\x06IM2COL\x10\x0b\x12\x0e\n\nIMAGE_DATA\x10\x0c\x12\x11\n\rINFOGAIN_LOSS\x10\r\x12\x11\n\rINNER_PRODUCT\x10\x0e\x12\x07\n\x03LRN\x10\x0f\x12\x0f\n\x0bMEMORY_DATA\x10\x1d\x12\x1d\n\x19MULTINOMIAL_LOGISTIC_LOSS\x10\x10\x12\x07\n\x03MVN\x10\"\x12\x0b\n\x07POOLING\x10\x11\x12\t\n\x05POWER\x10\x1a\x12\x08\n\x04RELU\x10\x12\x12\x0b\n\x07SIGMOID\x10\x13\x12\x1e\n\x1aSIGMOID_CROSS_ENTROPY_LOSS\x10\x1b\x12\x0b\n\x07SILENCE\x10$\x12\x0b\n\x07SOFTMAX\x10\x14\x12\x10\n\x0cSOFTMAX_LOSS\x10\x15\x12\t\n\x05SPLIT\x10\x16\x12\t\n\x05SLICE\x10!\x12\x08\n\x04TANH\x10\x17\x12\x0f\n\x0bWINDOW_DATA\x10\x18\x12\r\n\tTHRESHOLD\x10\x1f\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\xfd\x07\n\x10V0LayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x12\n\nnum_output\x18\x03 \x01(\r\x12\x16\n\x08\x62iasterm\x18\x04 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x06 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0e\n\x03pad\x18\x07 \x01(\r:\x01\x30\x12\x12\n\nkernelsize\x18\x08 \x01(\r\x12\x10\n\x05group\x18\t \x01(\r:\x01\x31\x12\x11\n\x06stride\x18\n \x01(\r:\x01\x31\x12\x35\n\x04pool\x18\x0b \x01(\x0e\x32\".caffe.V0LayerParameter.PoolMethod:\x03MAX\x12\x1a\n\rdropout_ratio\x18\x0c \x01(\x02:\x03\x30.5\x12\x15\n\nlocal_size\x18\r \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x0e \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x0f \x01(\x02:\x04\x30.75\x12\x0c\n\x01k\x18\x16 \x01(\x02:\x01\x31\x12\x0e\n\x06source\x18\x10 \x01(\t\x12\x10\n\x05scale\x18\x11 \x01(\x02:\x01\x31\x12\x10\n\x08meanfile\x18\x12 \x01(\t\x12\x11\n\tbatchsize\x18\x13 \x01(\r\x12\x13\n\x08\x63ropsize\x18\x14 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x15 
\x01(\x08:\x05\x66\x61lse\x12\x1f\n\x05\x62lobs\x18\x32 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x10\n\x08\x62lobs_lr\x18\x33 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x34 \x03(\x02\x12\x14\n\trand_skip\x18\x35 \x01(\r:\x01\x30\x12\x1d\n\x10\x64\x65t_fg_threshold\x18\x36 \x01(\x02:\x03\x30.5\x12\x1d\n\x10\x64\x65t_bg_threshold\x18\x37 \x01(\x02:\x03\x30.5\x12\x1d\n\x0f\x64\x65t_fg_fraction\x18\x38 \x01(\x02:\x04\x30.25\x12\x1a\n\x0f\x64\x65t_context_pad\x18: \x01(\r:\x01\x30\x12\x1b\n\rdet_crop_mode\x18; \x01(\t:\x04warp\x12\x12\n\x07new_num\x18< \x01(\x05:\x01\x30\x12\x17\n\x0cnew_channels\x18= \x01(\x05:\x01\x30\x12\x15\n\nnew_height\x18> \x01(\x05:\x01\x30\x12\x14\n\tnew_width\x18? \x01(\x05:\x01\x30\x12\x1d\n\x0eshuffle_images\x18@ \x01(\x08:\x05\x66\x61lse\x12\x15\n\nconcat_dim\x18\x41 \x01(\r:\x01\x31\x12\x36\n\x11hdf5_output_param\x18\xe9\x07 \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"W\n\x0ePReLUParameter\x12&\n\x06\x66iller\x18\x01 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x1d\n\x0e\x63hannel_shared\x18\x02 \x01(\x08:\x05\x66\x61lse\")\n\x15SmoothL1LossParameter\x12\x10\n\x05sigma\x18\x01 \x01(\x02:\x01\x31\"H\n\x0cMPIParameter\x12\x0f\n\x04root\x18\x01 \x01(\r:\x01\x30\x12\x12\n\x07\x63omm_id\x18\x02 \x01(\x04:\x01\x30\x12\x13\n\x08group_id\x18\x03 \x01(\x04:\x01\x30\"!\n\x10PermuteParameter\x12\r\n\x05order\x18\x01 \x03(\r\"\x93\x01\n\x12NormalizeParameter\x12\x1c\n\x0e\x61\x63ross_spatial\x18\x01 \x01(\x08:\x04true\x12,\n\x0cscale_filler\x18\x02 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x1c\n\x0e\x63hannel_shared\x18\x03 \x01(\x08:\x04true\x12\x13\n\x03\x65ps\x18\x04 \x01(\x02:\x06\x31\x65-010\"_\n\x11ParallelParameter\x12\x16\n\x07shuffle\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x18\n\tnode_step\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x18\n\tpartition\x18\x03 \x01(\x08:\x05\x66\x61lse\"R\n\x0fResizeParameter\x12\x1f\n\x05shape\x18\x01 
\x01(\x0b\x32\x10.caffe.BlobShape\x12\x0e\n\x02\x66x\x18\x02 \x01(\x02:\x02-1\x12\x0e\n\x02\x66y\x18\x03 \x01(\x02:\x02-1\"\'\n\x13\x45xpandDimsParameter\x12\x10\n\x04\x61xis\x18\x01 \x01(\x05:\x02-1\"\xc8\x01\n\x11ProposalParameter\x12\x17\n\x0b\x66\x65\x61t_stride\x18\x01 \x01(\r:\x02\x31\x36\x12\x15\n\tbase_size\x18\x02 \x01(\r:\x02\x31\x36\x12\x14\n\x08min_size\x18\x03 \x01(\r:\x02\x31\x36\x12\r\n\x05ratio\x18\x04 \x03(\x02\x12\r\n\x05scale\x18\x05 \x03(\x02\x12\x1a\n\x0cpre_nms_topn\x18\x06 \x01(\r:\x04\x36\x30\x30\x30\x12\x1a\n\rpost_nms_topn\x18\x07 \x01(\r:\x03\x33\x30\x30\x12\x17\n\nnms_thresh\x18\x08 \x01(\x02:\x03\x30.7\"\xa6\x01\n\x14\x42\x61tchRenormParameter\x12\x18\n\x10use_global_stats\x18\x01 \x01(\x08\x12$\n\x17moving_average_fraction\x18\x02 \x01(\x02:\x03\x30.9\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x30.001\x12\x10\n\x05r_max\x18\x04 \x01(\x02:\x01\x33\x12\x10\n\x05\x64_max\x18\x05 \x01(\x02:\x01\x35\x12\x16\n\x07t_delta\x18\x06 \x01(\x02:\x05\x30.001\"?\n\x14\x44\x65nseConcatParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x16\n\x0bgrowth_rate\x18\x02 \x01(\x05:\x01\x30\"c\n\x12\x46ocalLossParameter\x12\x12\n\x05\x61lpha\x18\x01 \x01(\x02:\x03\x30.5\x12\x10\n\x05gamma\x18\x02 \x01(\x02:\x01\x30\x12\x13\n\x03\x65ps\x18\x03 \x01(\x02:\x06\x31\x65-010\x12\x12\n\x06neg_id\x18\x04 \x01(\x05:\x02-1\"\"\n\x0fGatherParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x30*\x1c\n\x05Phase\x12\t\n\x05TRAIN\x10\x00\x12\x08\n\x04TEST\x10\x01') serialized_pb=_b('\n\x0b\x63\x61\x66\x66\x65.proto\x12\x05\x63\x61\x66\x66\x65\"\x1c\n\tBlobShape\x12\x0f\n\x03\x64im\x18\x01 \x03(\x03\x42\x02\x10\x01\"\xcc\x01\n\tBlobProto\x12\x1f\n\x05shape\x18\x07 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x10\n\x04\x64\x61ta\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x10\n\x04\x64iff\x18\x06 \x03(\x02\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_data\x18\x08 \x03(\x01\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_diff\x18\t \x03(\x01\x42\x02\x10\x01\x12\x0e\n\x03num\x18\x01 
\x01(\x05:\x01\x30\x12\x13\n\x08\x63hannels\x18\x02 \x01(\x05:\x01\x30\x12\x11\n\x06height\x18\x03 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\x05:\x01\x30\"2\n\x0f\x42lobProtoVector\x12\x1f\n\x05\x62lobs\x18\x01 \x03(\x0b\x32\x10.caffe.BlobProto\"\x81\x01\n\x05\x44\x61tum\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x12\n\nfloat_data\x18\x06 \x03(\x02\x12\x16\n\x07\x65ncoded\x18\x07 \x01(\x08:\x05\x66\x61lse\"\x8a\x02\n\x0f\x46illerParameter\x12\x16\n\x04type\x18\x01 \x01(\t:\x08\x63onstant\x12\x10\n\x05value\x18\x02 \x01(\x02:\x01\x30\x12\x0e\n\x03min\x18\x03 \x01(\x02:\x01\x30\x12\x0e\n\x03max\x18\x04 \x01(\x02:\x01\x31\x12\x0f\n\x04mean\x18\x05 \x01(\x02:\x01\x30\x12\x0e\n\x03std\x18\x06 \x01(\x02:\x01\x31\x12\x12\n\x06sparse\x18\x07 \x01(\x05:\x02-1\x12\x42\n\rvariance_norm\x18\x08 \x01(\x0e\x32#.caffe.FillerParameter.VarianceNorm:\x06\x46\x41N_IN\"4\n\x0cVarianceNorm\x12\n\n\x06\x46\x41N_IN\x10\x00\x12\x0b\n\x07\x46\x41N_OUT\x10\x01\x12\x0b\n\x07\x41VERAGE\x10\x02\"\x8e\x02\n\x0cNetParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x03 \x03(\t\x12%\n\x0binput_shape\x18\x08 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x11\n\tinput_dim\x18\x04 \x03(\x05\x12\x1d\n\x0e\x66orce_backward\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x05state\x18\x06 \x01(\x0b\x32\x0f.caffe.NetState\x12\x19\n\ndebug_info\x18\x07 \x01(\x08:\x05\x66\x61lse\x12$\n\x05layer\x18\x64 \x03(\x0b\x32\x15.caffe.LayerParameter\x12\'\n\x06layers\x18\x02 \x03(\x0b\x32\x17.caffe.V1LayerParameter\"\xc9\n\n\x0fSolverParameter\x12\x0b\n\x03net\x18\x18 \x01(\t\x12&\n\tnet_param\x18\x19 \x01(\x0b\x32\x13.caffe.NetParameter\x12\x11\n\ttrain_net\x18\x01 \x01(\t\x12\x10\n\x08test_net\x18\x02 \x03(\t\x12,\n\x0ftrain_net_param\x18\x15 \x01(\x0b\x32\x13.caffe.NetParameter\x12+\n\x0etest_net_param\x18\x16 
\x03(\x0b\x32\x13.caffe.NetParameter\x12$\n\x0btrain_state\x18\x1a \x01(\x0b\x32\x0f.caffe.NetState\x12#\n\ntest_state\x18\x1b \x03(\x0b\x32\x0f.caffe.NetState\x12\x11\n\ttest_iter\x18\x03 \x03(\x05\x12\x18\n\rtest_interval\x18\x04 \x01(\x05:\x01\x30\x12 \n\x11test_compute_loss\x18\x13 \x01(\x08:\x05\x66\x61lse\x12!\n\x13test_initialization\x18 \x01(\x08:\x04true\x12\x0f\n\x07\x62\x61se_lr\x18\x05 \x01(\x02\x12\x10\n\x08stage_lr\x18\x32 \x03(\x02\x12\x12\n\nstage_iter\x18\x33 \x03(\x05\x12\x0f\n\x07\x64isplay\x18\x06 \x01(\x05\x12\x17\n\x0c\x61verage_loss\x18! \x01(\x05:\x01\x31\x12\x10\n\x08max_iter\x18\x07 \x01(\x05\x12\x14\n\titer_size\x18$ \x01(\x05:\x01\x31\x12\x11\n\tlr_policy\x18\x08 \x01(\t\x12\r\n\x05gamma\x18\t \x01(\x02\x12\r\n\x05power\x18\n \x01(\x02\x12\x10\n\x08momentum\x18\x0b \x01(\x02\x12\x14\n\x0cweight_decay\x18\x0c \x01(\x02\x12\x1f\n\x13regularization_type\x18\x1d \x01(\t:\x02L2\x12\x10\n\x08stepsize\x18\r \x01(\x05\x12\x11\n\tstepvalue\x18\" \x03(\x05\x12\x1a\n\x0e\x63lip_gradients\x18# \x01(\x02:\x02-1\x12\x13\n\x08snapshot\x18\x0e \x01(\x05:\x01\x30\x12\x17\n\x0fsnapshot_prefix\x18\x0f \x01(\t\x12\x1c\n\rsnapshot_diff\x18\x10 \x01(\x08:\x05\x66\x61lse\x12K\n\x0fsnapshot_format\x18% \x01(\x0e\x32%.caffe.SolverParameter.SnapshotFormat:\x0b\x42INARYPROTO\x12;\n\x0bsolver_mode\x18\x11 \x01(\x0e\x32!.caffe.SolverParameter.SolverMode:\x03GPU\x12\x14\n\tdevice_id\x18\x12 \x01(\x05:\x01\x30\x12\x17\n\x0brandom_seed\x18\x14 \x01(\x03:\x02-1\x12\x11\n\x04type\x18( \x01(\t:\x03SGD\x12\x15\n\x05\x64\x65lta\x18\x1f \x01(\x02:\x06\x31\x65-008\x12\x18\n\tmomentum2\x18\' \x01(\x02:\x05\x30.999\x12\x17\n\trms_decay\x18& \x01(\x02:\x04\x30.99\x12\x19\n\ndebug_info\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\"\n\x14snapshot_after_train\x18\x1c \x01(\x08:\x04true\x12;\n\x0bsolver_type\x18\x1e 
\x01(\x0e\x32!.caffe.SolverParameter.SolverType:\x03SGD\"+\n\x0eSnapshotFormat\x12\x08\n\x04HDF5\x10\x00\x12\x0f\n\x0b\x42INARYPROTO\x10\x01\"\x1e\n\nSolverMode\x12\x07\n\x03\x43PU\x10\x00\x12\x07\n\x03GPU\x10\x01\"U\n\nSolverType\x12\x07\n\x03SGD\x10\x00\x12\x0c\n\x08NESTEROV\x10\x01\x12\x0b\n\x07\x41\x44\x41GRAD\x10\x02\x12\x0b\n\x07RMSPROP\x10\x03\x12\x0c\n\x08\x41\x44\x41\x44\x45LTA\x10\x04\x12\x08\n\x04\x41\x44\x41M\x10\x05\"l\n\x0bSolverState\x12\x0c\n\x04iter\x18\x01 \x01(\x05\x12\x13\n\x0blearned_net\x18\x02 \x01(\t\x12!\n\x07history\x18\x03 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x17\n\x0c\x63urrent_step\x18\x04 \x01(\x05:\x01\x30\"N\n\x08NetState\x12!\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase:\x04TEST\x12\x10\n\x05level\x18\x02 \x01(\x05:\x01\x30\x12\r\n\x05stage\x18\x03 \x03(\t\"\x85\x01\n\x0cNetStateRule\x12\x1b\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase\x12\x11\n\tmin_level\x18\x02 \x01(\x05\x12\x11\n\tmax_level\x18\x03 \x01(\x05\x12\r\n\x05stage\x18\x04 \x03(\t\x12\x11\n\tnot_stage\x18\x05 \x03(\t\x12\x10\n\x08mpi_rank\x18\x06 \x03(\r\"\xa3\x01\n\tParamSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\nshare_mode\x18\x02 \x01(\x0e\x32\x1d.caffe.ParamSpec.DimCheckMode\x12\x12\n\x07lr_mult\x18\x03 \x01(\x02:\x01\x31\x12\x15\n\ndecay_mult\x18\x04 \x01(\x02:\x01\x31\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\x95\x19\n\x0eLayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0e\n\x06\x62ottom\x18\x03 \x03(\t\x12\x0b\n\x03top\x18\x04 \x03(\t\x12\x1c\n\x0cmirror_stage\x18\xa2\x01 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x05phase\x18\n \x01(\x0e\x32\x0c.caffe.Phase\x12\x13\n\x0bloss_weight\x18\x05 \x03(\x02\x12\x1f\n\x05param\x18\x06 \x03(\x0b\x32\x10.caffe.ParamSpec\x12\x1f\n\x05\x62lobs\x18\x07 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x16\n\x0epropagate_down\x18\x0b \x03(\x08\x12$\n\x07include\x18\x08 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18\t 
\x03(\x0b\x32\x13.caffe.NetStateRule\x12\x37\n\x0ftransform_param\x18\x64 \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18\x65 \x01(\x0b\x32\x14.caffe.LossParameter\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x66 \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18g \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12\x34\n\x10\x62\x61tch_norm_param\x18\x8b\x01 \x01(\x0b\x32\x19.caffe.BatchNormParameter\x12)\n\nbias_param\x18\x8d\x01 \x01(\x0b\x32\x14.caffe.BiasParameter\x12,\n\x0c\x63oncat_param\x18h \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18i \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18j \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12)\n\ncrop_param\x18\x90\x01 \x01(\x0b\x32\x14.caffe.CropParameter\x12(\n\ndata_param\x18k \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18l \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18m \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18n \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12\'\n\telu_param\x18\x8c\x01 \x01(\x0b\x32\x13.caffe.ELUParameter\x12+\n\x0b\x65mbed_param\x18\x89\x01 \x01(\x0b\x32\x15.caffe.EmbedParameter\x12&\n\texp_param\x18o \x01(\x0b\x32\x13.caffe.ExpParameter\x12/\n\rflatten_param\x18\x87\x01 \x01(\x0b\x32\x17.caffe.FlattenParameter\x12\x31\n\x0fhdf5_data_param\x18p \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18q \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18r \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18s \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18t \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18u \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12+\n\x0binput_param\x18\x8f\x01 \x01(\x0b\x32\x15.caffe.InputParameter\x12\'\n\tlog_param\x18\x86\x01 
\x01(\x0b\x32\x13.caffe.LogParameter\x12&\n\tlrn_param\x18v \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18w \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18x \x01(\x0b\x32\x13.caffe.MVNParameter\x12\x33\n\x0fparameter_param\x18\x91\x01 \x01(\x0b\x32\x19.caffe.ParameterParameter\x12.\n\rpooling_param\x18y \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18z \x01(\x0b\x32\x15.caffe.PowerParameter\x12+\n\x0bprelu_param\x18\x83\x01 \x01(\x0b\x32\x15.caffe.PReLUParameter\x12-\n\x0cpython_param\x18\x82\x01 \x01(\x0b\x32\x16.caffe.PythonParameter\x12\x33\n\x0freduction_param\x18\x88\x01 \x01(\x0b\x32\x19.caffe.ReductionParameter\x12(\n\nrelu_param\x18{ \x01(\x0b\x32\x14.caffe.ReLUParameter\x12/\n\rreshape_param\x18\x85\x01 \x01(\x0b\x32\x17.caffe.ReshapeParameter\x12+\n\x0bscale_param\x18\x8e\x01 \x01(\x0b\x32\x15.caffe.ScaleParameter\x12.\n\rsigmoid_param\x18| \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18} \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12\'\n\tspp_param\x18\x84\x01 \x01(\x0b\x32\x13.caffe.SPPParameter\x12*\n\x0bslice_param\x18~ \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18\x7f \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x33\n\x0fthreshold_param\x18\x80\x01 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12)\n\ntile_param\x18\x8a\x01 \x01(\x0b\x32\x14.caffe.TileParameter\x12\x36\n\x11window_data_param\x18\x81\x01 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\x12\x36\n\x11roi_pooling_param\x18\x97\x01 \x01(\x0b\x32\x1a.caffe.ROIPoolingParameter\x12;\n\x14smooth_l1_loss_param\x18\x98\x01 \x01(\x0b\x32\x1c.caffe.SmoothL1LossParameter\x12\'\n\tmpi_param\x18\x99\x01 \x01(\x0b\x32\x13.caffe.MPIParameter\x12/\n\rpermute_param\x18\x9a\x01 \x01(\x0b\x32\x17.caffe.PermuteParameter\x12\x33\n\x0fnormalize_param\x18\x9b\x01 \x01(\x0b\x32\x19.caffe.NormalizeParameter\x12\x31\n\x0eparallel_param\x18\x9d\x01 \x01(\x0b\x32\x18.caffe.ParallelParameter\x12-\n\x0cresize_param\x18\x9e\x01 
\x01(\x0b\x32\x16.caffe.ResizeParameter\x12\x36\n\x11\x65xpand_dims_param\x18\x9f\x01 \x01(\x0b\x32\x1a.caffe.ExpandDimsParameter\x12\x31\n\x0eproposal_param\x18\xa0\x01 \x01(\x0b\x32\x18.caffe.ProposalParameter\x12\x38\n\x12\x62\x61tch_renorm_param\x18\xa1\x01 \x01(\x0b\x32\x1b.caffe.BatchRenormParameter\x12\x38\n\x12\x64\x65nse_concat_param\x18\xa3\x01 \x01(\x0b\x32\x1b.caffe.DenseConcatParameter\x12\x34\n\x10\x66ocal_loss_param\x18\xa4\x01 \x01(\x0b\x32\x19.caffe.FocalLossParameter\x12-\n\x0cgather_param\x18\xa5\x01 \x01(\x0b\x32\x16.caffe.GatherParameter\"\xa7\x02\n\x17TransformationParameter\x12\x10\n\x05scale\x18\x01 \x01(\x02:\x01\x31\x12\x15\n\x06mirror\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x14\n\tcrop_size\x18\x03 \x01(\r:\x01\x30\x12\x12\n\x07padding\x18\x0b \x01(\r:\x01\x30\x12\x11\n\tmean_file\x18\x04 \x01(\t\x12\x12\n\nmean_value\x18\x05 \x03(\x02\x12\x1a\n\x0b\x66orce_color\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\nforce_gray\x18\x07 \x01(\x08:\x05\x66\x61lse\x12!\n\x12\x63olor_augmentation\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x10min_random_scale\x18\t \x01(\x02:\x01\x31\x12\x1b\n\x10max_random_scale\x18\n \x01(\x02:\x01\x31\"\xf5\x01\n\rLossParameter\x12\x14\n\x0cignore_label\x18\x01 \x01(\x05\x12\x44\n\rnormalization\x18\x03 \x01(\x0e\x32&.caffe.LossParameter.NormalizationMode:\x05VALID\x12\x11\n\tnormalize\x18\x02 \x01(\x08\x1a\'\n\x13\x45xpandDimsParameter\x12\x10\n\x04\x61xis\x18\x01 \x01(\x05:\x02-1\"L\n\x11NormalizationMode\x12\x08\n\x04\x46ULL\x10\x00\x12\t\n\x05VALID\x10\x01\x12\x0e\n\nBATCH_SIZE\x10\x02\x12\x08\n\x04NONE\x10\x03\x12\x08\n\x04UNIT\x10\x04\"L\n\x11\x41\x63\x63uracyParameter\x12\x10\n\x05top_k\x18\x01 \x01(\r:\x01\x31\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x14\n\x0cignore_label\x18\x03 \x01(\x05\"M\n\x0f\x41rgMaxParameter\x12\x1a\n\x0bout_max_val\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x05top_k\x18\x02 \x01(\r:\x01\x31\x12\x0c\n\x04\x61xis\x18\x03 
\x01(\x05\"9\n\x0f\x43oncatParameter\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x15\n\nconcat_dim\x18\x01 \x01(\r:\x01\x31\"h\n\x12\x42\x61tchNormParameter\x12\x18\n\x10use_global_stats\x18\x01 \x01(\x08\x12$\n\x17moving_average_fraction\x18\x02 \x01(\x02:\x03\x30.9\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x30.001\"]\n\rBiasParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\"L\n\x18\x43ontrastiveLossParameter\x12\x11\n\x06margin\x18\x01 \x01(\x02:\x01\x31\x12\x1d\n\x0elegacy_version\x18\x02 \x01(\x08:\x05\x66\x61lse\"\xfc\x03\n\x14\x43onvolutionParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12\x0b\n\x03pad\x18\x03 \x03(\r\x12\x13\n\x0bkernel_size\x18\x04 \x03(\r\x12\x0e\n\x06stride\x18\x06 \x03(\r\x12\x10\n\x08\x64ilation\x18\x12 \x03(\r\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x10\n\x08kernel_h\x18\x0b \x01(\r\x12\x10\n\x08kernel_w\x18\x0c \x01(\r\x12\x10\n\x08stride_h\x18\r \x01(\r\x12\x10\n\x08stride_w\x18\x0e \x01(\r\x12\x10\n\x05group\x18\x05 \x01(\r:\x01\x31\x12-\n\rweight_filler\x18\x07 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x08 \x01(\x0b\x32\x16.caffe.FillerParameter\x12;\n\x06\x65ngine\x18\x0f \x01(\x0e\x32\".caffe.ConvolutionParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x10 \x01(\x05:\x01\x31\x12\x1e\n\x0f\x66orce_nd_im2col\x18\x11 \x01(\x08:\x05\x66\x61lse\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"0\n\rCropParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x32\x12\x0e\n\x06offset\x18\x02 \x03(\r\"\xa4\x02\n\rDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x31\n\x07\x62\x61\x63kend\x18\x08 
\x01(\x0e\x32\x17.caffe.DataParameter.DB:\x07LEVELDB\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x66orce_encoded_color\x18\t \x01(\x08:\x05\x66\x61lse\x12\x13\n\x08prefetch\x18\n \x01(\r:\x01\x35\"\x1b\n\x02\x44\x42\x12\x0b\n\x07LEVELDB\x10\x00\x12\x08\n\x04LMDB\x10\x01\"I\n\x10\x44ropoutParameter\x12\x1a\n\rdropout_ratio\x18\x01 \x01(\x02:\x03\x30.5\x12\x19\n\x0bscale_train\x18\x02 \x01(\x08:\x04true\"\xa0\x01\n\x12\x44ummyDataParameter\x12+\n\x0b\x64\x61ta_filler\x18\x01 \x03(\x0b\x32\x16.caffe.FillerParameter\x12\x1f\n\x05shape\x18\x06 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x0b\n\x03num\x18\x02 \x03(\r\x12\x10\n\x08\x63hannels\x18\x03 \x03(\r\x12\x0e\n\x06height\x18\x04 \x03(\r\x12\r\n\x05width\x18\x05 \x03(\r\"\xa5\x01\n\x10\x45ltwiseParameter\x12\x39\n\toperation\x18\x01 \x01(\x0e\x32!.caffe.EltwiseParameter.EltwiseOp:\x03SUM\x12\r\n\x05\x63oeff\x18\x02 \x03(\x02\x12\x1e\n\x10stable_prod_grad\x18\x03 \x01(\x08:\x04true\"\'\n\tEltwiseOp\x12\x08\n\x04PROD\x10\x00\x12\x07\n\x03SUM\x10\x01\x12\x07\n\x03MAX\x10\x02\" \n\x0c\x45LUParameter\x12\x10\n\x05\x61lpha\x18\x01 \x01(\x02:\x01\x31\"\xac\x01\n\x0e\x45mbedParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x11\n\tinput_dim\x18\x02 \x01(\r\x12\x17\n\tbias_term\x18\x03 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"D\n\x0c\x45xpParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"9\n\x10\x46lattenParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x14\n\x08\x65nd_axis\x18\x02 \x01(\x05:\x02-1\"O\n\x11HDF5DataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x02 \x01(\r\x12\x16\n\x07shuffle\x18\x03 
\x01(\x08:\x05\x66\x61lse\"(\n\x13HDF5OutputParameter\x12\x11\n\tfile_name\x18\x01 \x01(\t\"^\n\x12HingeLossParameter\x12\x30\n\x04norm\x18\x01 \x01(\x0e\x32\x1e.caffe.HingeLossParameter.Norm:\x02L1\"\x16\n\x04Norm\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\"\x97\x02\n\x12ImageDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x15\n\nbatch_size\x18\x04 \x01(\r:\x01\x31\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x16\n\x07shuffle\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nnew_height\x18\t \x01(\r:\x01\x30\x12\x14\n\tnew_width\x18\n \x01(\r:\x01\x30\x12\x16\n\x08is_color\x18\x0b \x01(\x08:\x04true\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\x0c \x01(\t:\x00\"\'\n\x15InfogainLossParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\"\xcb\x01\n\x15InnerProductParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0f\n\x04\x61xis\x18\x05 \x01(\x05:\x01\x31\x12\x18\n\ttranspose\x18\x06 \x01(\x08:\x05\x66\x61lse\"1\n\x0eInputParameter\x12\x1f\n\x05shape\x18\x01 \x03(\x0b\x32\x10.caffe.BlobShape\"D\n\x0cLogParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"\xb8\x02\n\x0cLRNParameter\x12\x15\n\nlocal_size\x18\x01 \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x02 \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x03 \x01(\x02:\x04\x30.75\x12\x44\n\x0bnorm_region\x18\x04 \x01(\x0e\x32\x1e.caffe.LRNParameter.NormRegion:\x0f\x41\x43ROSS_CHANNELS\x12\x0c\n\x01k\x18\x05 \x01(\x02:\x01\x31\x12\x33\n\x06\x65ngine\x18\x06 
\x01(\x0e\x32\x1a.caffe.LRNParameter.Engine:\x07\x44\x45\x46\x41ULT\"5\n\nNormRegion\x12\x13\n\x0f\x41\x43ROSS_CHANNELS\x10\x00\x12\x12\n\x0eWITHIN_CHANNEL\x10\x01\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xbd\x01\n\x13MemoryDataParameter\x12\x12\n\nbatch_size\x18\x01 \x01(\r\x12\x10\n\x08\x63hannels\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\r\n\x05width\x18\x04 \x01(\r\x12;\n\x05\x64type\x18\x05 \x01(\x0e\x32#.caffe.MemoryDataParameter.DataType:\x07\x46LOAT32\"$\n\x08\x44\x61taType\x12\x0b\n\x07\x46LOAT32\x10\x00\x12\x0b\n\x07\x46LOAT16\x10\x01\"e\n\x0cMVNParameter\x12 \n\x12normalize_variance\x18\x01 \x01(\x08:\x04true\x12\x1e\n\x0f\x61\x63ross_channels\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x13\n\x03\x65ps\x18\x03 \x01(\x02:\x06\x31\x65-009\"5\n\x12ParameterParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\"\xa2\x03\n\x10PoolingParameter\x12\x35\n\x04pool\x18\x01 \x01(\x0e\x32\".caffe.PoolingParameter.PoolMethod:\x03MAX\x12\x0e\n\x03pad\x18\x04 \x01(\r:\x01\x30\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x02 \x01(\r\x12\x10\n\x08kernel_h\x18\x05 \x01(\r\x12\x10\n\x08kernel_w\x18\x06 \x01(\r\x12\x11\n\x06stride\x18\x03 \x01(\r:\x01\x31\x12\x10\n\x08stride_h\x18\x07 \x01(\r\x12\x10\n\x08stride_w\x18\x08 \x01(\r\x12\x37\n\x06\x65ngine\x18\x0b \x01(\x0e\x32\x1e.caffe.PoolingParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x1d\n\x0eglobal_pooling\x18\x0c \x01(\x08:\x05\x66\x61lse\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Y\n\x13ROIPoolingParameter\x12\x13\n\x08pooled_h\x18\x01 \x01(\r:\x01\x30\x12\x13\n\x08pooled_w\x18\x02 \x01(\r:\x01\x30\x12\x18\n\rspatial_scale\x18\x03 
\x01(\x02:\x01\x31\"F\n\x0ePowerParameter\x12\x10\n\x05power\x18\x01 \x01(\x02:\x01\x31\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"g\n\x0fPythonParameter\x12\x0e\n\x06module\x18\x01 \x01(\t\x12\r\n\x05layer\x18\x02 \x01(\t\x12\x13\n\tparam_str\x18\x03 \x01(\t:\x00\x12 \n\x11share_in_parallel\x18\x04 \x01(\x08:\x05\x66\x61lse\"\xad\x01\n\x12ReductionParameter\x12=\n\toperation\x18\x01 \x01(\x0e\x32%.caffe.ReductionParameter.ReductionOp:\x03SUM\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x10\n\x05\x63oeff\x18\x03 \x01(\x02:\x01\x31\"5\n\x0bReductionOp\x12\x07\n\x03SUM\x10\x01\x12\x08\n\x04\x41SUM\x10\x02\x12\t\n\x05SUMSQ\x10\x03\x12\x08\n\x04MEAN\x10\x04\"\x8d\x01\n\rReLUParameter\x12\x19\n\x0enegative_slope\x18\x01 \x01(\x02:\x01\x30\x12\x34\n\x06\x65ngine\x18\x02 \x01(\x0e\x32\x1b.caffe.ReLUParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Z\n\x10ReshapeParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\x08num_axes\x18\x03 \x01(\x05:\x02-1\"\xa5\x01\n\x0eScaleParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x18\n\tbias_term\x18\x04 \x01(\x08:\x05\x66\x61lse\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"x\n\x10SigmoidParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SigmoidParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"L\n\x0eSliceParameter\x12\x0f\n\x04\x61xis\x18\x03 \x01(\x05:\x01\x31\x12\x13\n\x0bslice_point\x18\x02 \x03(\r\x12\x14\n\tslice_dim\x18\x01 
\x01(\r:\x01\x31\"\x89\x01\n\x10SoftmaxParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SoftmaxParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"r\n\rTanHParameter\x12\x34\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1b.caffe.TanHParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"T\n\rTileParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\r\n\x05tiles\x18\x02 \x01(\x05\x12#\n\tmultiples\x18\x03 \x01(\x0b\x32\x10.caffe.BlobShape\"*\n\x12ThresholdParameter\x12\x14\n\tthreshold\x18\x01 \x01(\x02:\x01\x30\"\xc1\x02\n\x13WindowDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0c\x66g_threshold\x18\x07 \x01(\x02:\x03\x30.5\x12\x19\n\x0c\x62g_threshold\x18\x08 \x01(\x02:\x03\x30.5\x12\x19\n\x0b\x66g_fraction\x18\t \x01(\x02:\x04\x30.25\x12\x16\n\x0b\x63ontext_pad\x18\n \x01(\r:\x01\x30\x12\x17\n\tcrop_mode\x18\x0b \x01(\t:\x04warp\x12\x1b\n\x0c\x63\x61\x63he_images\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\r \x01(\t:\x00\"\xeb\x01\n\x0cSPPParameter\x12\x16\n\x0epyramid_height\x18\x01 \x01(\r\x12\x31\n\x04pool\x18\x02 \x01(\x0e\x32\x1e.caffe.SPPParameter.PoolMethod:\x03MAX\x12\x33\n\x06\x65ngine\x18\x06 
\x01(\x0e\x32\x1a.caffe.SPPParameter.Engine:\x07\x44\x45\x46\x41ULT\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xe0\x13\n\x10V1LayerParameter\x12\x0e\n\x06\x62ottom\x18\x02 \x03(\t\x12\x0b\n\x03top\x18\x03 \x03(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12$\n\x07include\x18 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18! \x03(\x0b\x32\x13.caffe.NetStateRule\x12/\n\x04type\x18\x05 \x01(\x0e\x32!.caffe.V1LayerParameter.LayerType\x12\x1f\n\x05\x62lobs\x18\x06 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x0e\n\x05param\x18\xe9\x07 \x03(\t\x12>\n\x0f\x62lob_share_mode\x18\xea\x07 \x03(\x0e\x32$.caffe.V1LayerParameter.DimCheckMode\x12\x10\n\x08\x62lobs_lr\x18\x07 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x08 \x03(\x02\x12\x13\n\x0bloss_weight\x18# \x03(\x02\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x1b \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18\x17 \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12,\n\x0c\x63oncat_param\x18\t \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18( \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18\n \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12(\n\ndata_param\x18\x0b \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18\x0c \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18\x1a \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18\x18 \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12&\n\texp_param\x18) \x01(\x0b\x32\x13.caffe.ExpParameter\x12\x31\n\x0fhdf5_data_param\x18\r \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18\x0e \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18\x1d \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18\x0f 
\x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18\x10 \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18\x11 \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12&\n\tlrn_param\x18\x12 \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18\x16 \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18\" \x01(\x0b\x32\x13.caffe.MVNParameter\x12.\n\rpooling_param\x18\x13 \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18\x15 \x01(\x0b\x32\x15.caffe.PowerParameter\x12(\n\nrelu_param\x18\x1e \x01(\x0b\x32\x14.caffe.ReLUParameter\x12.\n\rsigmoid_param\x18& \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18\' \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12*\n\x0bslice_param\x18\x1f \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18% \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x32\n\x0fthreshold_param\x18\x19 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12\x35\n\x11window_data_param\x18\x14 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\x12\x37\n\x0ftransform_param\x18$ \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18* \x01(\x0b\x32\x14.caffe.LossParameter\x12&\n\x05layer\x18\x01 \x01(\x0b\x32\x17.caffe.V0LayerParameter\"\xd8\x04\n\tLayerType\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06\x41\x42SVAL\x10#\x12\x0c\n\x08\x41\x43\x43URACY\x10\x01\x12\n\n\x06\x41RGMAX\x10\x1e\x12\x08\n\x04\x42NLL\x10\x02\x12\n\n\x06\x43ONCAT\x10\x03\x12\x14\n\x10\x43ONTRASTIVE_LOSS\x10%\x12\x0f\n\x0b\x43ONVOLUTION\x10\x04\x12\x08\n\x04\x44\x41TA\x10\x05\x12\x11\n\rDECONVOLUTION\x10\'\x12\x0b\n\x07\x44ROPOUT\x10\x06\x12\x0e\n\nDUMMY_DATA\x10 
\x12\x12\n\x0e\x45UCLIDEAN_LOSS\x10\x07\x12\x0b\n\x07\x45LTWISE\x10\x19\x12\x07\n\x03\x45XP\x10&\x12\x0b\n\x07\x46LATTEN\x10\x08\x12\r\n\tHDF5_DATA\x10\t\x12\x0f\n\x0bHDF5_OUTPUT\x10\n\x12\x0e\n\nHINGE_LOSS\x10\x1c\x12\n\n\x06IM2COL\x10\x0b\x12\x0e\n\nIMAGE_DATA\x10\x0c\x12\x11\n\rINFOGAIN_LOSS\x10\r\x12\x11\n\rINNER_PRODUCT\x10\x0e\x12\x07\n\x03LRN\x10\x0f\x12\x0f\n\x0bMEMORY_DATA\x10\x1d\x12\x1d\n\x19MULTINOMIAL_LOGISTIC_LOSS\x10\x10\x12\x07\n\x03MVN\x10\"\x12\x0b\n\x07POOLING\x10\x11\x12\t\n\x05POWER\x10\x1a\x12\x08\n\x04RELU\x10\x12\x12\x0b\n\x07SIGMOID\x10\x13\x12\x1e\n\x1aSIGMOID_CROSS_ENTROPY_LOSS\x10\x1b\x12\x0b\n\x07SILENCE\x10$\x12\x0b\n\x07SOFTMAX\x10\x14\x12\x10\n\x0cSOFTMAX_LOSS\x10\x15\x12\t\n\x05SPLIT\x10\x16\x12\t\n\x05SLICE\x10!\x12\x08\n\x04TANH\x10\x17\x12\x0f\n\x0bWINDOW_DATA\x10\x18\x12\r\n\tTHRESHOLD\x10\x1f\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\xfd\x07\n\x10V0LayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x12\n\nnum_output\x18\x03 \x01(\r\x12\x16\n\x08\x62iasterm\x18\x04 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x06 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0e\n\x03pad\x18\x07 \x01(\r:\x01\x30\x12\x12\n\nkernelsize\x18\x08 \x01(\r\x12\x10\n\x05group\x18\t \x01(\r:\x01\x31\x12\x11\n\x06stride\x18\n \x01(\r:\x01\x31\x12\x35\n\x04pool\x18\x0b \x01(\x0e\x32\".caffe.V0LayerParameter.PoolMethod:\x03MAX\x12\x1a\n\rdropout_ratio\x18\x0c \x01(\x02:\x03\x30.5\x12\x15\n\nlocal_size\x18\r \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x0e \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x0f \x01(\x02:\x04\x30.75\x12\x0c\n\x01k\x18\x16 \x01(\x02:\x01\x31\x12\x0e\n\x06source\x18\x10 \x01(\t\x12\x10\n\x05scale\x18\x11 \x01(\x02:\x01\x31\x12\x10\n\x08meanfile\x18\x12 \x01(\t\x12\x11\n\tbatchsize\x18\x13 \x01(\r\x12\x13\n\x08\x63ropsize\x18\x14 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x15 
\x01(\x08:\x05\x66\x61lse\x12\x1f\n\x05\x62lobs\x18\x32 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x10\n\x08\x62lobs_lr\x18\x33 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x34 \x03(\x02\x12\x14\n\trand_skip\x18\x35 \x01(\r:\x01\x30\x12\x1d\n\x10\x64\x65t_fg_threshold\x18\x36 \x01(\x02:\x03\x30.5\x12\x1d\n\x10\x64\x65t_bg_threshold\x18\x37 \x01(\x02:\x03\x30.5\x12\x1d\n\x0f\x64\x65t_fg_fraction\x18\x38 \x01(\x02:\x04\x30.25\x12\x1a\n\x0f\x64\x65t_context_pad\x18: \x01(\r:\x01\x30\x12\x1b\n\rdet_crop_mode\x18; \x01(\t:\x04warp\x12\x12\n\x07new_num\x18< \x01(\x05:\x01\x30\x12\x17\n\x0cnew_channels\x18= \x01(\x05:\x01\x30\x12\x15\n\nnew_height\x18> \x01(\x05:\x01\x30\x12\x14\n\tnew_width\x18? \x01(\x05:\x01\x30\x12\x1d\n\x0eshuffle_images\x18@ \x01(\x08:\x05\x66\x61lse\x12\x15\n\nconcat_dim\x18\x41 \x01(\r:\x01\x31\x12\x36\n\x11hdf5_output_param\x18\xe9\x07 \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"W\n\x0ePReLUParameter\x12&\n\x06\x66iller\x18\x01 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x1d\n\x0e\x63hannel_shared\x18\x02 \x01(\x08:\x05\x66\x61lse\")\n\x15SmoothL1LossParameter\x12\x10\n\x05sigma\x18\x01 \x01(\x02:\x01\x31\"H\n\x0cMPIParameter\x12\x0f\n\x04root\x18\x01 \x01(\r:\x01\x30\x12\x12\n\x07\x63omm_id\x18\x02 \x01(\x04:\x01\x30\x12\x13\n\x08group_id\x18\x03 \x01(\x04:\x01\x30\"!\n\x10PermuteParameter\x12\r\n\x05order\x18\x01 \x03(\r\"\x93\x01\n\x12NormalizeParameter\x12\x1c\n\x0e\x61\x63ross_spatial\x18\x01 \x01(\x08:\x04true\x12,\n\x0cscale_filler\x18\x02 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x1c\n\x0e\x63hannel_shared\x18\x03 \x01(\x08:\x04true\x12\x13\n\x03\x65ps\x18\x04 \x01(\x02:\x06\x31\x65-010\"_\n\x11ParallelParameter\x12\x16\n\x07shuffle\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x18\n\tnode_step\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x18\n\tpartition\x18\x03 \x01(\x08:\x05\x66\x61lse\"R\n\x0fResizeParameter\x12\x1f\n\x05shape\x18\x01 
\x01(\x0b\x32\x10.caffe.BlobShape\x12\x0e\n\x02\x66x\x18\x02 \x01(\x02:\x02-1\x12\x0e\n\x02\x66y\x18\x03 \x01(\x02:\x02-1\"\'\n\x13\x45xpandDimsParameter\x12\x10\n\x04\x61xis\x18\x01 \x01(\x05:\x02-1\"\x90\x02\n\x11ProposalParameter\x12\x0e\n\x06stride\x18\x01 \x03(\x05\x12\r\n\x05ratio\x18\x02 \x03(\x02\x12\r\n\x05scale\x18\x03 \x03(\x02\x12\x1b\n\rpre_nms_top_n\x18\x04 \x01(\r:\x04\x36\x30\x30\x30\x12\x1b\n\x0epost_nms_top_n\x18\x05 \x01(\r:\x03\x33\x30\x30\x12\x17\n\nnms_thresh\x18\x06 \x01(\x02:\x03\x30.7\x12\x14\n\x08min_size\x18\x07 \x01(\r:\x02\x31\x36\x12\x14\n\tmin_level\x18\x08 \x01(\x05:\x01\x32\x12\x14\n\tmax_level\x18\t \x01(\x05:\x01\x35\x12\x1c\n\x0f\x63\x61nonical_scale\x18\n \x01(\x05:\x03\x32\x32\x34\x12\x1a\n\x0f\x63\x61nonical_level\x18\x0b \x01(\x05:\x01\x34\"\xa6\x01\n\x14\x42\x61tchRenormParameter\x12\x18\n\x10use_global_stats\x18\x01 \x01(\x08\x12$\n\x17moving_average_fraction\x18\x02 \x01(\x02:\x03\x30.9\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x30.001\x12\x10\n\x05r_max\x18\x04 \x01(\x02:\x01\x33\x12\x10\n\x05\x64_max\x18\x05 \x01(\x02:\x01\x35\x12\x16\n\x07t_delta\x18\x06 \x01(\x02:\x05\x30.001\"?\n\x14\x44\x65nseConcatParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x16\n\x0bgrowth_rate\x18\x02 \x01(\x05:\x01\x30\"c\n\x12\x46ocalLossParameter\x12\x12\n\x05\x61lpha\x18\x01 \x01(\x02:\x03\x30.5\x12\x10\n\x05gamma\x18\x02 \x01(\x02:\x01\x30\x12\x13\n\x03\x65ps\x18\x03 \x01(\x02:\x06\x31\x65-010\x12\x12\n\x06neg_id\x18\x04 \x01(\x05:\x02-1\"\"\n\x0fGatherParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x30*\x1c\n\x05Phase\x12\t\n\x05TRAIN\x10\x00\x12\x08\n\x04TEST\x10\x01')
) )
_sym_db.RegisterFileDescriptor(DESCRIPTOR) _sym_db.RegisterFileDescriptor(DESCRIPTOR)
...@@ -40,8 +40,8 @@ _PHASE = _descriptor.EnumDescriptor( ...@@ -40,8 +40,8 @@ _PHASE = _descriptor.EnumDescriptor(
], ],
containing_type=None, containing_type=None,
options=None, options=None,
serialized_start=17391, serialized_start=17463,
serialized_end=17419, serialized_end=17491,
) )
_sym_db.RegisterEnumDescriptor(_PHASE) _sym_db.RegisterEnumDescriptor(_PHASE)
...@@ -5624,58 +5624,79 @@ _PROPOSALPARAMETER = _descriptor.Descriptor( ...@@ -5624,58 +5624,79 @@ _PROPOSALPARAMETER = _descriptor.Descriptor(
containing_type=None, containing_type=None,
fields=[ fields=[
_descriptor.FieldDescriptor( _descriptor.FieldDescriptor(
name='feat_stride', full_name='caffe.ProposalParameter.feat_stride', index=0, name='stride', full_name='caffe.ProposalParameter.stride', index=0,
number=1, type=13, cpp_type=3, label=1, number=1, type=5, cpp_type=1, label=3,
has_default_value=True, default_value=16, has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None, message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None, is_extension=False, extension_scope=None,
options=None), options=None),
_descriptor.FieldDescriptor( _descriptor.FieldDescriptor(
name='base_size', full_name='caffe.ProposalParameter.base_size', index=1, name='ratio', full_name='caffe.ProposalParameter.ratio', index=1,
number=2, type=13, cpp_type=3, label=1, number=2, type=2, cpp_type=6, label=3,
has_default_value=True, default_value=16, has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None, message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None, is_extension=False, extension_scope=None,
options=None), options=None),
_descriptor.FieldDescriptor( _descriptor.FieldDescriptor(
name='min_size', full_name='caffe.ProposalParameter.min_size', index=2, name='scale', full_name='caffe.ProposalParameter.scale', index=2,
number=3, type=13, cpp_type=3, label=1, number=3, type=2, cpp_type=6, label=3,
has_default_value=True, default_value=16, has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None, message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None, is_extension=False, extension_scope=None,
options=None), options=None),
_descriptor.FieldDescriptor( _descriptor.FieldDescriptor(
name='ratio', full_name='caffe.ProposalParameter.ratio', index=3, name='pre_nms_top_n', full_name='caffe.ProposalParameter.pre_nms_top_n', index=3,
number=4, type=2, cpp_type=6, label=3, number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=[], has_default_value=True, default_value=6000,
message_type=None, enum_type=None, containing_type=None, message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None, is_extension=False, extension_scope=None,
options=None), options=None),
_descriptor.FieldDescriptor( _descriptor.FieldDescriptor(
name='scale', full_name='caffe.ProposalParameter.scale', index=4, name='post_nms_top_n', full_name='caffe.ProposalParameter.post_nms_top_n', index=4,
number=5, type=2, cpp_type=6, label=3, number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=[], has_default_value=True, default_value=300,
message_type=None, enum_type=None, containing_type=None, message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None, is_extension=False, extension_scope=None,
options=None), options=None),
_descriptor.FieldDescriptor( _descriptor.FieldDescriptor(
name='pre_nms_topn', full_name='caffe.ProposalParameter.pre_nms_topn', index=5, name='nms_thresh', full_name='caffe.ProposalParameter.nms_thresh', index=5,
number=6, type=13, cpp_type=3, label=1, number=6, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=6000, has_default_value=True, default_value=0.7,
message_type=None, enum_type=None, containing_type=None, message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None, is_extension=False, extension_scope=None,
options=None), options=None),
_descriptor.FieldDescriptor( _descriptor.FieldDescriptor(
name='post_nms_topn', full_name='caffe.ProposalParameter.post_nms_topn', index=6, name='min_size', full_name='caffe.ProposalParameter.min_size', index=6,
number=7, type=13, cpp_type=3, label=1, number=7, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=300, has_default_value=True, default_value=16,
message_type=None, enum_type=None, containing_type=None, message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None, is_extension=False, extension_scope=None,
options=None), options=None),
_descriptor.FieldDescriptor( _descriptor.FieldDescriptor(
name='nms_thresh', full_name='caffe.ProposalParameter.nms_thresh', index=7, name='min_level', full_name='caffe.ProposalParameter.min_level', index=7,
number=8, type=2, cpp_type=6, label=1, number=8, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0.7, has_default_value=True, default_value=2,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_level', full_name='caffe.ProposalParameter.max_level', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='canonical_scale', full_name='caffe.ProposalParameter.canonical_scale', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=224,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='canonical_level', full_name='caffe.ProposalParameter.canonical_level', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None, message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None, is_extension=False, extension_scope=None,
options=None), options=None),
...@@ -5691,7 +5712,7 @@ _PROPOSALPARAMETER = _descriptor.Descriptor( ...@@ -5691,7 +5712,7 @@ _PROPOSALPARAMETER = _descriptor.Descriptor(
oneofs=[ oneofs=[
], ],
serialized_start=16818, serialized_start=16818,
serialized_end=17018, serialized_end=17090,
) )
...@@ -5755,8 +5776,8 @@ _BATCHRENORMPARAMETER = _descriptor.Descriptor( ...@@ -5755,8 +5776,8 @@ _BATCHRENORMPARAMETER = _descriptor.Descriptor(
extension_ranges=[], extension_ranges=[],
oneofs=[ oneofs=[
], ],
serialized_start=17021, serialized_start=17093,
serialized_end=17187, serialized_end=17259,
) )
...@@ -5792,8 +5813,8 @@ _DENSECONCATPARAMETER = _descriptor.Descriptor( ...@@ -5792,8 +5813,8 @@ _DENSECONCATPARAMETER = _descriptor.Descriptor(
extension_ranges=[], extension_ranges=[],
oneofs=[ oneofs=[
], ],
serialized_start=17189, serialized_start=17261,
serialized_end=17252, serialized_end=17324,
) )
...@@ -5843,8 +5864,8 @@ _FOCALLOSSPARAMETER = _descriptor.Descriptor( ...@@ -5843,8 +5864,8 @@ _FOCALLOSSPARAMETER = _descriptor.Descriptor(
extension_ranges=[], extension_ranges=[],
oneofs=[ oneofs=[
], ],
serialized_start=17254, serialized_start=17326,
serialized_end=17353, serialized_end=17425,
) )
...@@ -5873,8 +5894,8 @@ _GATHERPARAMETER = _descriptor.Descriptor( ...@@ -5873,8 +5894,8 @@ _GATHERPARAMETER = _descriptor.Descriptor(
extension_ranges=[], extension_ranges=[],
oneofs=[ oneofs=[
], ],
serialized_start=17355, serialized_start=17427,
serialized_end=17389, serialized_end=17461,
) )
_BLOBPROTO.fields_by_name['shape'].message_type = _BLOBSHAPE _BLOBPROTO.fields_by_name['shape'].message_type = _BLOBSHAPE
......
...@@ -36,7 +36,7 @@ find_packages('dragon') ...@@ -36,7 +36,7 @@ find_packages('dragon')
find_modules() find_modules()
setup(name = 'dragon', setup(name = 'dragon',
version='0.2.1.3', version='0.2.1.4',
description = 'Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework', description = 'Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework',
url='https://github.com/neopenx/Dragon', url='https://github.com/neopenx/Dragon',
author='Ting Pan', author='Ting Pan',
......
#include "core/context.h"
#include "contrib/rcnn/bbox_utils.h"
namespace dragon {
namespace rcnn {
/******************** Proposal ********************/
template <> void GenerateProposals<float, CPUContext>(const int A,
                                                      const int feat_h,
                                                      const int feat_w,
                                                      const int stride,
                                                      const float im_h, const float im_w,
                                                      const float min_box_h, const float min_box_w,
                                                      const float* scores,
                                                      const float* bbox_deltas,
                                                      const float* anchors,
                                                      float* proposals) {
    // Decode every (spatial position, anchor) pair into one scored box.
    // Layouts: bbox_deltas [1, A, 4, K], scores [1, A, K] with K = feat_h * feat_w;
    // proposals is filled as rows of [x1, y1, x2, y2, score].
    const int K = feat_h * feat_w;
    float* box = proposals;
    for (int spatial = 0; spatial < K; ++spatial) {
        const float shift_x = (float)((spatial % feat_w) * stride);
        const float shift_y = (float)((spatial / feat_w) * stride);
        const float* delta = bbox_deltas + spatial;
        const float* score = scores + spatial;
        for (int a = 0; a < A; ++a) {
            // place the a-th anchor at this spatial position
            box[0] = shift_x + anchors[a * 4 + 0];
            box[1] = shift_y + anchors[a * 4 + 1];
            box[2] = shift_x + anchors[a * 4 + 2];
            box[3] = shift_y + anchors[a * 4 + 3];
            // apply the regression deltas in place; a zero score marks
            // boxes that fall below the minimum size after clipping
            box[4] = BBoxTransform<float>(delta[(a * 4 + 0) * K],
                                          delta[(a * 4 + 1) * K],
                                          delta[(a * 4 + 2) * K],
                                          delta[(a * 4 + 3) * K],
                                          im_w, im_h,
                                          min_box_w, min_box_h,
                                          box) * score[a * K];
            box += 5;
        }
    }
}
template <> void GenerateProposals_v2<float, CPUContext>(const int total_anchors,
                                                         const float im_h, const float im_w,
                                                         const float min_box_h, const float min_box_w,
                                                         const float* scores,
                                                         const float* bbox_deltas,
                                                         float* proposals) {
    // Decode pre-flattened anchors (already stored in `proposals`) using
    // deltas laid out as [1, 4, total_anchors] and scores as [1, total_anchors].
    const float* dx_row = bbox_deltas;
    const float* dy_row = bbox_deltas + total_anchors;
    const float* dw_row = bbox_deltas + 2 * total_anchors;
    const float* dh_row = bbox_deltas + 3 * total_anchors;
    for (int i = 0; i < total_anchors; ++i) {
        float* box = proposals + i * 5;
        // score is zeroed for boxes below the minimum size after clipping
        box[4] = BBoxTransform<float>(dx_row[i], dy_row[i],
                                      dw_row[i], dh_row[i],
                                      im_w, im_h,
                                      min_box_w, min_box_h,
                                      box) * scores[i];
    }
}
/******************** NMS ********************/
template <typename T>
T iou(const T A[], const T B[]) {
    // Intersection-over-union of two pixel-inclusive boxes [x1, y1, x2, y2].
    // Fast reject: boxes that do not touch at all.
    if (A[0] > B[2] || A[1] > B[3] || A[2] < B[0] || A[3] < B[1]) return 0;
    const T ix1 = std::max(A[0], B[0]);
    const T iy1 = std::max(A[1], B[1]);
    const T ix2 = std::min(A[2], B[2]);
    const T iy2 = std::min(A[3], B[3]);
    const T inter_w = std::max((T)0, ix2 - ix1 + (T)1);
    const T inter_h = std::max((T)0, iy2 - iy1 + (T)1);
    const T inter = inter_w * inter_h;
    const T area_a = (A[2] - A[0] + (T)1) * (A[3] - A[1] + (T)1);
    const T area_b = (B[2] - B[0] + (T)1) * (B[3] - B[1] + (T)1);
    return inter / (area_a + area_b - inter);
}
template <> void NMS<float, CPUContext>(const int num_boxes,
                                        const int max_keeps,
                                        const float thresh,
                                        const float* proposals,
                                        int* roi_indices,
                                        int& num_rois,
                                        Tensor* mask) {
    // Greedy NMS over score-descending proposals: keep a box, then kill
    // every later box whose IoU with it exceeds `thresh`. At most
    // `max_keeps` indices are written; `mask` is unused on CPU (kept for
    // interface parity with the CUDA path).
    std::vector<char> suppressed(num_boxes, 0);
    int num_kept = 0;
    for (int i = 0; i < num_boxes; ++i) {
        if (suppressed[i]) continue;
        roi_indices[num_kept++] = i;
        if (num_kept == max_keeps) break;
        const float* box_i = &proposals[i * 5];
        for (int j = i + 1; j < num_boxes; ++j) {
            if (suppressed[j]) continue;
            if (iou(box_i, &proposals[j * 5]) > thresh) suppressed[j] = 1;
        }
    }
    num_rois = num_kept;
}
} // namespace rcnn
} // namespace dragon
\ No newline at end of file
#ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "contrib/rcnn/bbox_utils.h"
namespace dragon {
namespace rcnn {
/******************** BBox ********************/
template <typename T>
__device__ int _BBoxTransform(const T dx, const T dy,
                              const T d_log_w, const T d_log_h,
                              const T im_w, const T im_h,
                              const T min_box_w, const T min_box_h,
                              T* bbox) {
    // Device twin of rcnn::BBoxTransform (bbox_utils.h): applies the
    // (dx, dy, d_log_w, d_log_h) deltas to `bbox` in place, clips to the
    // image, and returns 1 iff the clipped box is at least
    // min_box_w x min_box_h (0 otherwise).
    const T w = bbox[2] - bbox[0] + (T)1;
    const T h = bbox[3] - bbox[1] + (T)1;
    const T ctr_x = bbox[0] + (T)0.5 * w;
    const T ctr_y = bbox[1] + (T)0.5 * h;
    // centers move proportionally; width/height deltas are in log-space
    const T pred_ctr_x = dx * w + ctr_x;
    const T pred_ctr_y = dy * h + ctr_y;
    const T pred_w = exp(d_log_w) * w;
    const T pred_h = exp(d_log_h) * h;
    bbox[0] = pred_ctr_x - (T)0.5 * pred_w;
    bbox[1] = pred_ctr_y - (T)0.5 * pred_h;
    bbox[2] = pred_ctr_x + (T)0.5 * pred_w;
    bbox[3] = pred_ctr_y + (T)0.5 * pred_h;
    // clip each corner into [0, im_dim - 1] (pixel-inclusive coordinates)
    bbox[0] = max((T)0, min(bbox[0], im_w - (T)1));
    bbox[1] = max((T)0, min(bbox[1], im_h - (T)1));
    bbox[2] = max((T)0, min(bbox[2], im_w - (T)1));
    bbox[3] = max((T)0, min(bbox[3], im_h - (T)1));
    const T box_w = bbox[2] - bbox[0] + (T)1;
    const T box_h = bbox[3] - bbox[1] + (T)1;
    return (box_w >= min_box_w) * (box_h >= min_box_h);
}
/******************** Proposal ********************/
template <typename T>
__global__ void _GenerateProposals(const int nthreads,
                                   const int A,
                                   const int feat_h,
                                   const int feat_w,
                                   const int stride,
                                   const float im_h, const float im_w,
                                   const float min_box_h, const float min_box_w,
                                   const T* scores,
                                   const T* bbox_deltas,
                                   const T* anchors,
                                   T* proposals) {
    // One thread per (h, w, a) cell; nthreads == feat_h * feat_w * A.
    CUDA_KERNEL_LOOP(idx, nthreads) {
        const int h = idx / A / feat_w;
        const int w = (idx / A) % feat_w;
        const int a = idx % A;
        const T x = w * stride;
        const T y = h * stride;
        // bbox_deltas: [1, A, 4, K], scores: [1, A, K], K = feat_h * feat_w
        const T* bbox_delta = bbox_deltas + h * feat_w + w;
        const T* score = scores + h * feat_w + w;
        const int K = feat_h * feat_w;
        const T dx = bbox_delta[(a * 4 + 0) * K];
        const T dy = bbox_delta[(a * 4 + 1) * K];
        const T d_log_w = bbox_delta[(a * 4 + 2) * K];
        const T d_log_h = bbox_delta[(a * 4 + 3) * K];
        // proposals: rows of [x1, y1, x2, y2, score]
        T* proposal = proposals + idx * 5;
        proposal[0] = x + anchors[a * 4 + 0];
        proposal[1] = y + anchors[a * 4 + 1];
        proposal[2] = x + anchors[a * 4 + 2];
        proposal[3] = y + anchors[a * 4 + 3];
        // a zero score marks boxes below the minimum size after clipping
        proposal[4] = _BBoxTransform(dx, dy,
                                     d_log_w, d_log_h,
                                     im_w, im_h,
                                     min_box_w, min_box_h,
                                     proposal) * score[a * K];
    }
}
template <> void GenerateProposals<float, CUDAContext>(const int A,
                                                       const int feat_h,
                                                       const int feat_w,
                                                       const int stride,
                                                       const float im_h, const float im_w,
                                                       const float min_box_h, const float min_box_w,
                                                       const float* scores,
                                                       const float* bbox_deltas,
                                                       const float* anchors,
                                                       float* proposals) {
    // CUDA specialization: launch one thread per proposal
    // (anchor x spatial position).
    const int num_proposals = A * feat_h * feat_w;
    _GenerateProposals<float> << <GET_BLOCKS(num_proposals), CUDA_NUM_THREADS >> >(num_proposals,
                                                                                   A,
                                                                                   feat_h,
                                                                                   feat_w,
                                                                                   stride,
                                                                                   im_h, im_w,
                                                                                   min_box_h, min_box_w,
                                                                                   scores,
                                                                                   bbox_deltas,
                                                                                   anchors,
                                                                                   proposals);
    CUDA_POST_KERNEL_CHECK;
}
template <typename T>
__global__ void _GenerateProposals_v2(const int nthreads,
                                      const float im_h, const float im_w,
                                      const float min_box_h, const float min_box_w,
                                      const T* scores,
                                      const T* bbox_deltas,
                                      T* proposals) {
    // One thread per anchor; proposals[idx] must already hold the grid
    // anchor coordinates (see GenerateGridAnchors).
    CUDA_KERNEL_LOOP(idx, nthreads) {
        // bbox_deltas: [1, 4, nthreads], scores: [1, nthreads]
        const float dx = bbox_deltas[idx];
        const float dy = bbox_deltas[nthreads + idx];
        const float d_log_w = bbox_deltas[2 * nthreads + idx];
        const float d_log_h = bbox_deltas[3 * nthreads + idx];
        T* proposal = proposals + idx * 5;
        // a zero score marks boxes below the minimum size after clipping
        proposal[4] = _BBoxTransform(dx, dy,
                                     d_log_w, d_log_h,
                                     im_w, im_h,
                                     min_box_w, min_box_h,
                                     proposal) * scores[idx];
    }
}
template <> void GenerateProposals_v2<float, CUDAContext>(const int total_anchors,
                                                          const float im_h, const float im_w,
                                                          const float min_box_h, const float min_box_w,
                                                          const float* scores,
                                                          const float* bbox_deltas,
                                                          float* proposals) {
    // CUDA specialization: one thread per pre-flattened anchor;
    // `proposals` must be pre-filled with the grid anchor coordinates.
    _GenerateProposals_v2<float> << <GET_BLOCKS(total_anchors), CUDA_NUM_THREADS >> >(total_anchors,
                                                                                      im_h, im_w,
                                                                                      min_box_h, min_box_w,
                                                                                      scores,
                                                                                      bbox_deltas,
                                                                                      proposals);
    CUDA_POST_KERNEL_CHECK;
}
/******************** NMS ********************/
#define DIV_THEN_CEIL(x, y) (((x) + (y) - 1) / (y))
#define NMS_BLOCK_SIZE 64
template <typename T>
__device__ T iou(const T* A, const T* B) {
    // Intersection-over-union of two pixel-inclusive boxes [x1, y1, x2, y2]
    // (device twin of the CPU iou, minus its disjoint-box fast path).
    const T ix1 = max(A[0], B[0]);
    const T iy1 = max(A[1], B[1]);
    const T ix2 = min(A[2], B[2]);
    const T iy2 = min(A[3], B[3]);
    const T inter_w = max((T)0, ix2 - ix1 + (T)1);
    const T inter_h = max((T)0, iy2 - iy1 + (T)1);
    const T inter = inter_w * inter_h;
    const T area_a = (A[2] - A[0] + (T)1) * (A[3] - A[1] + (T)1);
    const T area_b = (B[2] - B[0] + (T)1) * (B[3] - B[1] + (T)1);
    return inter / (area_a + area_b - inter);
}
template <typename T>
__global__ static void nms_mask(const T boxes[],
                                unsigned long long mask[],
                                const int num_boxes,
                                const T nms_thresh) {
    // Tiled pairwise-IoU pass. Block (x, y) compares the boxes of tile x
    // (cached in shared memory) against those of tile y and records, for
    // every box j in tile y, a 64-bit mask of the tile-x boxes that
    // overlap it by more than nms_thresh.
    const int i_start = blockIdx.x * NMS_BLOCK_SIZE;
    const int di_end = min(num_boxes - i_start, NMS_BLOCK_SIZE);
    const int j_start = blockIdx.y * NMS_BLOCK_SIZE;
    const int dj_end = min(num_boxes - j_start, NMS_BLOCK_SIZE);
    // stage tile-x boxes (coordinates only, no score) in shared memory
    __shared__ T boxes_i[NMS_BLOCK_SIZE * 4];
    {
        const int di = threadIdx.x;
        if (di < di_end) {
            boxes_i[di * 4 + 0] = boxes[(i_start + di) * 5 + 0];
            boxes_i[di * 4 + 1] = boxes[(i_start + di) * 5 + 1];
            boxes_i[di * 4 + 2] = boxes[(i_start + di) * 5 + 2];
            boxes_i[di * 4 + 3] = boxes[(i_start + di) * 5 + 3];
        }
    }
    __syncthreads();
    {
        const int dj = threadIdx.x;
        if (dj < dj_end) {
            const T* const box_j = boxes + (j_start + dj) * 5;
            unsigned long long mask_j = 0;
            // on the diagonal tile, only compare against later (lower
            // scored) boxes so a box never suppresses itself
            const int di_start = (i_start == j_start) ? (dj + 1) : 0;
            for (int di = di_start; di < di_end; ++di) {
                const T* const box_i = boxes_i + di * 4;
                if (iou(box_j, box_i) > nms_thresh) {
                    mask_j |= 1ULL << di;
                }
            }
            {
                // mask layout: num_boxes rows of num_blocks 64-bit words
                const int num_blocks = DIV_THEN_CEIL(num_boxes, NMS_BLOCK_SIZE);
                const int bi = blockIdx.x;
                mask[(j_start + dj) * num_blocks + bi] = mask_j;
            }
        }
    }
}
template <typename T>
void _NMS(const int num_boxes,
          const int max_keeps,
          const float thresh,
          const float* proposals,
          int* roi_indices,
          int& num_rois,
          Tensor* mask) {
    // Two-phase NMS: (1) compute the pairwise suppression bitmask on the
    // GPU, (2) greedily select survivors on the CPU. `proposals` must be
    // sorted by score (descending); at most max_keeps indices are written
    // to roi_indices and their count to num_rois.
    const int num_blocks = DIV_THEN_CEIL(num_boxes, NMS_BLOCK_SIZE);
    {
        const dim3 blocks(num_blocks, num_blocks);
        // the bitmask is stored in an int tensor: num_boxes rows of
        // num_blocks 64-bit words
        vector<TIndex> mask_shape(2);
        mask_shape[0] = num_boxes;
        mask_shape[1] = num_blocks * sizeof(unsigned long long) / sizeof(int);
        mask->Reshape(mask_shape);
        nms_mask << <blocks, NMS_BLOCK_SIZE >> >(
            proposals, (unsigned long long*)mask->template mutable_data<int, CUDAContext>(),
            num_boxes, thresh);
        CUDA_POST_KERNEL_CHECK;
    }
    // discard i-th box if it is significantly overlapped with
    // one or more previous (= scored higher) boxes
    {
        // NOTE(review): requesting CPU data presumably triggers the
        // device->host transfer of the mask — confirm in Tensor impl
        const unsigned long long* p_mask_cpu
            = (unsigned long long*)mask->mutable_data<int, CPUContext>();
        int num_selected = 0;
        // accumulated suppression bits of all survivors so far
        vector<unsigned long long> dead_bit(num_blocks);
        for (int i = 0; i < num_blocks; ++i) {
            dead_bit[i] = 0;
        }
        for (int i = 0; i < num_boxes; ++i) {
            const int nblock = i / NMS_BLOCK_SIZE;
            const int inblock = i % NMS_BLOCK_SIZE;
            if (!(dead_bit[nblock] & (1ULL << inblock))) {
                roi_indices[num_selected++] = i;
                // fold in everything this survivor suppresses
                const unsigned long long* const mask_i = p_mask_cpu + i * num_blocks;
                for (int j = nblock; j < num_blocks; ++j) {
                    dead_bit[j] |= mask_i[j];
                }
                if (num_selected == max_keeps) {
                    break;
                }
            }
        }
        num_rois = num_selected;
    }
}
template <> void NMS<float, CUDAContext>(const int num_boxes,
                                         const int max_keeps,
                                         const float thresh,
                                         const float* proposals,
                                         int* roi_indices,
                                         int& num_rois,
                                         Tensor* mask) {
    // Thin dispatch onto the host-side implementation above.
    _NMS<float>(num_boxes, max_keeps, thresh, proposals,
                roi_indices, num_rois, mask);
}
} // namespace rcnn
} // namespace dragon
#endif // WITH_CUDA
\ No newline at end of file
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------
#ifndef DRAGON_CONTRIB_RCNN_BBOX_UTILS_H_
#define DRAGON_CONTRIB_RCNN_BBOX_UTILS_H_
#include "core/context.h"
#include "core/operator.h"
namespace dragon {
namespace rcnn {
#define ROUND(x) ((int)((x) + (T)0.5))
/******************** BBox ********************/
template <typename T>
int BBoxTransform(const T dx, const T dy,
const T d_log_w, const T d_log_h,
const T im_w, const T im_h,
const T min_box_w, const T min_box_h,
T* bbox) {
const T w = bbox[2] - bbox[0] + (T)1;
const T h = bbox[3] - bbox[1] + (T)1;
const T ctr_x = bbox[0] + (T)0.5 * w;
const T ctr_y = bbox[1] + (T)0.5 * h;
const T pred_ctr_x = dx * w + ctr_x;
const T pred_ctr_y = dy * h + ctr_y;
const T pred_w = exp(d_log_w) * w;
const T pred_h = exp(d_log_h) * h;
bbox[0] = pred_ctr_x - (T)0.5 * pred_w;
bbox[1] = pred_ctr_y - (T)0.5 * pred_h;
bbox[2] = pred_ctr_x + (T)0.5 * pred_w;
bbox[3] = pred_ctr_y + (T)0.5 * pred_h;
bbox[0] = std::max((T)0, std::min(bbox[0], im_w - (T)1));
bbox[1] = std::max((T)0, std::min(bbox[1], im_h - (T)1));
bbox[2] = std::max((T)0, std::min(bbox[2], im_w - (T)1));
bbox[3] = std::max((T)0, std::min(bbox[3], im_h - (T)1));
const T bbox_w = bbox[2] - bbox[0] + (T)1;
const T bbox_h = bbox[3] - bbox[1] + (T)1;
return (bbox_w >= min_box_w) * (bbox_h >= min_box_h);
}
/******************** Anchor ********************/
template <typename T>
void GenerateAnchors(int base_size,
                     const int num_ratios,
                     const int num_scales,
                     const T* ratios,
                     const T* scales,
                     T* anchors) {
    // Enumerate num_ratios * num_scales anchors centered on a
    // base_size x base_size window, ratio-major then scale-minor, writing
    // 4 coordinates [x1, y1, x2, y2] per anchor.
    const T base_area = (T)(base_size * base_size);
    const T center = (T)0.5 * (base_size - (T)1);
    T* out = anchors;
    for (int r = 0; r < num_ratios; ++r) {
        // reshape to the aspect ratio while (approximately) keeping area
        const T ratio_w = (T)ROUND(sqrt(base_area / ratios[r]));
        const T ratio_h = (T)ROUND(ratio_w * ratios[r]);
        for (int s = 0; s < num_scales; ++s) {
            const T half_w = (T)0.5 * (ratio_w * scales[s] - (T)1);
            const T half_h = (T)0.5 * (ratio_h * scales[s] - (T)1);
            out[0] = center - half_w;
            out[1] = center - half_h;
            out[2] = center + half_w;
            out[3] = center + half_h;
            out += 4;
        }
    }
}
template <typename T>
void GenerateGridAnchors(const int A, const int feat_h, const int feat_w,
                         const int stride,
                         const T* anchors,
                         T* proposals) {
    // Tile each of the A base anchors over the feat_h x feat_w grid in
    // (a, h, w) order. Rows are 5 wide; slot 4 (the score) is untouched.
    T* box = proposals;
    for (int a = 0; a < A; ++a) {
        const T* anchor = anchors + a * 4;
        for (int h = 0; h < feat_h; ++h) {
            const T shift_y = (T)(h * stride);
            for (int w = 0; w < feat_w; ++w) {
                const T shift_x = (T)(w * stride);
                box[0] = shift_x + anchor[0];
                box[1] = shift_y + anchor[1];
                box[2] = shift_x + anchor[2];
                box[3] = shift_y + anchor[3];
                box += 5;
            }
        }
    }
}
/******************** Proposal ********************/
// Decode per-anchor (scores, deltas) into scored proposal rows
// [x1, y1, x2, y2, score]; implemented separately per device context.
template <typename T, class Context>
void GenerateProposals(const int A, const int feat_h, const int feat_w,
                       const int stride,
                       const float im_h, const float im_w,
                       const float min_box_h, const float min_box_w,
                       const T* scores,
                       const T* bbox_deltas,
                       const T* anchors,
                       T* proposals);
// Variant for anchors already flattened into `proposals`
// (multi-level / FPN path); implemented separately per device context.
template <typename T, class Context>
void GenerateProposals_v2(const int total_anchors,
                          const float im_h, const float im_w,
                          const float min_box_h, const float min_box_w,
                          const T* scores,
                          const T* bbox_deltas,
                          T* proposals);
template <typename T>
void SortProposals(const int start,
                   const int end,
                   const int num_top,
                   T* proposals) {
    // Partial quicksort of rows [x1, y1, x2, y2, score] over [start, end],
    // descending by score (column 4). Recursion into the right partition
    // stops once it lies entirely past num_top, so only the top-num_top
    // rows are guaranteed fully sorted.
    const T pivot_score = proposals[start * 5 + 4];
    int left = start + 1, right = end;
    while (left <= right) {
        // advance both cursors past rows already on the correct side
        while (left <= end && proposals[left * 5 + 4] >= pivot_score) ++left;
        while (right > start && proposals[right * 5 + 4] <= pivot_score) --right;
        if (left <= right) {
            // swap entire 5-wide rows
            for (int i = 0; i < 5; ++i)
                std::swap(proposals[left * 5 + i], proposals[right * 5 + i]);
            ++left;
            --right;
        }
    }
    // place the pivot row between the two partitions
    if (right > start) {
        for (int i = 0; i < 5; ++i)
            std::swap(proposals[start * 5 + i], proposals[right * 5 + i]);
    }
    if (start < right - 1) SortProposals(start, right - 1, num_top, proposals);
    if (right + 1 < num_top && right + 1 < end) SortProposals(right + 1, end, num_top, proposals);
}
template <typename T>
void RetrieveRoIs(const int num_rois,
                  const int roi_batch_ind,
                  const T* proposals,
                  const int* roi_indices,
                  T* rois) {
    // Gather the kept proposals into the output buffer, prepending the
    // batch index: rois[i] = [batch_ind, x1, y1, x2, y2].
    for (int i = 0; i < num_rois; ++i) {
        const T* box = proposals + roi_indices[i] * 5;
        T* roi = rois + i * 5;
        roi[0] = (T)roi_batch_ind;
        roi[1] = box[0];
        roi[2] = box[1];
        roi[3] = box[2];
        roi[4] = box[3];
    }
}
template <typename T>
int roi_level(const int min_level,        // e.g. 2
              const int max_level,        // e.g. 5
              const int canonical_level,  // e.g. 4
              const int canonical_scale,  // e.g. 224
              T* roi) {
    // Map a roi (row layout [batch_ind, x1, y1, x2, y2]) to a pyramid
    // level following the FPN paper, Eq.(1):
    //     k = floor(k0 + log2(sqrt(w * h) / canonical_scale)),
    // clamped into [min_level, max_level].
    const T w = roi[3] - roi[1] + (T)1;
    const T h = roi[4] - roi[2] + (T)1;
    // BUGFIX: the paper specifies log2, but std::log is the natural log,
    // which under-scales the exponent (e.g. a 448x448 roi landed on level
    // 4 instead of 5); the explicit floor also replaces the implicit
    // int conversion, which truncates toward zero rather than down.
    const T s = std::max(std::sqrt(w * h), T(1)) / T(canonical_scale);
    const int level = canonical_level + (int)std::floor(std::log2(s));
    return std::min(max_level, std::max(min_level, level));
}
template <typename T>
void CollectRoIs(const int num_rois,
                 const int min_level,
                 const int max_level,
                 const int canonical_level,
                 const int canonical_scale,
                 const T* rois,
                 vector< vector<TIndex> >& roi_bins) {
    // Assign every roi (rows of 5: [batch_ind, x1, y1, x2, y2]) to a
    // pyramid bin; roi_bins[k] collects the row indices for level
    // min_level + k.
    for (int i = 0; i < num_rois; ++i) {
        const T* roi = rois + i * 5;
        const int level = roi_level(min_level, max_level,
                                    canonical_level, canonical_scale,
                                    roi);
        // roi_level already clamps to [min_level, max_level]; the max is
        // a defensive guard against a negative bin index
        roi_bins[std::max(level - min_level, 0)].push_back(i);
    }
}
template <typename T>
void DistributeRoIs(const vector< vector<TIndex> >& roi_bins,
const T* rois,
vector<T*> outputs) {
for (int i = 0; i < roi_bins.size(); i++) {
auto* y = outputs[i];
if (roi_bins[i].size() == 0) {
// fake a tiny roi to avoid empty roi pooling
y[0] = 0, y[1] = 0, y[2] = 0, y[3] = 1, y[4] = 1;
} else {
for (int j = 0; j < roi_bins[i].size(); ++j) {
const T* roi = rois + roi_bins[i][j] * 5;
for (int k = 0; k < 5; ++k) y[k] = roi[k];
y += 5;
}
}
}
}
/******************** NMS ********************/
template <typename T, class Context>
void NMS(const int num_boxes,
const int max_keeps,
const T thresh,
const T* proposals,
int* roi_indices,
int& num_rois,
Tensor* mask);
} // namespace rcnn
} // namespace dragon
#endif // DRAGON_CONTRIB_RCNN_BBOX_UTILS_H_
\ No newline at end of file
#include "contrib/rcnn/proposal_op.h"
#include "contrib/rcnn/bbox_utils.h"
namespace dragon {
template <class Context> template <typename T>
void ProposalOp<Context>::RunWithType() {
    // Decode RPN outputs into RoIs, image by image.
    // input(-1): im_info, 3 values per image [height, width, im_scale].
    // output(0): rois, 5 values per row [batch_ind, x1, y1, x2, y2].
    TIndex total_rois = 0;
    auto* im_info = input(-1).template data<T, CPUContext>();
    auto* Ydata = output(0)->template mutable_data<T, CPUContext>();
    for (int n = 0; n < num_images; ++n) {
        const T im_height = im_info[0];
        const T im_width = im_info[1];
        const T scale = im_info[2];
        // minimum box extent is measured in the scaled image
        const T min_box_h = min_size * scale;
        const T min_box_w = min_size * scale;
        int num_rois = 0;
        if (strides.size() == 1) {
            // case 1: single stride (Faster R-CNN)
            const TIndex feat_height = input(0).dim(2);
            const TIndex feat_width = input(0).dim(3);
            const TIndex K = feat_height * feat_width;
            const TIndex A = ratios.size() * scales.size();
            const TIndex num_proposals = K * A;
            const TIndex pre_nms_topn = std::min(num_proposals, pre_nms_top_n);
            anchors_.Reshape(vector<TIndex>({ A, 4 }));
            proposals_.Reshape(vector<TIndex>({ num_proposals, 5 }));
            rcnn::GenerateAnchors<T>(strides[0], (int)ratios.size(), (int)scales.size(),
                                     &ratios[0], &scales[0],
                                     anchors_.template mutable_data<T, CPUContext>());
            // '+ num_proposals' skips the first half of the score blob;
            // NOTE(review): assumes a [bg..., fg...] score layout — confirm
            rcnn::GenerateProposals<T, Context>(A, feat_height, feat_width, strides[0],
                                                im_height, im_width, min_box_h, min_box_w,
                                                input(0).template data<T, Context>() + num_proposals,
                                                input(1).template data<T, Context>(),
                                                anchors_.template mutable_data<T, Context>(),
                                                proposals_.template mutable_data<T, Context>());
            // partial sort: only the top pre_nms_top_n need full ordering
            rcnn::SortProposals(0, num_proposals - 1, pre_nms_top_n,
                                proposals_.template mutable_data<T, CPUContext>());
            rcnn::NMS<T, Context>(pre_nms_topn, post_nms_top_n, nms_thresh,
                                  proposals_.template mutable_data<T, Context>(),
                                  roi_indices_.template mutable_data<int, CPUContext>(),
                                  num_rois,
                                  &nms_mask_);
            rcnn::RetrieveRoIs<T>(num_rois, n, proposals_.template mutable_data<T, CPUContext>(),
                                  roi_indices_.template mutable_data<int, CPUContext>(),
                                  Ydata);
        } else if (strides.size() > 1) {
            // case 2: multiple strides (FPN / Mask R-CNN / RetinaNet)
            CHECK_EQ(strides.size(), (int)InputSize() - 3)
                << "\nGiven " << strides.size() << " strides and "
                << InputSize() - 3 << " feature inputs";
            CHECK_EQ(strides.size(), scales.size())
                << "\nGiven " << strides.size() << " strides and "
                << scales.size() << " scales";
            // cls_probs: [1, 2, total_proposals]
            // bbox_deltas: [1, 4, total_proposals]
            TIndex total_proposals = input(-3).dim(2), acc_proposals = 0;
            // NOTE(review): stray second ';' below — harmless empty statement
            const TIndex pre_nms_topn = std::min(total_proposals, pre_nms_top_n);;
            proposals_.Reshape(vector<TIndex>({ total_proposals, 5 }));
            auto* proposals = proposals_.template mutable_data<T, CPUContext>();
            // lay grid anchors for every pyramid level into one flat buffer
            for (int i = 0; i < strides.size(); i++) {
                const TIndex feat_height = input(i).dim(2);
                const TIndex feat_width = input(i).dim(3);
                const TIndex K = feat_height * feat_width;
                const TIndex A = ratios.size();
                const TIndex num_proposals = K * A;
                anchors_.Reshape(vector<TIndex>({ A, 4 }));
                rcnn::GenerateAnchors<T>(strides[i], (int)ratios.size(), 1,
                                         &ratios[0], &scales[0],
                                         anchors_.template mutable_data<T, CPUContext>());
                rcnn::GenerateGridAnchors<T>(A, feat_height, feat_width, strides[i],
                                             anchors_.template mutable_data<T, CPUContext>(),
                                             proposals);
                acc_proposals += num_proposals;
                proposals += (num_proposals * 5);
            }
            CHECK_EQ(acc_proposals, total_proposals)
                << "\nExcepted " << total_proposals << " proposals from the network, "
                << "but generated " << acc_proposals << " proposals.";
            // decode all levels at once; '+ total_proposals' skips the
            // first score row (see the layout note above)
            rcnn::GenerateProposals_v2<T, Context>(total_proposals, im_height, im_width,
                                                   min_box_h, min_box_w,
                                                   input(-3).template data<T, Context>() + total_proposals,
                                                   input(-2).template data<T, Context>(),
                                                   proposals_.template mutable_data<T, Context>());
            rcnn::SortProposals(0, total_proposals - 1, pre_nms_top_n,
                                proposals_.template mutable_data<T, CPUContext>());
            rcnn::NMS<T, Context>(pre_nms_topn, post_nms_top_n, nms_thresh,
                                  proposals_.template mutable_data<T, Context>(),
                                  roi_indices_.template mutable_data<int, CPUContext>(),
                                  num_rois,
                                  &nms_mask_);
            rcnn::RetrieveRoIs<T>(num_rois, n, proposals_.template mutable_data<T, CPUContext>(),
                                  roi_indices_.template mutable_data<int, CPUContext>(),
                                  Ydata);
        } else {
            LOG(FATAL) << "There should be given at least one stride for proposals.";
        }
        total_rois += num_rois;
        Ydata += (num_rois * 5);
        im_info += 3;
    }
    // shrink the output to the rois actually kept
    output(0)->Reshape(vector<TIndex>({ total_rois, 5 }));
    // distribute rois into K bins
    if (OutputSize() > 1) {
        CHECK_EQ(max_level - min_level + 1, (int)OutputSize())
            << "Excepted " << OutputSize() << " outputs for levels between "
            << "[" << min_level << ", " << max_level << "].";
        vector< vector<TIndex> > roi_bins(OutputSize(), vector<TIndex>());
        vector<T*> outputs;
        // snapshot all rois before the per-level outputs are re-shaped
        Tensor collective_rois;
        collective_rois.ReshapeLike(*output(0));
        auto* rois = collective_rois.template mutable_data<T, CPUContext>();
        CPUContext::template Copy<T, CPUContext, CPUContext>(collective_rois.count(),
                                                             rois,
                                                             output(0)->template data<T, CPUContext>());
        rcnn::CollectRoIs<T>(total_rois, min_level, max_level,
                             canonical_level, canonical_scale,
                             rois,
                             roi_bins);
        for (int i = 0; i < OutputSize(); i++) {
            // at least one (possibly faked) roi per level so downstream
            // roi pooling never sees an empty input
            output(i)->Reshape(vector<TIndex>({ std::max((int)roi_bins[i].size(), 1), 5 }));
            outputs.push_back(output(i)->template mutable_data<T, CPUContext>());
        }
        rcnn::DistributeRoIs(roi_bins, rois, outputs);
    }
}
template <class Context>
void ProposalOp<Context>::RunOnDevice() {
    num_images = input(0).dim(0);
    //  input(-1) is the im_info blob: 3 values (height, width, im_scale) per image
    //  BUG FIX: the message printed "Excepted <num_images * 3> groups" while
    //  "got" printed count / 3 groups — units disagreed and "Excepted" is a typo.
    CHECK_EQ(input(-1).count(), num_images * 3)
        << "Expected " << num_images << " groups image info, "
        << "but got " << input(-1).count() / 3 << ".";
    //  scratch indices kept by NMS; presumably reused per image — confirm
    roi_indices_.Reshape(vector<TIndex>(1, post_nms_top_n));
    //  worst-case RoI capacity; trimmed to the real count inside RunWithType
    output(0)->Reshape(vector<TIndex>({ num_images * post_nms_top_n, 5 }));
    //  only float inputs are implemented on either device
    if (TypeMeta::Id<Context>() == TypeMeta::Id<CPUContext>()) {
        if (input(0).template IsType<float>()) RunWithType<float>();
        else LOG(FATAL) << "Unsupported input types.";
    } else if (TypeMeta::Id<Context>() == TypeMeta::Id<CUDAContext>()) {
        if (input(0).template IsType<float>()) RunWithType<float>();
        else LOG(FATAL) << "Unsupported input types.";
    }
}
DEPLOY_CPU(Proposal);
#ifdef WITH_CUDA
DEPLOY_CUDA(Proposal);
#endif
OPERATOR_SCHEMA(Proposal).NumInputs(3, INT_MAX).NumOutputs(1, INT_MAX);
} // namespace dragon
\ No newline at end of file
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------
#ifndef DRAGON_CONTRIB_RCNN_PROPOSAL_OP_H_
#define DRAGON_CONTRIB_RCNN_PROPOSAL_OP_H_
#include "core/operator.h"
namespace dragon {
template <class Context>
class ProposalOp final : public Operator<Context> {
 public:
    //  RCNN proposal layer: decodes anchor boxes from score/delta maps,
    //  keeps the top pre_nms_top_n by score, applies NMS with nms_thresh,
    //  and emits at most post_nms_top_n RoIs per image. The level-related
    //  arguments distribute the RoIs into FPN pyramid bins when more than
    //  one output is requested.
    ProposalOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          strides(OperatorBase::GetRepeatedArg<int>("strides")),
          ratios(OperatorBase::GetRepeatedArg<float>("ratios")),
          scales(OperatorBase::GetRepeatedArg<float>("scales")),
          pre_nms_top_n(OperatorBase::GetSingleArg<int>("pre_nms_top_n", 6000)),
          post_nms_top_n(OperatorBase::GetSingleArg<int>("post_nms_top_n", 300)),
          nms_thresh(OperatorBase::GetSingleArg<float>("nms_thresh", (float)0.7)),
          min_size(OperatorBase::GetSingleArg<int>("min_size", 16)),
          min_level(OperatorBase::GetSingleArg<int>("min_level", 2)),
          max_level(OperatorBase::GetSingleArg<int>("max_level", 5)),
          canonical_level(OperatorBase::GetSingleArg<int>("canonical_level", 4)),
          canonical_scale(OperatorBase::GetSingleArg<int>("canonical_scale", 224)) {}

    void RunOnDevice() override;
    template <typename T> void RunWithType();

 protected:
    //  feature strides, one per pyramid level fed to the op
    vector<int> strides;
    //  anchor aspect ratios and scales used to build the anchor table
    vector<float> ratios, scales;
    //  top-k limits before/after NMS, min box size, and batch size cache
    TIndex pre_nms_top_n, post_nms_top_n, min_size, num_images;
    //  FPN level range and the canonical level/scale for RoI assignment
    TIndex min_level, max_level, canonical_level, canonical_scale;
    float nms_thresh;
    //  scratch tensors reused across runs
    Tensor anchors_, proposals_, roi_indices_, nms_mask_;
};
} // namespace dragon
#endif    // DRAGON_CONTRIB_RCNN_PROPOSAL_OP_H_
\ No newline at end of file
#include "operators/misc/proposal_op.h"
namespace dragon {
#define ROUND(x) ((int)((x) + (T)0.5))

//  Enumerate the (x1, y1, x2, y2) anchor boxes for every (ratio, scale)
//  pair. All anchors share the center of the base_size x base_size box;
//  output is row-major, num_ratios * num_scales rows of 4 values.
template <typename T>
static void generate_anchors(int base_size,
                             const T ratios[],
                             const T scales[],
                             const int num_ratios,
                             const int num_scales,
                             T anchors[]) {
    //  area and center of the base box
    const T base_area = (T)(base_size * base_size);
    const T ctr = (T)0.5 * (base_size - (T)1);
    T* out = anchors;
    for (int r = 0; r < num_ratios; ++r) {
        //  width & height for this aspect ratio, preserving the base area
        const T w_r = (T)ROUND(sqrt(base_area / ratios[r]));
        const T h_r = (T)ROUND(w_r * ratios[r]);
        for (int s = 0; s < num_scales; ++s) {
            //  half-extents after applying the scale factor
            const T half_w = (T)0.5 * (w_r * scales[s] - (T)1);
            const T half_h = (T)0.5 * (h_r * scales[s] - (T)1);
            //  corner-form anchor around the shared center
            out[0] = ctr - half_w;
            out[1] = ctr - half_h;
            out[2] = ctr + half_w;
            out[3] = ctr + half_h;
            out += 4;
        }
    }
}
template <typename T>
static int transform_box(T box[],
const T dx, const T dy,
const T d_log_w, const T d_log_h,
const T img_W, const T img_H,
const T min_box_W, const T min_box_H) {
// width & height of box
const T w = box[2] - box[0] + (T)1;
const T h = box[3] - box[1] + (T)1;
// center location of box
const T ctr_x = box[0] + (T)0.5 * w;
const T ctr_y = box[1] + (T)0.5 * h;
// new center location according to gradient (dx, dy)
const T pred_ctr_x = dx * w + ctr_x;
const T pred_ctr_y = dy * h + ctr_y;
// new width & height according to gradient d(log w), d(log h)
const T pred_w = exp(d_log_w) * w;
const T pred_h = exp(d_log_h) * h;
// update upper-left corner location
box[0] = pred_ctr_x - (T)0.5 * pred_w;
box[1] = pred_ctr_y - (T)0.5 * pred_h;
// update lower-right corner location
box[2] = pred_ctr_x + (T)0.5 * pred_w;
box[3] = pred_ctr_y + (T)0.5 * pred_h;
// adjust new corner locations to be within the image region,
box[0] = std::max((T)0, std::min(box[0], img_W - (T)1));
box[1] = std::max((T)0, std::min(box[1], img_H - (T)1));
box[2] = std::max((T)0, std::min(box[2], img_W - (T)1));
box[3] = std::max((T)0, std::min(box[3], img_H - (T)1));
// recompute new width & height
const T box_w = box[2] - box[0] + (T)1;
const T box_h = box[3] - box[1] + (T)1;
// check if new box's size >= threshold
return (box_w >= min_box_W) * (box_h >= min_box_H);
}
//  Decode every anchor at every spatial location into one scored proposal
//  row (x1, y1, x2, y2, score). A proposal that fails the min-size test
//  after regression gets score 0 via transform_box's 0/1 return value.
//  NOTE: bottom4d is expected to point at the foreground scores only.
template <typename T>
static void enumerate_proposals_cpu(const T bottom4d[],
                                    const T d_anchor4d[],
                                    const T anchors[],
                                    T proposals[],
                                    const int num_anchors,
                                    const int bottom_H, const int bottom_W,
                                    const T img_H, const T img_W,
                                    const T min_box_H, const T min_box_W,
                                    const int feat_stride) {
    const int spatial = bottom_H * bottom_W;
    T* out = proposals;
    for (int h = 0; h < bottom_H; ++h) {
        for (int w = 0; w < bottom_W; ++w) {
            const int loc = h * bottom_W + w;
            //  anchor origin for this feature-map cell in image coordinates
            const T shift_x = w * feat_stride;
            const T shift_y = h * feat_stride;
            //  channel-major views of the deltas / scores at this location
            const T* deltas = d_anchor4d + loc;
            const T* scores = bottom4d + loc;
            for (int a = 0; a < num_anchors; ++a) {
                const T dx = deltas[(a * 4 + 0) * spatial];
                const T dy = deltas[(a * 4 + 1) * spatial];
                const T d_log_w = deltas[(a * 4 + 2) * spatial];
                const T d_log_h = deltas[(a * 4 + 3) * spatial];
                //  shifted anchor corners, then regression + clipping
                out[0] = shift_x + anchors[a * 4 + 0];
                out[1] = shift_y + anchors[a * 4 + 1];
                out[2] = shift_x + anchors[a * 4 + 2];
                out[3] = shift_y + anchors[a * 4 + 3];
                out[4]
                    = transform_box(out,
                                    dx, dy, d_log_w, d_log_h,
                                    img_W, img_H, min_box_W, min_box_H)
                      * scores[a * spatial];
                out += 5;
            }
        }
    }
}
//  Partially sort boxes [start, end] into descending-score order using
//  quicksort with a first-element pivot. Recursion into the right
//  partition is pruned once it cannot affect the first num_top ranks.
//  Each box is 5 consecutive values (x1, y1, x2, y2, score).
template <typename T>
static void sort_box(T list_cpu[],
                     const int start,
                     const int end,
                     const int num_top) {
    const T pivot = list_cpu[start * 5 + 4];
    int lo = start + 1;
    int hi = end;
    //  Hoare-style partition around the pivot score
    while (lo <= hi) {
        while (lo <= end && list_cpu[lo * 5 + 4] >= pivot) ++lo;
        while (hi > start && list_cpu[hi * 5 + 4] <= pivot) --hi;
        if (lo <= hi) {
            //  swap the out-of-place pair of boxes (5 values each)
            for (int q = 0; q < 5; ++q) {
                const T held = list_cpu[lo * 5 + q];
                list_cpu[lo * 5 + q] = list_cpu[hi * 5 + q];
                list_cpu[hi * 5 + q] = held;
            }
            ++lo;
            --hi;
        }
    }
    //  move the pivot box into its final position
    if (hi > start) {
        for (int q = 0; q < 5; ++q) {
            const T held = list_cpu[start * 5 + q];
            list_cpu[start * 5 + q] = list_cpu[hi * 5 + q];
            list_cpu[hi * 5 + q] = held;
        }
    }
    //  left partition always matters for the top-k prefix
    if (start < hi - 1) {
        sort_box(list_cpu, start, hi - 1, num_top);
    }
    //  right partition only matters while it overlaps the top-k prefix
    if (hi + 1 < num_top && hi + 1 < end) {
        sort_box(list_cpu, hi + 1, end, num_top);
    }
}
//  Intersection-over-union of two corner-form boxes using inclusive pixel
//  coordinates (width = x2 - x1 + 1). Returns 0 for disjoint boxes.
template <typename T>
static T iou(const T A[], const T B[]) {
    //  early-out: no overlap at all
    if (A[0] > B[2] || A[1] > B[3] || A[2] < B[0] || A[3] < B[1]) return 0;
    //  corners of the overlapped region
    const T ix1 = std::max(A[0], B[0]);
    const T iy1 = std::max(A[1], B[1]);
    const T ix2 = std::min(A[2], B[2]);
    const T iy2 = std::min(A[3], B[3]);
    //  intersection area (clamped to zero)
    const T iw = std::max((T)0, ix2 - ix1 + (T)1);
    const T ih = std::max((T)0, iy2 - iy1 + (T)1);
    const T inter = iw * ih;
    //  individual areas
    const T area_a = (A[2] - A[0] + (T)1) * (A[3] - A[1] + (T)1);
    const T area_b = (B[2] - B[0] + (T)1) * (B[3] - B[1] + (T)1);
    //  IoU = intersection / union
    return inter / (area_a + area_b - inter);
}
//  Greedy non-maximum suppression over score-sorted boxes.
//  boxes holds num_boxes rows of (x1, y1, x2, y2, score) sorted by
//  descending score; the kept indices (offset by base_index) are written
//  to index_out, at most max_num_out of them; *num_out gets the count.
template <typename T>
void nms_cpu(const int num_boxes,
             const T boxes[],
             int index_out[],
             int* const num_out,
             const int base_index,
             const T nms_thresh,
             const int max_num_out) {
    int count = 0;
    //  the fill constructor zero-initializes every flag; the original
    //  manual fill loop and trailing clear() were redundant
    std::vector<char> is_dead(num_boxes, 0);
    for (int i = 0; i < num_boxes; ++i) {
        if (is_dead[i]) continue;
        index_out[count++] = base_index + i;
        if (count == max_num_out) break;
        //  suppress every lower-scored box overlapping box i too much
        for (int j = i + 1; j < num_boxes; ++j)
            if (!is_dead[j] && iou(&boxes[i * 5], &boxes[j * 5]) > nms_thresh)
                is_dead[j] = 1;
    }
    *num_out = count;
}
template <typename T>
static void retrieve_rois_cpu(const int num_rois,
const int item_index,
const T proposals[],
const int roi_indices[],
T rois[], T roi_scores[]) {
for (int i = 0; i < num_rois; ++i) {
const T* const proposals_index = proposals + roi_indices[i] * 5;
rois[i * 5 + 0] = item_index;
rois[i * 5 + 1] = proposals_index[0];
rois[i * 5 + 2] = proposals_index[1];
rois[i * 5 + 3] = proposals_index[2];
rois[i * 5 + 4] = proposals_index[3];
if (roi_scores) {
roi_scores[i] = proposals_index[4];
}
}
}
//  CPU path: decode anchors into proposals, partially sort by score,
//  run NMS, and write the surviving RoIs (and optionally their scores)
//  into the outputs, trimming output shapes to the real RoI count.
template <class Context> template <typename T>
void ProposalOp<Context>::RunWithType() {
    auto* p_bottom_item = input(0).template data<T, CPUContext>();
    auto* p_d_anchor_item = input(1).template data<T, CPUContext>();
    auto* p_img_info_cpu = input(2).template data<T, CPUContext>();
    auto* p_roi_item = output(0)->template mutable_data<T, CPUContext>();
    //  scores output is optional
    auto* p_score_item = (OutputSize() > 1) ? output(1)->template mutable_data<T, CPUContext>() : NULL;
    vector<TIndex> proposals_shape(2), top_shape(2);
    proposals_shape[0] = 0; proposals_shape[1] = 5;
    top_shape[0] = 0; top_shape[1] = 5;
    //  NOTE(review): p_img_info_cpu and p_roi_item are never advanced inside
    //  this loop; RunOnDevice enforces a batch size of 1, so this is safe
    //  only for single-item batches — confirm before lifting that check.
    for (int n = 0; n < input(0).dim(0); ++n) {
        // bottom shape: (2 x num_anchors) x H x W
        const int bottom_H = input(0).dim(2);
        const int bottom_W = input(0).dim(3);
        // input image height & width
        const T img_H = p_img_info_cpu[0];
        const T img_W = p_img_info_cpu[1];
        // scale factor for height & width
        const T scale_H = p_img_info_cpu[2];
        const T scale_W = p_img_info_cpu[3];
        // minimum box width & height
        const T min_box_H = min_size_ * scale_H;
        const T min_box_W = min_size_ * scale_W;
        // number of all proposals = num_anchors * H * W
        const int num_proposals = anchors_.dim(0) * bottom_H * bottom_W;
        // number of top-n proposals before NMS
        const int pre_nms_topn = std::min(num_proposals, pre_nms_topn_);
        // number of final RoIs
        int num_rois = 0;
        // enumerate all proposals
        //   (x1, y1, x2, y2, score) for each proposal
        // NOTE: for bottom, only foreground scores are passed
        //   (hence the "+ num_proposals" offset skipping background scores)
        proposals_shape[0] = num_proposals;
        proposals_.Reshape(proposals_shape);
        enumerate_proposals_cpu(p_bottom_item + num_proposals, p_d_anchor_item,
            anchors_.template data<T, CPUContext>(), proposals_.mutable_data<T, CPUContext>(),
            anchors_.dim(0), bottom_H, bottom_W, img_H, img_W, min_box_H, min_box_W, feat_stride_);
        //  descending partial sort so NMS sees the highest scores first
        sort_box(proposals_.mutable_data<T, CPUContext>(), 0, num_proposals - 1, pre_nms_topn_);
        nms_cpu(pre_nms_topn, proposals_.template data<T, CPUContext>(),
            roi_indices_.mutable_data<int, CPUContext>(), &num_rois,
            0, nms_thresh_, post_nms_topn_);
        //  pack the kept proposals as (n, x1, y1, x2, y2) rows
        retrieve_rois_cpu(
            num_rois, n, proposals_.template data<T, CPUContext>(),
            roi_indices_.template data<int, CPUContext>(), p_roi_item, p_score_item);
        top_shape[0] += num_rois;
    }
    //  shrink outputs to the actual number of RoIs produced
    output(0)->Reshape(top_shape);
    if (OutputSize() > 1) {
        top_shape.pop_back();
        output(1)->Reshape(top_shape);
    }
}
//  One-time setup: build the anchor table from the ratio/scale arguments
//  and give the outputs their worst-case shapes (post_nms_topn_ RoIs).
template <class Context>
void ProposalOp<Context>::Setup() {
    vector<float> ratios(OperatorBase::GetRepeatedArg<float>("ratios"));
    vector<float> scales(OperatorBase::GetRepeatedArg<float>("scales"));
    //  one (x1, y1, x2, y2) anchor per (ratio, scale) pair
    vector<TIndex> anchors_shape(2);
    anchors_shape[0] = ratios.size() * scales.size();
    anchors_shape[1] = 4;
    anchors_.Reshape(anchors_shape);
    generate_anchors(base_size_, &ratios[0], &scales[0],
                     (int)ratios.size(), (int)scales.size(),
                     anchors_.mutable_data<float, CPUContext>());
    //  scratch buffer for the indices kept by NMS
    vector<TIndex> roi_indices_shape(1);
    roi_indices_shape[0] = post_nms_topn_;
    roi_indices_.Reshape(roi_indices_shape);
    // rois blob : holds R regions of interest, each is a 5 - tuple
    // (n, x1, y1, x2, y2) specifying an image batch index n and a
    // rectangle(x1, y1, x2, y2)
    vector<TIndex> top_shape(2);
    top_shape[0] = 1 * post_nms_topn_;
    top_shape[1] = 5;
    output(0)->Reshape(top_shape);
    // scores blob : holds scores for R regions of interest
    if (OutputSize() > 1) {
        top_shape.pop_back();
        //  BUG FIX: the scores blob is output(1); the original reshaped
        //  output(0) again, leaving output(1) unshaped
        output(1)->Reshape(top_shape);
    }
}
//  Entry point: validates the batch size and dispatches to the typed
//  implementation for the active device context.
template <class Context>
void ProposalOp<Context>::RunOnDevice() {
    //  the per-image buffers in RunWithType are not advanced across a batch
    CHECK_EQ(input(0).dim(0), 1) << "Only single item batches are supported.";
    //  only float inputs are implemented on either device
    if (TypeMeta::Id<Context>() == TypeMeta::Id<CPUContext>()) {
        if (input(0).template IsType<float>()) RunWithType<float>();
        else LOG(FATAL) << "Unsupported input types.";
    } else if (TypeMeta::Id<Context>() == TypeMeta::Id<CUDAContext>()) {
        if (input(0).template IsType<float>()) RunWithType<float>();
        else LOG(FATAL) << "Unsupported input types.";
    }
}
DEPLOY_CPU(Proposal);
#ifdef WITH_CUDA
DEPLOY_CUDA(Proposal);
#endif
OPERATOR_SCHEMA(Proposal).NumInputs(3).NumOutputs(1, 2);
} // namespace dragon
\ No newline at end of file
#include "operators/misc/proposal_op.h"
#include "utils/cuda_device.h"
namespace dragon {
//  Device-side bbox-delta decoding; mirrors the host transform_box:
//  applies (dx, dy, d_log_w, d_log_h) to box in place, clips the result
//  to the image bounds, and returns 1 iff the clipped box still meets
//  the minimum width/height constraint.
template <typename Dtype>
__device__ static int transform_box(Dtype box[],
                                    const Dtype dx,
                                    const Dtype dy,
                                    const Dtype d_log_w,
                                    const Dtype d_log_h,
                                    const Dtype img_W,
                                    const Dtype img_H,
                                    const Dtype min_box_W,
                                    const Dtype min_box_H) {
    // width & height of box (inclusive pixel coordinates)
    const Dtype w = box[2] - box[0] + (Dtype)1;
    const Dtype h = box[3] - box[1] + (Dtype)1;
    // center location of box
    const Dtype ctr_x = box[0] + (Dtype)0.5 * w;
    const Dtype ctr_y = box[1] + (Dtype)0.5 * h;
    // new center location according to gradient (dx, dy)
    const Dtype pred_ctr_x = dx * w + ctr_x;
    const Dtype pred_ctr_y = dy * h + ctr_y;
    // new width & height according to gradient d(log w), d(log h)
    const Dtype pred_w = exp(d_log_w) * w;
    const Dtype pred_h = exp(d_log_h) * h;
    // update upper-left corner location
    box[0] = pred_ctr_x - (Dtype)0.5 * pred_w;
    box[1] = pred_ctr_y - (Dtype)0.5 * pred_h;
    // update lower-right corner location
    box[2] = pred_ctr_x + (Dtype)0.5 * pred_w;
    box[3] = pred_ctr_y + (Dtype)0.5 * pred_h;
    // adjust new corner locations to be within the image region
    box[0] = max((Dtype)0, min(box[0], img_W - (Dtype)1));
    box[1] = max((Dtype)0, min(box[1], img_H - (Dtype)1));
    box[2] = max((Dtype)0, min(box[2], img_W - (Dtype)1));
    box[3] = max((Dtype)0, min(box[3], img_H - (Dtype)1));
    // recompute new width & height
    const Dtype box_w = box[2] - box[0] + (Dtype)1;
    const Dtype box_h = box[3] - box[1] + (Dtype)1;
    // check if new box's size >= threshold
    return (box_w >= min_box_W) * (box_h >= min_box_H);
}
//  Host-side partial quicksort into descending-score order, identical in
//  behavior to the CPU implementation: first-element pivot, right-hand
//  recursion pruned once it cannot influence the first num_top ranks.
//  Each box occupies 5 values (x1, y1, x2, y2, score).
template <typename Dtype>
static void sort_box(Dtype* list_cpu, const int start, const int end, const int num_top) {
    const Dtype pivot = list_cpu[start * 5 + 4];
    int lo = start + 1;
    int hi = end;
    //  partition around the pivot score
    while (lo <= hi) {
        while (lo <= end && list_cpu[lo * 5 + 4] >= pivot) ++lo;
        while (hi > start && list_cpu[hi * 5 + 4] <= pivot) --hi;
        if (lo <= hi) {
            //  swap the out-of-place pair of boxes
            for (int q = 0; q < 5; ++q) {
                const Dtype held = list_cpu[lo * 5 + q];
                list_cpu[lo * 5 + q] = list_cpu[hi * 5 + q];
                list_cpu[hi * 5 + q] = held;
            }
            ++lo;
            --hi;
        }
    }
    //  move the pivot box into its final position
    if (hi > start) {
        for (int q = 0; q < 5; ++q) {
            const Dtype held = list_cpu[start * 5 + q];
            list_cpu[start * 5 + q] = list_cpu[hi * 5 + q];
            list_cpu[hi * 5 + q] = held;
        }
    }
    //  left partition always matters for the top-k prefix
    if (start < hi - 1) {
        sort_box(list_cpu, start, hi - 1, num_top);
    }
    //  right partition only matters while it overlaps the top-k prefix
    if (hi + 1 < num_top && hi + 1 < end) {
        sort_box(list_cpu, hi + 1, end, num_top);
    }
}
//  One thread per (h, w, anchor) triple: decodes that anchor into a scored
//  proposal row (x1, y1, x2, y2, score) at proposals[index * 5].
//  NOTE: bottom4d is expected to point at the foreground scores only.
template <typename Dtype>
__global__ static void enumerate_proposals_gpu(const int nthreads,
                                               const Dtype bottom4d[],
                                               const Dtype d_anchor4d[],
                                               const Dtype anchors[],
                                               Dtype proposals[],
                                               const int num_anchors,
                                               const int bottom_H, const int bottom_W,
                                               const Dtype img_H, const Dtype img_W,
                                               const Dtype min_box_H, const Dtype min_box_W,
                                               const int feat_stride) {
    CUDA_KERNEL_LOOP(index, nthreads) {
        //  unpack the linear index into (h, w, anchor k)
        const int h = index / num_anchors / bottom_W;
        const int w = (index / num_anchors) % bottom_W;
        const int k = index % num_anchors;
        //  anchor origin for this feature-map cell in image coordinates
        const Dtype x = w * feat_stride;
        const Dtype y = h * feat_stride;
        //  channel-major views of the deltas / scores at this location
        const Dtype* p_box = d_anchor4d + h * bottom_W + w;
        const Dtype* p_score = bottom4d + h * bottom_W + w;
        const int bottom_area = bottom_H * bottom_W;
        const Dtype dx = p_box[(k * 4 + 0) * bottom_area];
        const Dtype dy = p_box[(k * 4 + 1) * bottom_area];
        const Dtype d_log_w = p_box[(k * 4 + 2) * bottom_area];
        const Dtype d_log_h = p_box[(k * 4 + 3) * bottom_area];
        //  shifted anchor corners, then regression + clipping;
        //  a proposal failing the min-size test gets score zero
        Dtype* const p_proposal = proposals + index * 5;
        p_proposal[0] = x + anchors[k * 4 + 0];
        p_proposal[1] = y + anchors[k * 4 + 1];
        p_proposal[2] = x + anchors[k * 4 + 2];
        p_proposal[3] = y + anchors[k * 4 + 3];
        p_proposal[4]
            = transform_box(p_proposal,
                            dx, dy, d_log_w, d_log_h,
                            img_W, img_H, min_box_W, min_box_H)
              * p_score[k * bottom_area];
    }
}
//  One thread per kept RoI: gathers the NMS-selected proposal into the
//  output layout (batch_index, x1, y1, x2, y2), optionally copying the
//  score when roi_scores is non-NULL.
template <typename Dtype>
__global__ static void retrieve_rois_gpu(const int nthreads,
                                         const int item_index,
                                         const Dtype proposals[],
                                         const int roi_indices[],
                                         Dtype rois[],
                                         Dtype roi_scores[]) {
    CUDA_KERNEL_LOOP(index, nthreads) {
        const Dtype* const proposals_index = proposals + roi_indices[index] * 5;
        rois[index * 5 + 0] = item_index;
        rois[index * 5 + 1] = proposals_index[0];
        rois[index * 5 + 2] = proposals_index[1];
        rois[index * 5 + 3] = proposals_index[2];
        rois[index * 5 + 4] = proposals_index[3];
        if (roi_scores) {
            roi_scores[index] = proposals_index[4];
        }
    }
}
//  Device-side IoU of two corner-form boxes (inclusive pixel coords).
//  Unlike the CPU version there is no disjoint early-out: the max(0, ...)
//  clamp on width/height makes a disjoint pair yield zero area anyway.
template <typename Dtype>
__device__ static Dtype iou(const Dtype A[], const Dtype B[]) {
    // overlapped region (= box)
    const Dtype x1 = max(A[0], B[0]);
    const Dtype y1 = max(A[1], B[1]);
    const Dtype x2 = min(A[2], B[2]);
    const Dtype y2 = min(A[3], B[3]);
    // intersection area
    const Dtype width = max((Dtype)0, x2 - x1 + (Dtype)1);
    const Dtype height = max((Dtype)0, y2 - y1 + (Dtype)1);
    const Dtype area = width * height;
    // area of A, B
    const Dtype A_area = (A[2] - A[0] + (Dtype)1) * (A[3] - A[1] + (Dtype)1);
    const Dtype B_area = (B[2] - B[0] + (Dtype)1) * (B[3] - B[1] + (Dtype)1);
    // IoU = intersection / union
    return area / (A_area + B_area - area);
}
#define DIV_THEN_CEIL(x, y) (((x) + (y) - 1) / (y))
static const int nms_block_size = 64;
//  Builds the pairwise-suppression bitmask for NMS: one 2-D grid of
//  (nms_block_size x nms_block_size) box pairs per block. mask is a
//  "num_boxes x num_blocks" array of 64-bit words; bit di of mask[j][bi]
//  is set when box j overlaps box (bi * nms_block_size + di) above
//  nms_thresh. boxes must be sorted by descending score beforehand.
template <typename Dtype>
__global__ static void nms_mask(const Dtype boxes[],
                                unsigned long long mask[],
                                const int num_boxes,
                                const Dtype nms_thresh) {
    // block region
    //   j = j_start + { 0, ..., dj_end - 1 }
    //   i = i_start + { 0, ..., di_end - 1 }
    const int i_start = blockIdx.x * nms_block_size;
    const int di_end = min(num_boxes - i_start, nms_block_size);
    const int j_start = blockIdx.y * nms_block_size;
    const int dj_end = min(num_boxes - j_start, nms_block_size);
    // copy all i-th boxes to GPU cache (shared memory)
    //   i = i_start + { 0, ..., di_end - 1 }
    __shared__ Dtype boxes_i[nms_block_size * 4];
    {
        const int di = threadIdx.x;
        if (di < di_end) {
            boxes_i[di * 4 + 0] = boxes[(i_start + di) * 5 + 0];
            boxes_i[di * 4 + 1] = boxes[(i_start + di) * 5 + 1];
            boxes_i[di * 4 + 2] = boxes[(i_start + di) * 5 + 2];
            boxes_i[di * 4 + 3] = boxes[(i_start + di) * 5 + 3];
        }
    }
    __syncthreads();
    // given j = j_start + dj,
    //   check whether box i is significantly overlapped with box j
    //   (i.e., IoU(box j, box i) > threshold)
    //   for all i = i_start + { 0, ..., di_end - 1 } except for i == j
    {
        const int dj = threadIdx.x;
        if (dj < dj_end) {
            // box j
            const Dtype* const box_j = boxes + (j_start + dj) * 5;
            // mask for significant overlap
            //   if IoU(box j, box i) > threshold, di-th bit = 1
            unsigned long long mask_j = 0;
            // check for all i = i_start + { 0, ..., di_end - 1 }
            // except for i == j (on the diagonal only higher-ranked i count)
            const int di_start = (i_start == j_start) ? (dj + 1) : 0;
            for (int di = di_start; di < di_end; ++di) {
                // box i
                const Dtype* const box_i = boxes_i + di * 4;
                // if IoU(box j, box i) > threshold, di-th bit = 1
                if (iou(box_j, box_i) > nms_thresh) {
                    mask_j |= 1ULL << di;
                }
            }
            // mask: "num_boxes x num_blocks" array
            //   for mask[j][bi], "di-th bit = 1" means:
            //   box j is significantly overlapped with box i = i_start + di,
            //   where i_start = bi * block_size
            {
                const int num_blocks = DIV_THEN_CEIL(num_boxes, nms_block_size);
                const int bi = blockIdx.x;
                mask[(j_start + dj) * num_blocks + bi] = mask_j;
            }
        } // endif dj < dj_end
    }
}
//  GPU-assisted NMS: the pairwise overlap bitmask is computed on device
//  (nms_mask), then a sequential host pass selects the survivors.
//  boxes_gpu must be sorted by descending score. p_mask is an int Tensor
//  reinterpreted as unsigned long long words. The kept indices (offset by
//  base_index) go to index_out_cpu, at most max_num_out; *num_out gets
//  the count.
template <typename Dtype>
void nms_gpu(const int num_boxes,
             const Dtype boxes_gpu[],
             Tensor* p_mask,
             int index_out_cpu[],
             int* const num_out,
             const int base_index,
             const Dtype nms_thresh,
             const int max_num_out) {
    const int num_blocks = DIV_THEN_CEIL(num_boxes, nms_block_size);
    {
        const dim3 blocks(num_blocks, num_blocks);
        //  mask is sized num_boxes rows of num_blocks 64-bit words,
        //  expressed in units of int for the Tensor's element type
        vector<TIndex> mask_shape(2);
        mask_shape[0] = num_boxes;
        mask_shape[1] = num_blocks * sizeof(unsigned long long) / sizeof(int);
        p_mask->Reshape(mask_shape);
        // find all significantly-overlapped pairs of boxes
        nms_mask << <blocks, nms_block_size >> >(
            boxes_gpu, (unsigned long long*)p_mask->template mutable_data<int, CUDAContext>(),
            num_boxes, nms_thresh);
        CUDA_POST_KERNEL_CHECK;
    }
    // discard i-th box if it is significantly overlapped with
    // one or more previous (= scored higher) boxes
    {
        //  requesting CPU data presumably triggers the device->host copy
        //  of the mask via the Tensor context switch — confirm
        const unsigned long long* p_mask_cpu
            = (unsigned long long*)p_mask->mutable_data<int, CPUContext>();
        int num_selected = 0;
        //  running union of suppression bits over all selected boxes
        vector<unsigned long long> dead_bit(num_blocks);
        for (int i = 0; i < num_blocks; ++i) {
            dead_bit[i] = 0;
        }
        for (int i = 0; i < num_boxes; ++i) {
            const int nblock = i / nms_block_size;
            const int inblock = i % nms_block_size;
            if (!(dead_bit[nblock] & (1ULL << inblock))) {
                index_out_cpu[num_selected++] = base_index + i;
                const unsigned long long* const mask_i = p_mask_cpu + i * num_blocks;
                //  only blocks at or after i can hold lower-ranked boxes
                for (int j = nblock; j < num_blocks; ++j) {
                    dead_bit[j] |= mask_i[j];
                }
                if (num_selected == max_num_out) {
                    break;
                }
            }
        }
        *num_out = num_selected;
    }
}
template
void nms_gpu(const int num_boxes,
const float boxes_gpu[],
Tensor* p_mask,
int index_out_cpu[],
int* const num_out,
const int base_index,
const float nms_thresh,
const int max_num_out);
template
void nms_gpu(const int num_boxes,
const double boxes_gpu[],
Tensor* p_mask,
int index_out_cpu[],
int* const num_out,
const int base_index,
const double nms_thresh,
const int max_num_out);
//  CUDA path: proposals are decoded on device, partially sorted on a host
//  copy, NMS runs with a device bitmask + host selection pass, and the
//  surviving RoIs are gathered back on device.
template <class Context> template <typename T>
void ProposalOp<Context>::RunWithType() {
    auto* p_bottom_item = this->input(0).template data<T, CUDAContext>();
    auto* p_d_anchor_item = this->input(1).template data<T, CUDAContext>();
    auto* p_img_info_cpu = this->input(2).template data<T, CPUContext>();
    auto* p_roi_item = this->output(0)->template mutable_data<T, CUDAContext>();
    //  scores output is optional
    auto* p_score_item = (this->OutputSize() > 1) ? this->output(1)->template mutable_data<T, CUDAContext>() : NULL;
    vector<TIndex> proposals_shape(2), top_shape(2);
    proposals_shape[0] = 0; proposals_shape[1] = 5;
    top_shape[0] = 0; top_shape[1] = 5;
    //  NOTE(review): p_img_info_cpu and p_roi_item are never advanced in
    //  this loop; safe only while RunOnDevice enforces batch size 1.
    for (int n = 0; n < this->input(0).dim(0); ++n) {
        // bottom shape: (2 x num_anchors) x H x W
        const int bottom_H = this->input(0).dim(2);
        const int bottom_W = this->input(0).dim(3);
        // input image height & width
        const T img_H = p_img_info_cpu[0];
        const T img_W = p_img_info_cpu[1];
        // scale factor for height & width
        const T scale_H = p_img_info_cpu[2];
        const T scale_W = p_img_info_cpu[3];
        // minimum box width & height
        const T min_box_H = min_size_ * scale_H;
        const T min_box_W = min_size_ * scale_W;
        // number of all proposals = num_anchors * H * W
        const int num_proposals = anchors_.dim(0) * bottom_H * bottom_W;
        // number of top-n proposals before NMS
        const int pre_nms_topn = std::min(num_proposals, pre_nms_topn_);
        // number of final RoIs
        int num_rois = 0;
        // enumerate all proposals
        //   (x1, y1, x2, y2, score) for each proposal
        // NOTE: for bottom, only foreground scores are passed
        proposals_shape[0] = num_proposals;
        proposals_.Reshape(proposals_shape);
        enumerate_proposals_gpu<T> << <GET_BLOCKS(num_proposals), CUDA_NUM_THREADS >> >(num_proposals,
                                            p_bottom_item + num_proposals,
                                                           p_d_anchor_item,
                                    anchors_.template data<T, CUDAContext>(),
                          proposals_.template mutable_data<T, CUDAContext>(),
                                                            anchors_.dim(0),
                                                          bottom_H, bottom_W,
                                                                img_H, img_W,
                                                        min_box_H, min_box_W,
                                                                feat_stride_);
        CUDA_POST_KERNEL_CHECK;
        //  the sort runs on a host view of proposals_; requesting CPU data
        //  presumably syncs device->host (and back for the next device
        //  read) via the Tensor context switch — confirm
        sort_box<T>(proposals_.template mutable_data<T, CPUContext>(), 0, num_proposals - 1, pre_nms_topn_);
        nms_gpu<T>(pre_nms_topn, proposals_.template data<T, CUDAContext>(),
                   &nms_mask_,
                   roi_indices_.template mutable_data<int, CPUContext>(),
                   &num_rois,
                   0,
                   nms_thresh_,
                   post_nms_topn_);
        //  gather the kept proposals as (n, x1, y1, x2, y2) rows on device
        retrieve_rois_gpu<T> << <GET_BLOCKS(num_rois), CUDA_NUM_THREADS >> >(num_rois,
                                                                                    n,
                                               proposals_.template data<T, CUDAContext>(),
                                             roi_indices_.template data<int, CUDAContext>(),
                                                                           p_roi_item,
                                                                         p_score_item);
        CUDA_POST_KERNEL_CHECK;
        top_shape[0] += num_rois;
    }
    //  shrink outputs to the actual number of RoIs produced
    this->output(0)->Reshape(top_shape);
    if (this->OutputSize() > 1) {
        top_shape.pop_back();
        this->output(1)->Reshape(top_shape);
    }
}
template void ProposalOp<CUDAContext>::RunWithType<float>();
}
\ No newline at end of file
...@@ -244,23 +244,46 @@ void CuDNNBatchNormGradientOp<Context>::TrainingRunWithType() { ...@@ -244,23 +244,46 @@ void CuDNNBatchNormGradientOp<Context>::TrainingRunWithType() {
template <class Context> template <typename T> template <class Context> template <typename T>
void CuDNNBatchNormGradientOp<Context>::InferenceRunWithType() { void CuDNNBatchNormGradientOp<Context>::InferenceRunWithType() {
INIT_MULTIPLIER(multiplier, NS);
INIT_MULTIPLIER(num_multiplier, N);
INIT_MULTIPLIER(spatial_multiplier, S);
auto* dYdata = input(-1).template data<T, Context>();
auto* Sdata = input(3).template data<T, Context>();
auto* hVar_data = input(2).template data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>();
auto* NSMul_data = multiplier->template data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>();
auto* NMul_data = num_multiplier->template data<T, Context>();
auto* NC_data = num_by_chans.template mutable_data<T, Context>();
// gradient w.r.t. scale
if (output(1)->name() != "ignore")
LOG(FATAL) << "The gamma should be fixed if using global stats.";
// gradient w.r.t. bias
if (output(2)->name() != "ignore") {
auto* dBdata = output(2)->template mutable_data<T, Context>();
if (data_format == "NCHW") {
math::Gemv<T, Context>(CblasNoTrans, NC, S,
1.0, dYdata, SMul_data,
0.0, NC_data);
math::Gemv<T, Context>(CblasTrans, N, C,
1.0, NC_data, NMul_data,
1.0, dBdata);
} else if (data_format == "NHWC") {
math::Gemv<T, Context>(CblasTrans, NS, C,
1.0, dYdata, NSMul_data,
1.0, dBdata);
}
}
// gradient w.r.t. x
if (output(0)->name() != "ignore") { if (output(0)->name() != "ignore") {
INIT_MULTIPLIER(multiplier, NS);
INIT_MULTIPLIER(num_multiplier, N);
INIT_MULTIPLIER(spatial_multiplier, S);
stddev = ws()->GetBuffer(); stddev = ws()->GetBuffer();
stddev->ReshapeLike(input(0)); stddev->ReshapeLike(input(0));
auto* dYdata = input(-1).template data<T, Context>();
auto* dXdata = output(0)->template mutable_data<T, Context>(); auto* dXdata = output(0)->template mutable_data<T, Context>();
auto* Std_data = stddev->template mutable_data<T, Context>(); auto* Std_data = stddev->template mutable_data<T, Context>();
auto* Sdata = input(3).template data<T, Context>();
auto* hVar_data = input(2).template data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>();
auto* NSMul_data = multiplier->template data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>();
auto* NMul_data = num_multiplier->template data<T, Context>();
auto* NC_data = num_by_chans.template mutable_data<T, Context>();
// compute stddev // compute stddev
ctx().template Copy<T, Context, Context>(var->count(), tVar_data, hVar_data); ctx().template Copy<T, Context, Context>(var->count(), tVar_data, hVar_data);
......
...@@ -429,21 +429,43 @@ void FusedBatchNormGradientOp<Context>::TrainingRunWithType() { ...@@ -429,21 +429,43 @@ void FusedBatchNormGradientOp<Context>::TrainingRunWithType() {
template <class Context> template <typename T> template <class Context> template <typename T>
void FusedBatchNormGradientOp<Context>::InferenceRunWithType() { void FusedBatchNormGradientOp<Context>::InferenceRunWithType() {
INIT_MULTIPLIER(multiplier, NS);
INIT_MULTIPLIER(num_multiplier, N);
INIT_MULTIPLIER(spatial_multiplier, S);
auto* dYdata = input(-1).template data<T, Context>();
auto* Sdata = input(3).template data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>();
auto* NSMul_data = multiplier->template data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>();
auto* NMul_data = num_multiplier->template data<T, Context>();
auto* NC_data = num_by_chans.template mutable_data<T, Context>();
// gradient w.r.t. scale
if (output(1)->name() != "ignore")
LOG(FATAL) << "The gamma should be fixed if using global stats.";
// gradient w.r.t. bias
if (output(2)->name() != "ignore") {
auto* dBdata = output(2)->template mutable_data<T, Context>();
if (data_format == "NCHW") {
math::Gemv<T, Context>(CblasNoTrans, NC, S,
1.0, dYdata, SMul_data,
0.0, NC_data);
math::Gemv<T, Context>(CblasTrans, N, C,
1.0, NC_data, NMul_data,
1.0, dBdata);
} else if (data_format == "NHWC") {
math::Gemv<T, Context>(CblasTrans, NS, C,
1.0, dYdata, NSMul_data,
1.0, dBdata);
}
}
// gradient w.r.t. x
if (output(0)->name() != "ignore") { if (output(0)->name() != "ignore") {
INIT_MULTIPLIER(multiplier, NS);
INIT_MULTIPLIER(num_multiplier, N);
INIT_MULTIPLIER(spatial_multiplier, S);
auto* dYdata = input(-1).template data<T, Context>();
auto* dXdata = output(0)->template mutable_data<T, Context>(); auto* dXdata = output(0)->template mutable_data<T, Context>();
auto* Std_data = stddev->template mutable_data<T, Context>(); auto* Std_data = stddev->template mutable_data<T, Context>();
auto* Sdata = input(3).template data<T, Context>();
auto* hVar_data = input(2).template data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>();
auto* NSMul_data = multiplier->template data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>();
auto* NMul_data = num_multiplier->template data<T, Context>();
auto* NC_data = num_by_chans.template mutable_data<T, Context>();
// divide scale by stddev // divide scale by stddev
math::Div<T, Context>(var->count(), Sdata, tVar_data, tVar_data); math::Div<T, Context>(var->count(), Sdata, tVar_data, tVar_data);
...@@ -492,7 +514,9 @@ void FusedBatchNormGradientOp<Context>::Setup() { ...@@ -492,7 +514,9 @@ void FusedBatchNormGradientOp<Context>::Setup() {
// reshape // reshape
num_by_chans.Reshape(vector<TIndex>(1, NC)); num_by_chans.Reshape(vector<TIndex>(1, NC));
output(0)->ReshapeLike(input(0)); output(0)->ReshapeLike(input(0)); // dX
output(1)->ReshapeLike(input(3)); // dScale
output(2)->ReshapeLike(input(3)); // dBias
} }
template <class Context> template <class Context>
......
Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!