SeetaResearch / Dragon
Commit 6683676d, authored Mar 24, 2018 by Ting PAN

GroupNormalization Support

1 parent 18b664b1

Showing 19 changed files with 1592 additions and 201 deletions
Dragon/include/operators/ndarray/reshape_op.h
Dragon/include/operators/norm/batch_norm_op.h
Dragon/include/operators/norm/group_norm_op.h
Dragon/python/dragon/docs/contents/operators/norm.rst
Dragon/python/dragon/docs/contents/ops.rst
Dragon/python/dragon/docs/contents/vm/caffe/layer.rst
Dragon/python/dragon/operators/ndarray.py
Dragon/python/dragon/operators/norm.py
Dragon/python/dragon/ops.py
Dragon/python/dragon/vm/caffe/layers/__init__.py
Dragon/python/dragon/vm/caffe/layers/common.py
Dragon/python/dragon/vm/caffe/proto/caffe.proto
Dragon/python/dragon/vm/caffe/proto/caffe_pb2.py
Dragon/python/setup.py
Dragon/src/operators/ndarray/reshape_op.cc
Dragon/src/operators/norm/batch_norm_op.cc
Dragon/src/operators/norm/fused_batch_norm.cc
Dragon/src/operators/norm/fused_group_norm.cc
Dragon/src/operators/norm/group_norm_op.cc
Dragon/include/operators/ndarray/reshape_op.h

@@ -16,15 +16,16 @@ class ReshapeOp final : public Operator<Context> {
  public:
     ReshapeOp(const OperatorDef& op_def, Workspace* ws)
         : Operator<Context>(op_def, ws),
-          shape(OperatorBase::GetRepeatedArg<int>("shape")) {
-        new_shape.resize(shape.size());
-    }
+          shape_like_desc(OperatorBase::GetSingleArg<string>("shape_like", "")) {
+        GET_ARGUMENTS_WITH_DESC(int, shape);
+    }
     void RunOnDevice() override;

  protected:
-    vector<int> shape;
-    vector<TIndex> new_shape;
+    DECLARE_ARGUMENTS_WITH_DESC(int, shape);
+    string shape_like_desc;
+    vector<TIndex> require_shape, new_shape;
 };

 template <class Context>

@@ -38,6 +39,8 @@ class ReshapeGradientOp final : public Operator<Context> {
     void RunOnDevice() override;
 };

+DEFINE_ARGUMENTS_WITH_DESC(int, ReshapeOp, shape);
+
 }    // namespace dragon

 #endif    // DRAGON_OPERATORS_NDARRAY_RESHAPE_OP_H_
\ No newline at end of file
Dragon/include/operators/norm/batch_norm_op.h

@@ -105,7 +105,7 @@ class FusedBatchNormGradientOp : public Operator<Context> {
         : Operator<Context>(op_def, ws),
           axis(OperatorBase::GetSingleArg<int>("axis", -1)),
           eps(OperatorBase::GetSingleArg<float>("eps", float(1e-3))),
-          use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)) { }
+          use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)) {}
     void Setup();
Dragon/include/operators/norm/group_norm_op.h (new file, 0 → 100644)

// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_NORM_GROUP_NORM_OP_H_
#define DRAGON_OPERATORS_NORM_GROUP_NORM_OP_H_

#include "core/operator.h"

namespace dragon {

template <class Context>
class GroupNormOp : public Operator<Context> {
 public:
    GroupNormOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          group(OperatorBase::GetSingleArg<int>("group", 32)),
          axis(OperatorBase::GetSingleArg<int>("axis", -1)),
          momentum(OperatorBase::GetSingleArg<float>("momentum", float(0.9))),
          eps(OperatorBase::GetSingleArg<float>("eps", float(1e-3))),
          use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)),
          mode(OperatorBase::GetSingleArg<string>("mode", "DEFAULT")) {
        if (axis != -1)
            CHECK_EQ(axis, 1) << "\nThe axis can only be set to 1.";
    }

    void Setup();
    void RunOnDevice() override;
    template <typename T> void TrainingRunWithType();
    template <typename T> void InferenceRunWithType();

 protected:
    float momentum, eps;
    Tensor mean, num_by_chans;
    Tensor* multiplier, *num_multiplier, *spatial_multiplier, *cgs_multiplier;
    Tensor* stddev, *var;
    TIndex group, axis, N, C, S, NG, NC, NS, CGS;
    string data_format, mode;
    int use_stats;
    bool use_global_stats, is_recomputing;
};

template <class Context>
class GroupNormGradientOp final : public Operator<Context> {
 public:
    GroupNormGradientOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          group(OperatorBase::GetSingleArg<int>("group", 32)),
          axis(OperatorBase::GetSingleArg<int>("axis", -1)),
          use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)) {
        if (axis != -1)
            CHECK_EQ(axis, 1) << "\nThe axis can only be set to 1.";
    }

    void Setup();
    void RunOnDevice() override;
    template <typename T> void TrainingRunWithType();
    template <typename T> void InferenceRunWithType();

 protected:
    Tensor num_by_chans;
    Tensor* multiplier, *num_multiplier, *spatial_multiplier, *cgs_multiplier;
    Tensor* stddev, *var;
    TIndex group, axis, N, C, S, NG, NC, NS, CGS;
    string data_format;
    int use_stats;
    bool use_global_stats;
};

template <class Context>
class FusedGroupNormOp : public Operator<Context> {
 public:
    FusedGroupNormOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          group(OperatorBase::GetSingleArg<int>("group", 32)),
          axis(OperatorBase::GetSingleArg<int>("axis", -1)),
          momentum(OperatorBase::GetSingleArg<float>("momentum", float(0.9))),
          eps(OperatorBase::GetSingleArg<float>("eps", float(1e-3))),
          use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)) {}

    void Setup();
    void RunOnDevice() override;
    template <typename T> void TrainingRunWithType();
    template <typename T> void InferenceRunWithType();

 protected:
    float momentum, eps;
    Tensor num_by_chans;
    Tensor* multiplier, *num_multiplier, *spatial_multiplier, *cgs_multiplier;
    Tensor* mean, *var, *stddev, *x_norm;
    TIndex group, axis, N, C, S, NG, NC, NS, CGS;
    string data_format;
    int use_stats;
    bool use_global_stats, is_recomputing;
};

template <class Context>
class FusedGroupNormGradientOp : public Operator<Context> {
 public:
    FusedGroupNormGradientOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          group(OperatorBase::GetSingleArg<int>("group", 32)),
          axis(OperatorBase::GetSingleArg<int>("axis", -1)),
          eps(OperatorBase::GetSingleArg<float>("eps", float(1e-3))),
          use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)) {}

    void Setup();
    void ShareGradient() override;
    void RunOnDevice() override;
    template <typename T> void TrainingRunWithType();
    template <typename T> void InferenceRunWithType();

 protected:
    float eps;
    Tensor num_by_chans;
    Tensor* multiplier, *num_multiplier, *spatial_multiplier, *cgs_multiplier;
    Tensor* mean, *var, *stddev, *x_norm;
    TIndex group, axis, N, C, S, NG, NC, NS, CGS;
    string data_format;
    int use_stats;
    bool use_global_stats;
};

}    // namespace dragon

#endif    // DRAGON_OPERATORS_NORM_GROUP_NORM_OP_H_
\ No newline at end of file
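The header above only declares the operators; as an illustration of the arithmetic they implement (the |groupnorm_function| formula added to the docs below), here is a minimal NumPy sketch of the group-norm forward pass for NCHW data. The function name and layout handling are illustrative assumptions, not part of Dragon's API.

import numpy as np

def group_norm_forward(x, group=32, eps=1e-3):
    """Illustrative group-norm forward for NCHW input (not Dragon's API)."""
    N, C, H, W = x.shape
    assert C % group == 0, 'channels must be divisible by group'
    # Fold the channels into `group` groups; each group owns C/group channels.
    xg = x.reshape(N, group, C // group * H * W)
    mu = xg.mean(axis=2, keepdims=True)        # per-(sample, group) mean
    var = xg.var(axis=2, keepdims=True)        # per-(sample, group) variance
    x_norm = (xg - mu) / np.sqrt(var + eps)    # normalize within each group
    return x_norm.reshape(N, C, H, W)

The fused variants additionally apply the affine scale and bias after this normalization, which is exactly the difference between |groupnorm_function| and |groupnorm_scale_function| in the docs.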
Dragon/python/dragon/docs/contents/operators/norm.rst

@@ -16,6 +16,14 @@
     \sigma_{B}^{2} = \frac{1}{m} \sum_{i=1}^{m}(x_{i} - \mu_{B})^{2} \\
     \hat{x}_{i} = \frac{x_{i} - \mu_{B}}{\sqrt{\sigma_{B}^{2} + \epsilon}} \\ y_{i} = \gamma\hat{x}_{i} + \beta \\ \,

+.. |groupnorm_function| mathmacro:: \\ \, \\ \mu_{G} = \frac{1}{m} \sum_{i=1}^{m}x_{i} \\
+    \sigma_{G}^{2} = \frac{1}{m} \sum_{i=1}^{m}(x_{i} - \mu_{G})^{2} \\
+    \hat{x}_{i} = \frac{x_{i} - \mu_{G}}{\sqrt{\sigma_{G}^{2} + \epsilon}} \\ \,
+
+.. |groupnorm_scale_function| mathmacro:: \\ \, \\ \mu_{G} = \frac{1}{m} \sum_{i=1}^{m}x_{i} \\
+    \sigma_{G}^{2} = \frac{1}{m} \sum_{i=1}^{m}(x_{i} - \mu_{G})^{2} \\
+    \hat{x}_{i} = \frac{x_{i} - \mu_{G}}{\sqrt{\sigma_{G}^{2} + \epsilon}} \\ y_{i} = \gamma\hat{x}_{i} + \beta \\ \,
+
 .. |batchrenorm_function| mathmacro:: \\ \, \\ \mu_{B} = \frac{1}{m} \sum_{i=1}^{m}x_{i} \\
     \sigma_{B}^{2} = \frac{1}{m} \sum_{i=1}^{m}(x_{i} - \mu_{B})^{2} \\
     \hat{x}_{i} = \frac{x_{i} - \mu_{B}}{\sqrt{\sigma_{B}^{2} + \epsilon}} \cdot r + d \\ \,
Dragon/python/dragon/docs/contents/ops.rst

@@ -113,6 +113,8 @@ List Brief
 `BatchNorm`_          Batch Normalization, introduced by `[Ioffe & Szegedy, 2015] <https://arxiv.org/abs/1502.03167>`_.
 `BatchRenorm`_        Batch Renormalization, introduced by `[Ioffe, 2017] <https://arxiv.org/abs/1702.03275>`_.
 `FusedBatchNorm`_     Batch Normalization, with scale procedure after normalization.
+`GroupNorm`_          Group Normalization, introduced by `[Wu & He, 2018] <https://arxiv.org/abs/1803.08494>`_.
+`FusedGroupNorm`_     Group Normalization, with scale procedure after normalization.
 `InstanceNorm`_       Instance Normalization, introduced by `[Ulyanov et.al, 2016] <https://arxiv.org/abs/1607.08022>`_.
 `L2Norm`_             L2 Normalization, introduced by `[Liu et.al, 2015] <https://arxiv.org/abs/1506.04579>`_.
 ================== ======================================================================

@@ -253,6 +255,8 @@ List Brief
 .. _BatchNorm: operators/norm.html#dragon.operators.norm.BatchNorm
 .. _BatchRenorm: operators/norm.html#dragon.operators.norm.BatchRenorm
 .. _FusedBatchNorm: operators/norm.html#dragon.operators.norm.FusedBatchNorm
+.. _GroupNorm: operators/norm.html#dragon.operators.norm.GroupNorm
+.. _FusedGroupNorm: operators/norm.html#dragon.operators.norm.FusedGroupNorm
 .. _InstanceNorm: operators/norm.html#dragon.operators.norm.InstanceNorm
 .. _L2Norm: operators/norm.html#dragon.operators.norm.L2Norm
Dragon/python/dragon/docs/contents/vm/caffe/layer.rst

@@ -73,9 +73,11 @@ List Brief
 `ArgMaxLayer`_         The implementation of ``ArgMaxLayer``.
 `BatchNormLayer`_      The implementation of ``BatchNormLayer``.
 `BatchRenormLayer`_    The implementation of ``BatchRenormLayer``.
+`GroupNormLayer`_      The implementation of ``GroupNormLayer``.
 `InstanceNormLayer`_   The implementation of ``InstanceNormLayer``.
 `ScaleLayer`_          The implementation of ``ScaleLayer``.
 `BNLayer`_             The implementation of ``BNLayer``.
+`GNLayer`_             The implementation of ``GNLayer``.
 `NormalizeLayer`_      The implementation of ``NormalizeLayer``.
 `TileLayer`_           The extended implementation of ``TileLayer``.
 `ExpandDimsLayer`_     The implementation of ``ExpandDimsLayer``.

@@ -181,9 +183,11 @@ API Reference
 .. _ArgMaxLayer: #dragon.vm.caffe.layers.common.ArgMaxLayer
 .. _BatchNormLayer: #dragon.vm.caffe.layers.common.BatchNormLayer
 .. _BatchRenormLayer: #dragon.vm.caffe.layers.common.BatchRenormLayer
+.. _GroupNormLayer: #dragon.vm.caffe.layers.common.GroupNormLayer
 .. _InstanceNormLayer: #dragon.vm.caffe.layers.common.InstanceNormLayer
 .. _ScaleLayer: #dragon.vm.caffe.layers.common.ScaleLayer
 .. _BNLayer: #dragon.vm.caffe.layers.common.BNLayer
+.. _GNLayer: #dragon.vm.caffe.layers.common.GNLayer
 .. _NormalizeLayer: #dragon.vm.caffe.layers.common.NormalizeLayer
 .. _TileLayer: #dragon.vm.caffe.layers.common.TileLayer
 .. _ExpandDimsLayer: #dragon.vm.caffe.layers.common.ExpandDimsLayer
Dragon/python/dragon/operators/ndarray.py

@@ -648,15 +648,21 @@ def Flatten(inputs, axis=0, num_axes=-1, keep_axes=None, **kwargs):
     return output

-def Reshape(inputs, shape, **kwargs):
+def Reshape(inputs, shape, shape_like=None, **kwargs):
     """Reshape the dimensions of input.

+    ``shape`` could be a list of numbers or Tensors.
+
+    Set ``shape`` to ``None``, if you want to use ``shape_like``.
+
     Parameters
     ----------
     inputs : Tensor
         The input tensor.
-    shape : list or tuple
+    shape : list, tuple or None
         The new shape.
+    shape_like : Tensor or None
+        The tensor for indicating the output shape.

     Returns
     -------

@@ -677,12 +683,24 @@ def Reshape(inputs, shape, **kwargs):
     CheckInputs(inputs, 1)
     arguments = ParseArguments(locals())
-    if not isinstance(shape, tuple) and not isinstance(shape, list):
-        raise TypeError('The type of dims must be a tuple or list.')
+    if shape is not None:
+        AddArgumentsWithDesc(arguments, shape, 'shape', 'int32', as_target=True)
+    elif shape_like is not None:
+        if not isinstance(shape_like, Tensor):
+            raise TypeError('The shape_like should be a Tensor.')
+        arguments['shape_like'] = shape_like.name

     output = Tensor.CreateOperator(nout=1, op_type='Reshape', **arguments)

     if inputs.shape is not None:
         possible_to_infer_shape = True
+        if shape is not None:
+            for dim in shape:
+                if isinstance(dim, Tensor): possible_to_infer_shape = False
+        if shape_like is not None:
+            possible_to_infer_shape = False
         if possible_to_infer_shape:
             output.shape = [1] * len(shape)
             for i, s in enumerate(shape):
                 if s == -1: output.shape[i] = 1
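To illustrate the two now mutually exclusive arguments, a hedged usage sketch follows; the import paths and Tensor construction are assumptions for illustration, not taken from the commit.

import dragon.ops as ops
from dragon.core.tensor import Tensor    # import path assumed

# 'x' and 'ref' are placeholder tensors; the constructor usage here is assumed.
x = Tensor('x')
ref = Tensor('ref')

y1 = ops.Reshape(x, shape=[8, -1])                  # reshape by an explicit shape
y2 = ops.Reshape(x, shape=None, shape_like=ref)     # reshape to ref's runtime shape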
Dragon/python/dragon/operators/norm.py

@@ -165,6 +165,103 @@ def FusedBatchNorm(inputs, axis=-1, momentum=0.9, eps=1e-3, use_stats=-1, **kwargs):
     return output


def GroupNorm(inputs, group=32, axis=-1, momentum=0.9, eps=1e-3, use_stats=-1, mode='DEFAULT', **kwargs):
    """Group Normalization, introduced by `[Wu & He, 2018] <https://arxiv.org/abs/1803.08494>`_.

    It follows the implementation of `Caffe`_, that scale procedure is moved to `ops.Scale(*args, **kwargs)`_.

    The number of inputs varies from ``3`` to ``4`` (``DEFAULT`` or ``CAFFE`` mode).

    Parameters
    ----------
    inputs : list of Tensor
        The inputs, represent [input, mean, var] or [input, mean, var, factor].
    group : int
        The group size.
    axis : int
        The channel axis.
    momentum : float
        The momentum of moving average.
    eps : float
        The eps.
    use_stats : int
        Whether to use global stats. Default is ``-1`` (Auto).
    mode : str
        The moving average mode. ``DEFAULT`` or ``CAFFE``.

    Returns
    -------
    Tensor
        The output tensor, calculated as:

        |groupnorm_function|

        The ``DEFAULT`` moving average of mean/var, calculated as:

        |default_moving_average_function|

        The ``CAFFE`` moving average of mean/var, calculated as:

        |caffe_moving_average_function|

    """
    CheckInputs(inputs, 3, 4)
    arguments = ParseArguments(locals())
    if len(inputs) > 3:
        if mode != 'CAFFE':
            raise ValueError('Only the CAFFE mode will take 4 inputs.')

    output = Tensor.CreateOperator(nout=1, op_type='GroupNorm', **arguments)

    if inputs[0].shape is not None:
        output.shape = inputs[0].shape[:]

    return output


def FusedGroupNorm(inputs, group=32, axis=-1, momentum=0.9, eps=1e-3, use_stats=-1, **kwargs):
    """Group Normalization, with scale procedure after normalization.

    Parameters
    ----------
    inputs : list of Tensor
        The inputs, represent [input, mean, var, scale, bias].
    group : int
        The group size.
    axis : int
        The channel axis.
    momentum : float
        The momentum of moving average.
    eps : float
        The eps.
    use_stats : int
        Whether to use global stats. Default is ``-1`` (Auto).

    Returns
    -------
    Tensor
        The output tensor, calculated as:

        |groupnorm_scale_function|

        The moving average of mean/var, calculated as:

        |default_moving_average_function|

    """
    CheckInputs(inputs, 5)
    arguments = ParseArguments(locals())

    output = Tensor.CreateOperator(nout=1, op_type='FusedGroupNorm', **arguments)

    if inputs[0].shape is not None:
        output.shape = inputs[0].shape[:]

    return output


def InstanceNorm(inputs, axis=-1, eps=1e-3, **kwargs):
    """Instance Normalization, introduced by `[Ulyanov et.al, 2016] <https://arxiv.org/abs/1607.08022>`_
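A hedged usage sketch of the two new ops follows. The statistics tensors are created here only for illustration, mirroring how GroupNormLayer below wires them; the names and import paths are assumptions rather than part of the commit.

import dragon.ops as ops
from dragon.core.tensor import Tensor    # import path assumed

x = Tensor('x')                                   # the input blob
mean = Tensor('gn/mean').Constant(value=0.0)      # running mean, updated by the op
var = Tensor('gn/var').Constant(value=0.0)        # running variance

# DEFAULT mode: 3 inputs, normalization only (affine scale done via ops.Scale).
y = ops.GroupNorm([x, mean, var], group=32, axis=1)

# Fused variant: 5 inputs, normalization plus the affine scale/bias.
scale = Tensor('gn/scale').Constant(value=1.0)
bias = Tensor('gn/bias').Constant(value=0.0)
y_fused = ops.FusedGroupNorm([x, mean, var, scale, bias], group=32, axis=1)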
Dragon/python/dragon/ops.py

@@ -92,7 +92,9 @@ GramMatrix = math.GramMatrix
 # normalization
 BatchNorm = norm.BatchNorm
 BatchRenorm = norm.BatchRenorm
+GroupNorm = norm.GroupNorm
 FusedBatchNorm = norm.FusedBatchNorm
+FusedGroupNorm = norm.FusedGroupNorm
 InstanceNorm = norm.InstanceNorm
 L2Norm = norm.L2Norm
Dragon/python/dragon/vm/caffe/layers/__init__.py

@@ -39,6 +39,8 @@ from .common import InnerProductLayer, \
                     BatchNormLayer, \
                     BatchRenormLayer, \
                     BNLayer, \
+                    GroupNormLayer, \
+                    GNLayer, \
                     ConcatLayer, \
                     CropLayer, \
                     PythonLayer, \
Dragon/python/dragon/vm/caffe/layers/common.py

@@ -412,6 +412,47 @@ class BatchRenormLayer(Layer):
        return ops.BatchRenorm(bottom + [blob['data'] for blob in self._blobs],
                               **self._param)


class GroupNormLayer(Layer):
    """The implementation of ``GroupNormLayer``.

    Parameters
    ----------
    group : int
        Refer ``GroupNormParameter.group``.
    use_global_stats : boolean
        Refer ``GroupNormParameter.use_global_stats``.
    moving_average_fraction : float
        Refer ``GroupNormParameter.moving_average_fraction``.
    eps : float
        Refer ``GroupNormParameter.eps``.

    """
    def __init__(self, LayerParameter):
        super(GroupNormLayer, self).__init__(LayerParameter)
        param = LayerParameter.group_norm_param
        self._param = {'group': int(param.group),
                       'use_stats': int(param.use_global_stats)
                           if param.HasField('use_global_stats') else -1,
                       'momentum': param.moving_average_fraction,
                       'eps': param.eps,
                       'axis': 1,
                       'mode': 'CAFFE'}
        scope = LayerParameter.name
        # mean, var, factor are set to 0 in order to do statistics
        mean = Tensor(scope + '/param:0').Constant(value=0.0)
        var = Tensor(scope + '/param:1').Constant(value=0.0)
        factor = Tensor(scope + '/param:2').Constant(value=0.0)
        # in dragon, set diff as None will ignore computing grad automatically
        # but in bvlc-caffe1, you must set lr_mult = 0 manually
        self._blobs.append({'data': mean, 'diff': None})
        self._blobs.append({'data': var, 'diff': None})
        self._blobs.append({'data': factor, 'diff': None})

    def Setup(self, bottom):
        super(GroupNormLayer, self).Setup(bottom)
        return ops.GroupNorm(bottom + [blob['data'] for blob in self._blobs],
                             **self._param)


class InstanceNormLayer(Layer):
    """The implementation of ``InstanceNormLayer``.

@@ -518,6 +559,59 @@ class BNLayer(Layer):
        return ops.FusedBatchNorm(bottom + [blob['data'] for blob in self._blobs],
                                  **self._param)


class GNLayer(Layer):
    """The implementation of ``GNLayer``.

    Parameters
    ----------
    group : int
        Refer ``GroupNormParameter.group``.
    use_global_stats : boolean
        Refer ``GroupNormParameter.use_global_stats``.
    moving_average_fraction : float
        Refer ``GroupNormParameter.moving_average_fraction``.
    eps : float
        Refer ``GroupNormParameter.eps``.
    filler : FillerParameter
        The filler of scale parameter. Refer `ScaleParameter.filler`_.
    bias_filler : FillerParameter
        The filler of bias parameter. Refer `ScaleParameter.bias_filler`_.

    """
    def __init__(self, LayerParameter):
        super(GNLayer, self).__init__(LayerParameter)
        gn_param = LayerParameter.group_norm_param
        scale_param = LayerParameter.scale_param
        self._param = {'group': int(gn_param.group),
                       'use_stats': int(gn_param.use_global_stats)
                           if gn_param.HasField('use_global_stats') else -1,
                       'momentum': gn_param.moving_average_fraction,
                       'eps': gn_param.eps,
                       'axis': 1}
        scope = LayerParameter.name
        mean = Tensor(scope + '/param:0').Constant(value=0.0)
        var = Tensor(scope + '/param:1').Constant(value=0.0)
        scale = Tensor(scope + '/param:2')
        scale_diff = Tensor(scope + '/param:2_grad')
        bias = Tensor(scope + '/param:3')
        bias_diff = Tensor(scope + '/param:3_grad')
        if scale_param.HasField('filler'):
            self.Fill(scale, scale_param, 'filler')
        else:
            scale.Constant(value=1.0)
        self.Fill(bias, scale_param, 'bias_filler')
        self.norm_blobs = [{'data': mean, 'diff': None},
                           {'data': var, 'diff': None}]
        self.scale_blobs = [{'data': scale, 'diff': scale_diff},
                            {'data': bias, 'diff': bias_diff}]
        self._blobs.extend(self.norm_blobs)
        self._blobs.extend(self.scale_blobs)

    def Setup(self, bottom):
        super(GNLayer, self).Setup(bottom)
        return ops.FusedGroupNorm(bottom + [blob['data'] for blob in self._blobs],
                                  **self._param)


class NormalizeLayer(Layer):
    """The implementation of ``NormalizeLayer``.
Dragon/python/dragon/vm/caffe/proto/caffe.proto

@@ -423,6 +423,7 @@ message LayerParameter {
   optional DenseConcatParameter dense_concat_param = 163;
   optional FocalLossParameter focal_loss_param = 164;
   optional GatherParameter gather_param = 165;
+  optional GroupNormParameter group_norm_param = 166;
 }

 // Message that stores parameters used to apply transformation

@@ -1512,3 +1513,17 @@ message GatherParameter {
   optional int32 axis = 1 [default = 0];
 }

+message GroupNormParameter {
+  // If false, accumulate global mean/variance values via a moving average. If
+  // true, use those accumulated values instead of computing mean/variance
+  // across the batch.
+  optional bool use_global_stats = 1;
+  // How much does the moving average decay each iteration?
+  optional float moving_average_fraction = 2 [default = 0.9];
+  // Small value to add to the variance estimate so that we don't divide by
+  // zero.
+  optional float eps = 3 [default = 1e-3];
+  optional uint32 group = 5 [default = 32];  // The group size
+}
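For context, here is a minimal sketch of exercising the new message from Python through the regenerated module below. It uses only standard protobuf accessors; the layer name and the prototxt snippet are hypothetical, not taken from the commit.

from dragon.vm.caffe.proto import caffe_pb2
from google.protobuf import text_format

# A hypothetical layer definition carrying a GroupNormParameter.
layer = text_format.Parse("""
    name: "gn1"
    type: "GroupNorm"
    group_norm_param { group: 16 eps: 1e-5 }
""", caffe_pb2.LayerParameter())

print(layer.group_norm_param.group)                         # 16
print(layer.group_norm_param.eps)                           # ~1e-05
print(layer.group_norm_param.moving_average_fraction)       # default: 0.9
print(layer.group_norm_param.HasField('use_global_stats'))  # False -> Dragon maps this to use_stats = -1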
Dragon/python/dragon/vm/caffe/proto/caffe_pb2.py

@@ -19,7 +19,7 @@ _sym_db = _symbol_database.Default()
[Auto-generated protobuf module. This hunk replaces the serialized_pb byte string passed to _descriptor.FileDescriptor(name='caffe.proto', package='caffe', ...) with one regenerated from the updated caffe.proto, so the descriptor now includes the new GroupNormParameter message and LayerParameter's group_norm_param = 166 field. The escaped multi-kilobyte descriptor blobs are elided.]
\x12\'\n\x06
layers
\x18\x02
\x03
(
\x0b\x32\x17
.caffe.V1LayerParameter
\"\xc9\n\n\x0f
SolverParameter
\x12\x0b\n\x03
net
\x18\x18
\x01
(
\t\x12
&
\n\t
net_param
\x18\x19
\x01
(
\x0b\x32\x13
.caffe.NetParameter
\x12\x11\n\t
train_net
\x18\x01
\x01
(
\t\x12\x10\n\x08
test_net
\x18\x02
\x03
(
\t\x12
,
\n\x0f
train_net_param
\x18\x15
\x01
(
\x0b\x32\x13
.caffe.NetParameter
\x12
+
\n\x0e
test_net_param
\x18\x16
\x03
(
\x0b\x32\x13
.caffe.NetParameter
\x12
$
\n\x0b
train_state
\x18\x1a
\x01
(
\x0b\x32\x0f
.caffe.NetState
\x12
#
\n\n
test_state
\x18\x1b
\x03
(
\x0b\x32\x0f
.caffe.NetState
\x12\x11\n\t
test_iter
\x18\x03
\x03
(
\x05\x12\x18\n\r
test_interval
\x18\x04
\x01
(
\x05
:
\x01\x30\x12
\n\x11
test_compute_loss
\x18\x13
\x01
(
\x08
:
\x05\x66\x61
lse
\x12
!
\n\x13
test_initialization
\x18
\x01
(
\x08
:
\x04
true
\x12\x0f\n\x07\x62\x61
se_lr
\x18\x05
\x01
(
\x02\x12\x10\n\x08
stage_lr
\x18\x32
\x03
(
\x02\x12\x12\n\n
stage_iter
\x18\x33
\x03
(
\x05\x12\x0f\n\x07\x64
isplay
\x18\x06
\x01
(
\x05\x12\x17\n\x0c\x61
verage_loss
\x18
!
\x01
(
\x05
:
\x01\x31\x12\x10\n\x08
max_iter
\x18\x07
\x01
(
\x05\x12\x14\n\t
iter_size
\x18
$
\x01
(
\x05
:
\x01\x31\x12\x11\n\t
lr_policy
\x18\x08
\x01
(
\t\x12\r\n\x05
gamma
\x18\t
\x01
(
\x02\x12\r\n\x05
power
\x18\n
\x01
(
\x02\x12\x10\n\x08
momentum
\x18\x0b
\x01
(
\x02\x12\x14\n\x0c
weight_decay
\x18\x0c
\x01
(
\x02\x12\x1f\n\x13
regularization_type
\x18\x1d
\x01
(
\t
:
\x02
L2
\x12\x10\n\x08
stepsize
\x18\r
\x01
(
\x05\x12\x11\n\t
stepvalue
\x18\"
\x03
(
\x05\x12\x1a\n\x0e\x63
lip_gradients
\x18
#
\x01
(
\x02
:
\x02
-1
\x12\x13\n\x08
snapshot
\x18\x0e
\x01
(
\x05
:
\x01\x30\x12\x17\n\x0f
snapshot_prefix
\x18\x0f
\x01
(
\t\x12\x1c\n\r
snapshot_diff
\x18\x10
\x01
(
\x08
:
\x05\x66\x61
lse
\x12
K
\n\x0f
snapshot_format
\x18
%
\x01
(
\x0e\x32
%
.caffe.SolverParameter.SnapshotFormat:
\x0b\x42
INARYPROTO
\x12
;
\n\x0b
solver_mode
\x18\x11
\x01
(
\x0e\x32
!.caffe.SolverParameter.SolverMode:
\x03
GPU
\x12\x14\n\t
device_id
\x18\x12
\x01
(
\x05
:
\x01\x30\x12\x17\n\x0b
random_seed
\x18\x14
\x01
(
\x03
:
\x02
-1
\x12\x11\n\x04
type
\x18
(
\x01
(
\t
:
\x03
SGD
\x12\x15\n\x05\x64\x65
lta
\x18\x1f
\x01
(
\x02
:
\x06\x31\x65
-008
\x12\x18\n\t
momentum2
\x18\'
\x01
(
\x02
:
\x05\x30
.999
\x12\x17\n\t
rms_decay
\x18
&
\x01
(
\x02
:
\x04\x30
.99
\x12\x19\n\n
debug_info
\x18\x17
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\"\n\x14
snapshot_after_train
\x18\x1c
\x01
(
\x08
:
\x04
true
\x12
;
\n\x0b
solver_type
\x18\x1e
\x01
(
\x0e\x32
!.caffe.SolverParameter.SolverType:
\x03
SGD
\"
+
\n\x0e
SnapshotFormat
\x12\x08\n\x04
HDF5
\x10\x00\x12\x0f\n\x0b\x42
INARYPROTO
\x10\x01\"\x1e\n\n
SolverMode
\x12\x07\n\x03\x43
PU
\x10\x00\x12\x07\n\x03
GPU
\x10\x01\"
U
\n\n
SolverType
\x12\x07\n\x03
SGD
\x10\x00\x12\x0c\n\x08
NESTEROV
\x10\x01\x12\x0b\n\x07\x41\x44\x41
GRAD
\x10\x02\x12\x0b\n\x07
RMSPROP
\x10\x03\x12\x0c\n\x08\x41\x44\x41\x44\x45
LTA
\x10\x04\x12\x08\n\x04\x41\x44\x41
M
\x10\x05\"
l
\n\x0b
SolverState
\x12\x0c\n\x04
iter
\x18\x01
\x01
(
\x05\x12\x13\n\x0b
learned_net
\x18\x02
\x01
(
\t\x12
!
\n\x07
history
\x18\x03
\x03
(
\x0b\x32\x10
.caffe.BlobProto
\x12\x17\n\x0c\x63
urrent_step
\x18\x04
\x01
(
\x05
:
\x01\x30\"
N
\n\x08
NetState
\x12
!
\n\x05
phase
\x18\x01
\x01
(
\x0e\x32\x0c
.caffe.Phase:
\x04
TEST
\x12\x10\n\x05
level
\x18\x02
\x01
(
\x05
:
\x01\x30\x12\r\n\x05
stage
\x18\x03
\x03
(
\t\"\x85\x01\n\x0c
NetStateRule
\x12\x1b\n\x05
phase
\x18\x01
\x01
(
\x0e\x32\x0c
.caffe.Phase
\x12\x11\n\t
min_level
\x18\x02
\x01
(
\x05\x12\x11\n\t
max_level
\x18\x03
\x01
(
\x05\x12\r\n\x05
stage
\x18\x04
\x03
(
\t\x12\x11\n\t
not_stage
\x18\x05
\x03
(
\t\x12\x10\n\x08
mpi_rank
\x18\x06
\x03
(
\r\"\xa3\x01\n\t
ParamSpec
\x12\x0c\n\x04
name
\x18\x01
\x01
(
\t\x12\x31\n\n
share_mode
\x18\x02
\x01
(
\x0e\x32\x1d
.caffe.ParamSpec.DimCheckMode
\x12\x12\n\x07
lr_mult
\x18\x03
\x01
(
\x02
:
\x01\x31\x12\x15\n\n
decay_mult
\x18\x04
\x01
(
\x02
:
\x01\x31\"
*
\n\x0c\x44
imCheckMode
\x12\n\n\x06
STRICT
\x10\x00\x12\x0e\n\n
PERMISSIVE
\x10\x01\"\x
cb\x19\n\x0e
LayerParameter
\x12\x0c\n\x04
name
\x18\x01
\x01
(
\t\x12\x0c\n\x04
type
\x18\x02
\x01
(
\t\x12\x0e\n\x06\x62
ottom
\x18\x03
\x03
(
\t\x12\x0b\n\x03
top
\x18\x04
\x03
(
\t\x12\x1c\n\x0c
mirror_stage
\x18\xa2\x01
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\x1b\n\x05
phase
\x18\n
\x01
(
\x0e\x32\x0c
.caffe.Phase
\x12\x13\n\x0b
loss_weight
\x18\x05
\x03
(
\x02\x12\x1f\n\x05
param
\x18\x06
\x03
(
\x0b\x32\x10
.caffe.ParamSpec
\x12\x1f\n\x05\x62
lobs
\x18\x07
\x03
(
\x0b\x32\x10
.caffe.BlobProto
\x12\x16\n\x0e
propagate_down
\x18\x0b
\x03
(
\x08\x12
$
\n\x07
include
\x18\x08
\x03
(
\x0b\x32\x13
.caffe.NetStateRule
\x12
$
\n\x07\x65
xclude
\x18\t
\x03
(
\x0b\x32\x13
.caffe.NetStateRule
\x12\x37\n\x0f
transform_param
\x18\x64
\x01
(
\x0b\x32\x1e
.caffe.TransformationParameter
\x12
(
\n\n
loss_param
\x18\x65
\x01
(
\x0b\x32\x14
.caffe.LossParameter
\x12\x30\n\x0e\x61\x63\x63
uracy_param
\x18\x66
\x01
(
\x0b\x32\x18
.caffe.AccuracyParameter
\x12
,
\n\x0c\x61
rgmax_param
\x18
g
\x01
(
\x0b\x32\x16
.caffe.ArgMaxParameter
\x12\x34\n\x10\x62\x61
tch_norm_param
\x18\x8b\x01
\x01
(
\x0b\x32\x19
.caffe.BatchNormParameter
\x12
)
\n\n
bias_param
\x18\x8d\x01
\x01
(
\x0b\x32\x14
.caffe.BiasParameter
\x12
,
\n\x0c\x63
oncat_param
\x18
h
\x01
(
\x0b\x32\x16
.caffe.ConcatParameter
\x12
?
\n\x16\x63
ontrastive_loss_param
\x18
i
\x01
(
\x0b\x32\x1f
.caffe.ContrastiveLossParameter
\x12\x36\n\x11\x63
onvolution_param
\x18
j
\x01
(
\x0b\x32\x1b
.caffe.ConvolutionParameter
\x12
)
\n\n
crop_param
\x18\x90\x01
\x01
(
\x0b\x32\x14
.caffe.CropParameter
\x12
(
\n\n
data_param
\x18
k
\x01
(
\x0b\x32\x14
.caffe.DataParameter
\x12
.
\n\r
dropout_param
\x18
l
\x01
(
\x0b\x32\x17
.caffe.DropoutParameter
\x12\x33\n\x10\x64
ummy_data_param
\x18
m
\x01
(
\x0b\x32\x19
.caffe.DummyDataParameter
\x12
.
\n\r
eltwise_param
\x18
n
\x01
(
\x0b\x32\x17
.caffe.EltwiseParameter
\x12\'\n\t
elu_param
\x18\x8c\x01
\x01
(
\x0b\x32\x13
.caffe.ELUParameter
\x12
+
\n\x0b\x65
mbed_param
\x18\x89\x01
\x01
(
\x0b\x32\x15
.caffe.EmbedParameter
\x12
&
\n\t
exp_param
\x18
o
\x01
(
\x0b\x32\x13
.caffe.ExpParameter
\x12
/
\n\r
flatten_param
\x18\x87\x01
\x01
(
\x0b\x32\x17
.caffe.FlattenParameter
\x12\x31\n\x0f
hdf5_data_param
\x18
p
\x01
(
\x0b\x32\x18
.caffe.HDF5DataParameter
\x12\x35\n\x11
hdf5_output_param
\x18
q
\x01
(
\x0b\x32\x1a
.caffe.HDF5OutputParameter
\x12\x33\n\x10
hinge_loss_param
\x18
r
\x01
(
\x0b\x32\x19
.caffe.HingeLossParameter
\x12\x33\n\x10
image_data_param
\x18
s
\x01
(
\x0b\x32\x19
.caffe.ImageDataParameter
\x12\x39\n\x13
infogain_loss_param
\x18
t
\x01
(
\x0b\x32\x1c
.caffe.InfogainLossParameter
\x12\x39\n\x13
inner_product_param
\x18
u
\x01
(
\x0b\x32\x1c
.caffe.InnerProductParameter
\x12
+
\n\x0b
input_param
\x18\x8f\x01
\x01
(
\x0b\x32\x15
.caffe.InputParameter
\x12\'\n\t
log_param
\x18\x86\x01
\x01
(
\x0b\x32\x13
.caffe.LogParameter
\x12
&
\n\t
lrn_param
\x18
v
\x01
(
\x0b\x32\x13
.caffe.LRNParameter
\x12\x35\n\x11
memory_data_param
\x18
w
\x01
(
\x0b\x32\x1a
.caffe.MemoryDataParameter
\x12
&
\n\t
mvn_param
\x18
x
\x01
(
\x0b\x32\x13
.caffe.MVNParameter
\x12\x33\n\x0f
parameter_param
\x18\x91\x01
\x01
(
\x0b\x32\x19
.caffe.ParameterParameter
\x12
.
\n\r
pooling_param
\x18
y
\x01
(
\x0b\x32\x17
.caffe.PoolingParameter
\x12
*
\n\x0b
power_param
\x18
z
\x01
(
\x0b\x32\x15
.caffe.PowerParameter
\x12
+
\n\x0b
prelu_param
\x18\x83\x01
\x01
(
\x0b\x32\x15
.caffe.PReLUParameter
\x12
-
\n\x0c
python_param
\x18\x82\x01
\x01
(
\x0b\x32\x16
.caffe.PythonParameter
\x12\x33\n\x0f
reduction_param
\x18\x88\x01
\x01
(
\x0b\x32\x19
.caffe.ReductionParameter
\x12
(
\n\n
relu_param
\x18
{
\x01
(
\x0b\x32\x14
.caffe.ReLUParameter
\x12
/
\n\r
reshape_param
\x18\x85\x01
\x01
(
\x0b\x32\x17
.caffe.ReshapeParameter
\x12
+
\n\x0b
scale_param
\x18\x8e\x01
\x01
(
\x0b\x32\x15
.caffe.ScaleParameter
\x12
.
\n\r
sigmoid_param
\x18
|
\x01
(
\x0b\x32\x17
.caffe.SigmoidParameter
\x12
.
\n\r
softmax_param
\x18
}
\x01
(
\x0b\x32\x17
.caffe.SoftmaxParameter
\x12\'\n\t
spp_param
\x18\x84\x01
\x01
(
\x0b\x32\x13
.caffe.SPPParameter
\x12
*
\n\x0b
slice_param
\x18
~
\x01
(
\x0b\x32\x15
.caffe.SliceParameter
\x12
(
\n\n
tanh_param
\x18\x7f
\x01
(
\x0b\x32\x14
.caffe.TanHParameter
\x12\x33\n\x0f
threshold_param
\x18\x80\x01
\x01
(
\x0b\x32\x19
.caffe.ThresholdParameter
\x12
)
\n\n
tile_param
\x18\x8a\x01
\x01
(
\x0b\x32\x14
.caffe.TileParameter
\x12\x36\n\x11
window_data_param
\x18\x81\x01
\x01
(
\x0b\x32\x1a
.caffe.WindowDataParameter
\x12\x36\n\x11
roi_pooling_param
\x18\x97\x01
\x01
(
\x0b\x32\x1a
.caffe.ROIPoolingParameter
\x12
;
\n\x14
smooth_l1_loss_param
\x18\x98\x01
\x01
(
\x0b\x32\x1c
.caffe.SmoothL1LossParameter
\x12\'\n\t
mpi_param
\x18\x99\x01
\x01
(
\x0b\x32\x13
.caffe.MPIParameter
\x12
/
\n\r
permute_param
\x18\x9a\x01
\x01
(
\x0b\x32\x17
.caffe.PermuteParameter
\x12\x33\n\x0f
normalize_param
\x18\x9b\x01
\x01
(
\x0b\x32\x19
.caffe.NormalizeParameter
\x12\x31\n\x0e
parallel_param
\x18\x9d\x01
\x01
(
\x0b\x32\x18
.caffe.ParallelParameter
\x12
-
\n\x0c
resize_param
\x18\x9e\x01
\x01
(
\x0b\x32\x16
.caffe.ResizeParameter
\x12\x36\n\x11\x65
xpand_dims_param
\x18\x9f\x01
\x01
(
\x0b\x32\x1a
.caffe.ExpandDimsParameter
\x12\x31\n\x0e
proposal_param
\x18\xa0\x01
\x01
(
\x0b\x32\x18
.caffe.ProposalParameter
\x12\x38\n\x12\x62\x61
tch_renorm_param
\x18\xa1\x01
\x01
(
\x0b\x32\x1b
.caffe.BatchRenormParameter
\x12\x38\n\x12\x64\x65
nse_concat_param
\x18\xa3\x01
\x01
(
\x0b\x32\x1b
.caffe.DenseConcatParameter
\x12\x34\n\x10\x66
ocal_loss_param
\x18\xa4\x01
\x01
(
\x0b\x32\x19
.caffe.FocalLossParameter
\x12
-
\n\x0c
gather_param
\x18\xa5\x01
\x01
(
\x0b\x32\x16
.caffe.GatherParameter
\x12\x34\n\x10
group_norm_param
\x18\xa6\x01
\x01
(
\x0b\x32\x19
.caffe.GroupNormParameter
\"\xa7\x02\n\x17
TransformationParameter
\x12\x10\n\x05
scale
\x18\x01
\x01
(
\x02
:
\x01\x31\x12\x15\n\x06
mirror
\x18\x02
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\x14\n\t
crop_size
\x18\x03
\x01
(
\r
:
\x01\x30\x12\x12\n\x07
padding
\x18\x0b
\x01
(
\r
:
\x01\x30\x12\x11\n\t
mean_file
\x18\x04
\x01
(
\t\x12\x12\n\n
mean_value
\x18\x05
\x03
(
\x02\x12\x1a\n\x0b\x66
orce_color
\x18\x06
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\x19\n\n
force_gray
\x18\x07
\x01
(
\x08
:
\x05\x66\x61
lse
\x12
!
\n\x12\x63
olor_augmentation
\x18\x08
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\x1b\n\x10
min_random_scale
\x18\t
\x01
(
\x02
:
\x01\x31\x12\x1b\n\x10
max_random_scale
\x18\n
\x01
(
\x02
:
\x01\x31\"\xf5\x01\n\r
LossParameter
\x12\x14\n\x0c
ignore_label
\x18\x01
\x01
(
\x05\x12\x44\n\r
normalization
\x18\x03
\x01
(
\x0e\x32
&.caffe.LossParameter.NormalizationMode:
\x05
VALID
\x12\x11\n\t
normalize
\x18\x02
\x01
(
\x08\x1a\'\n\x13\x45
xpandDimsParameter
\x12\x10\n\x04\x61
xis
\x18\x01
\x01
(
\x05
:
\x02
-1
\"
L
\n\x11
NormalizationMode
\x12\x08\n\x04\x46
ULL
\x10\x00\x12\t\n\x05
VALID
\x10\x01\x12\x0e\n\n
BATCH_SIZE
\x10\x02\x12\x08\n\x04
NONE
\x10\x03\x12\x08\n\x04
UNIT
\x10\x04\"
L
\n\x11\x41\x63\x63
uracyParameter
\x12\x10\n\x05
top_k
\x18\x01
\x01
(
\r
:
\x01\x31\x12\x0f\n\x04\x61
xis
\x18\x02
\x01
(
\x05
:
\x01\x31\x12\x14\n\x0c
ignore_label
\x18\x03
\x01
(
\x05\"
M
\n\x0f\x41
rgMaxParameter
\x12\x1a\n\x0b
out_max_val
\x18\x01
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\x10\n\x05
top_k
\x18\x02
\x01
(
\r
:
\x01\x31\x12\x0c\n\x04\x61
xis
\x18\x03
\x01
(
\x05\"
9
\n\x0f\x43
oncatParameter
\x12\x0f\n\x04\x61
xis
\x18\x02
\x01
(
\x05
:
\x01\x31\x12\x15\n\n
concat_dim
\x18\x01
\x01
(
\r
:
\x01\x31\"
h
\n\x12\x42\x61
tchNormParameter
\x12\x18\n\x10
use_global_stats
\x18\x01
\x01
(
\x08\x12
$
\n\x17
moving_average_fraction
\x18\x02
\x01
(
\x02
:
\x03\x30
.9
\x12\x12\n\x03\x65
ps
\x18\x03
\x01
(
\x02
:
\x05\x30
.001
\"
]
\n\r
BiasParameter
\x12\x0f\n\x04\x61
xis
\x18\x01
\x01
(
\x05
:
\x01\x31\x12\x13\n\x08
num_axes
\x18\x02
\x01
(
\x05
:
\x01\x31\x12
&
\n\x06\x66
iller
\x18\x03
\x01
(
\x0b\x32\x16
.caffe.FillerParameter
\"
L
\n\x18\x43
ontrastiveLossParameter
\x12\x11\n\x06
margin
\x18\x01
\x01
(
\x02
:
\x01\x31\x12\x1d\n\x0e
legacy_version
\x18\x02
\x01
(
\x08
:
\x05\x66\x61
lse
\"\xfc\x03\n\x14\x43
onvolutionParameter
\x12\x12\n\n
num_output
\x18\x01
\x01
(
\r\x12\x17\n\t
bias_term
\x18\x02
\x01
(
\x08
:
\x04
true
\x12\x0b\n\x03
pad
\x18\x03
\x03
(
\r\x12\x13\n\x0b
kernel_size
\x18\x04
\x03
(
\r\x12\x0e\n\x06
stride
\x18\x06
\x03
(
\r\x12\x10\n\x08\x64
ilation
\x18\x12
\x03
(
\r\x12\x10\n\x05
pad_h
\x18\t
\x01
(
\r
:
\x01\x30\x12\x10\n\x05
pad_w
\x18\n
\x01
(
\r
:
\x01\x30\x12\x10\n\x08
kernel_h
\x18\x0b
\x01
(
\r\x12\x10\n\x08
kernel_w
\x18\x0c
\x01
(
\r\x12\x10\n\x08
stride_h
\x18\r
\x01
(
\r\x12\x10\n\x08
stride_w
\x18\x0e
\x01
(
\r\x12\x10\n\x05
group
\x18\x05
\x01
(
\r
:
\x01\x31\x12
-
\n\r
weight_filler
\x18\x07
\x01
(
\x0b\x32\x16
.caffe.FillerParameter
\x12
+
\n\x0b\x62
ias_filler
\x18\x08
\x01
(
\x0b\x32\x16
.caffe.FillerParameter
\x12
;
\n\x06\x65
ngine
\x18\x0f
\x01
(
\x0e\x32\"
.caffe.ConvolutionParameter.Engine:
\x07\x44\x45\x46\x41
ULT
\x12\x0f\n\x04\x61
xis
\x18\x10
\x01
(
\x05
:
\x01\x31\x12\x1e\n\x0f\x66
orce_nd_im2col
\x18\x11
\x01
(
\x08
:
\x05\x66\x61
lse
\"
+
\n\x06\x45
ngine
\x12\x0b\n\x07\x44\x45\x46\x41
ULT
\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43
UDNN
\x10\x02\"
0
\n\r
CropParameter
\x12\x0f\n\x04\x61
xis
\x18\x01
\x01
(
\x05
:
\x01\x32\x12\x0e\n\x06
offset
\x18\x02
\x03
(
\r\"\xa4\x02\n\r
DataParameter
\x12\x0e\n\x06
source
\x18\x01
\x01
(
\t\x12\x12\n\n
batch_size
\x18\x04
\x01
(
\r\x12\x14\n\t
rand_skip
\x18\x07
\x01
(
\r
:
\x01\x30\x12\x31\n\x07\x62\x61\x63
kend
\x18\x08
\x01
(
\x0e\x32\x17
.caffe.DataParameter.DB:
\x07
LEVELDB
\x12\x10\n\x05
scale
\x18\x02
\x01
(
\x02
:
\x01\x31\x12\x11\n\t
mean_file
\x18\x03
\x01
(
\t\x12\x14\n\t
crop_size
\x18\x05
\x01
(
\r
:
\x01\x30\x12\x15\n\x06
mirror
\x18\x06
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\"\n\x13\x66
orce_encoded_color
\x18\t
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\x13\n\x08
prefetch
\x18\n
\x01
(
\r
:
\x01\x35\"\x1b\n\x02\x44\x42\x12\x0b\n\x07
LEVELDB
\x10\x00\x12\x08\n\x04
LMDB
\x10\x01\"
I
\n\x10\x44
ropoutParameter
\x12\x1a\n\r
dropout_ratio
\x18\x01
\x01
(
\x02
:
\x03\x30
.5
\x12\x19\n\x0b
scale_train
\x18\x02
\x01
(
\x08
:
\x04
true
\"\xa0\x01\n\x12\x44
ummyDataParameter
\x12
+
\n\x0b\x64\x61
ta_filler
\x18\x01
\x03
(
\x0b\x32\x16
.caffe.FillerParameter
\x12\x1f\n\x05
shape
\x18\x06
\x03
(
\x0b\x32\x10
.caffe.BlobShape
\x12\x0b\n\x03
num
\x18\x02
\x03
(
\r\x12\x10\n\x08\x63
hannels
\x18\x03
\x03
(
\r\x12\x0e\n\x06
height
\x18\x04
\x03
(
\r\x12\r\n\x05
width
\x18\x05
\x03
(
\r\"\xa5\x01\n\x10\x45
ltwiseParameter
\x12\x39\n\t
operation
\x18\x01
\x01
(
\x0e\x32
!.caffe.EltwiseParameter.EltwiseOp:
\x03
SUM
\x12\r\n\x05\x63
oeff
\x18\x02
\x03
(
\x02\x12\x1e\n\x10
stable_prod_grad
\x18\x03
\x01
(
\x08
:
\x04
true
\"\'\n\t
EltwiseOp
\x12\x08\n\x04
PROD
\x10\x00\x12\x07\n\x03
SUM
\x10\x01\x12\x07\n\x03
MAX
\x10\x02\"
\n\x0c\x45
LUParameter
\x12\x10\n\x05\x61
lpha
\x18\x01
\x01
(
\x02
:
\x01\x31\"\xac\x01\n\x0e\x45
mbedParameter
\x12\x12\n\n
num_output
\x18\x01
\x01
(
\r\x12\x11\n\t
input_dim
\x18\x02
\x01
(
\r\x12\x17\n\t
bias_term
\x18\x03
\x01
(
\x08
:
\x04
true
\x12
-
\n\r
weight_filler
\x18\x04
\x01
(
\x0b\x32\x16
.caffe.FillerParameter
\x12
+
\n\x0b\x62
ias_filler
\x18\x05
\x01
(
\x0b\x32\x16
.caffe.FillerParameter
\"
D
\n\x0c\x45
xpParameter
\x12\x10\n\x04\x62\x61
se
\x18\x01
\x01
(
\x02
:
\x02
-1
\x12\x10\n\x05
scale
\x18\x02
\x01
(
\x02
:
\x01\x31\x12\x10\n\x05
shift
\x18\x03
\x01
(
\x02
:
\x01\x30\"
9
\n\x10\x46
lattenParameter
\x12\x0f\n\x04\x61
xis
\x18\x01
\x01
(
\x05
:
\x01\x31\x12\x14\n\x08\x65
nd_axis
\x18\x02
\x01
(
\x05
:
\x02
-1
\"
O
\n\x11
HDF5DataParameter
\x12\x0e\n\x06
source
\x18\x01
\x01
(
\t\x12\x12\n\n
batch_size
\x18\x02
\x01
(
\r\x12\x16\n\x07
shuffle
\x18\x03
\x01
(
\x08
:
\x05\x66\x61
lse
\"
(
\n\x13
HDF5OutputParameter
\x12\x11\n\t
file_name
\x18\x01
\x01
(
\t\"
^
\n\x12
HingeLossParameter
\x12\x30\n\x04
norm
\x18\x01
\x01
(
\x0e\x32\x1e
.caffe.HingeLossParameter.Norm:
\x02
L1
\"\x16\n\x04
Norm
\x12\x06\n\x02
L1
\x10\x01\x12\x06\n\x02
L2
\x10\x02\"\x97\x02\n\x12
ImageDataParameter
\x12\x0e\n\x06
source
\x18\x01
\x01
(
\t\x12\x15\n\n
batch_size
\x18\x04
\x01
(
\r
:
\x01\x31\x12\x14\n\t
rand_skip
\x18\x07
\x01
(
\r
:
\x01\x30\x12\x16\n\x07
shuffle
\x18\x08
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\x15\n\n
new_height
\x18\t
\x01
(
\r
:
\x01\x30\x12\x14\n\t
new_width
\x18\n
\x01
(
\r
:
\x01\x30\x12\x16\n\x08
is_color
\x18\x0b
\x01
(
\x08
:
\x04
true
\x12\x10\n\x05
scale
\x18\x02
\x01
(
\x02
:
\x01\x31\x12\x11\n\t
mean_file
\x18\x03
\x01
(
\t\x12\x14\n\t
crop_size
\x18\x05
\x01
(
\r
:
\x01\x30\x12\x15\n\x06
mirror
\x18\x06
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\x15\n\x0b
root_folder
\x18\x0c
\x01
(
\t
:
\x00\"\'\n\x15
InfogainLossParameter
\x12\x0e\n\x06
source
\x18\x01
\x01
(
\t\"\xcb\x01\n\x15
InnerProductParameter
\x12\x12\n\n
num_output
\x18\x01
\x01
(
\r\x12\x17\n\t
bias_term
\x18\x02
\x01
(
\x08
:
\x04
true
\x12
-
\n\r
weight_filler
\x18\x03
\x01
(
\x0b\x32\x16
.caffe.FillerParameter
\x12
+
\n\x0b\x62
ias_filler
\x18\x04
\x01
(
\x0b\x32\x16
.caffe.FillerParameter
\x12\x0f\n\x04\x61
xis
\x18\x05
\x01
(
\x05
:
\x01\x31\x12\x18\n\t
transpose
\x18\x06
\x01
(
\x08
:
\x05\x66\x61
lse
\"
1
\n\x0e
InputParameter
\x12\x1f\n\x05
shape
\x18\x01
\x03
(
\x0b\x32\x10
.caffe.BlobShape
\"
D
\n\x0c
LogParameter
\x12\x10\n\x04\x62\x61
se
\x18\x01
\x01
(
\x02
:
\x02
-1
\x12\x10\n\x05
scale
\x18\x02
\x01
(
\x02
:
\x01\x31\x12\x10\n\x05
shift
\x18\x03
\x01
(
\x02
:
\x01\x30\"\xb8\x02\n\x0c
LRNParameter
\x12\x15\n\n
local_size
\x18\x01
\x01
(
\r
:
\x01\x35\x12\x10\n\x05\x61
lpha
\x18\x02
\x01
(
\x02
:
\x01\x31\x12\x12\n\x04\x62\x65
ta
\x18\x03
\x01
(
\x02
:
\x04\x30
.75
\x12\x44\n\x0b
norm_region
\x18\x04
\x01
(
\x0e\x32\x1e
.caffe.LRNParameter.NormRegion:
\x0f\x41\x43
ROSS_CHANNELS
\x12\x0c\n\x01
k
\x18\x05
\x01
(
\x02
:
\x01\x31\x12\x33\n\x06\x65
ngine
\x18\x06
\x01
(
\x0e\x32\x1a
.caffe.LRNParameter.Engine:
\x07\x44\x45\x46\x41
ULT
\"
5
\n\n
NormRegion
\x12\x13\n\x0f\x41\x43
ROSS_CHANNELS
\x10\x00\x12\x12\n\x0e
WITHIN_CHANNEL
\x10\x01\"
+
\n\x06\x45
ngine
\x12\x0b\n\x07\x44\x45\x46\x41
ULT
\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43
UDNN
\x10\x02\"\xbd\x01\n\x13
MemoryDataParameter
\x12\x12\n\n
batch_size
\x18\x01
\x01
(
\r\x12\x10\n\x08\x63
hannels
\x18\x02
\x01
(
\r\x12\x0e\n\x06
height
\x18\x03
\x01
(
\r\x12\r\n\x05
width
\x18\x04
\x01
(
\r\x12
;
\n\x05\x64
type
\x18\x05
\x01
(
\x0e\x32
#.caffe.MemoryDataParameter.DataType:
\x07\x46
LOAT32
\"
$
\n\x08\x44\x61
taType
\x12\x0b\n\x07\x46
LOAT32
\x10\x00\x12\x0b\n\x07\x46
LOAT16
\x10\x01\"
e
\n\x0c
MVNParameter
\x12
\n\x12
normalize_variance
\x18\x01
\x01
(
\x08
:
\x04
true
\x12\x1e\n\x0f\x61\x63
ross_channels
\x18\x02
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\x13\n\x03\x65
ps
\x18\x03
\x01
(
\x02
:
\x06\x31\x65
-009
\"
5
\n\x12
ParameterParameter
\x12\x1f\n\x05
shape
\x18\x01
\x01
(
\x0b\x32\x10
.caffe.BlobShape
\"\xa2\x03\n\x10
PoolingParameter
\x12\x35\n\x04
pool
\x18\x01
\x01
(
\x0e\x32\"
.caffe.PoolingParameter.PoolMethod:
\x03
MAX
\x12\x0e\n\x03
pad
\x18\x04
\x01
(
\r
:
\x01\x30\x12\x10\n\x05
pad_h
\x18\t
\x01
(
\r
:
\x01\x30\x12\x10\n\x05
pad_w
\x18\n
\x01
(
\r
:
\x01\x30\x12\x13\n\x0b
kernel_size
\x18\x02
\x01
(
\r\x12\x10\n\x08
kernel_h
\x18\x05
\x01
(
\r\x12\x10\n\x08
kernel_w
\x18\x06
\x01
(
\r\x12\x11\n\x06
stride
\x18\x03
\x01
(
\r
:
\x01\x31\x12\x10\n\x08
stride_h
\x18\x07
\x01
(
\r\x12\x10\n\x08
stride_w
\x18\x08
\x01
(
\r\x12\x37\n\x06\x65
ngine
\x18\x0b
\x01
(
\x0e\x32\x1e
.caffe.PoolingParameter.Engine:
\x07\x44\x45\x46\x41
ULT
\x12\x1d\n\x0e
global_pooling
\x18\x0c
\x01
(
\x08
:
\x05\x66\x61
lse
\"
.
\n\n
PoolMethod
\x12\x07\n\x03
MAX
\x10\x00\x12\x07\n\x03\x41
VE
\x10\x01\x12\x0e\n\n
STOCHASTIC
\x10\x02\"
+
\n\x06\x45
ngine
\x12\x0b\n\x07\x44\x45\x46\x41
ULT
\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43
UDNN
\x10\x02\"
Y
\n\x13
ROIPoolingParameter
\x12\x13\n\x08
pooled_h
\x18\x01
\x01
(
\r
:
\x01\x30\x12\x13\n\x08
pooled_w
\x18\x02
\x01
(
\r
:
\x01\x30\x12\x18\n\r
spatial_scale
\x18\x03
\x01
(
\x02
:
\x01\x31\"
F
\n\x0e
PowerParameter
\x12\x10\n\x05
power
\x18\x01
\x01
(
\x02
:
\x01\x31\x12\x10\n\x05
scale
\x18\x02
\x01
(
\x02
:
\x01\x31\x12\x10\n\x05
shift
\x18\x03
\x01
(
\x02
:
\x01\x30\"
g
\n\x0f
PythonParameter
\x12\x0e\n\x06
module
\x18\x01
\x01
(
\t\x12\r\n\x05
layer
\x18\x02
\x01
(
\t\x12\x13\n\t
param_str
\x18\x03
\x01
(
\t
:
\x00\x12
\n\x11
share_in_parallel
\x18\x04
\x01
(
\x08
:
\x05\x66\x61
lse
\"\xad\x01\n\x12
ReductionParameter
\x12
=
\n\t
operation
\x18\x01
\x01
(
\x0e\x32
%
.caffe.ReductionParameter.ReductionOp:
\x03
SUM
\x12\x0f\n\x04\x61
xis
\x18\x02
\x01
(
\x05
:
\x01\x30\x12\x10\n\x05\x63
oeff
\x18\x03
\x01
(
\x02
:
\x01\x31\"
5
\n\x0b
ReductionOp
\x12\x07\n\x03
SUM
\x10\x01\x12\x08\n\x04\x41
SUM
\x10\x02\x12\t\n\x05
SUMSQ
\x10\x03\x12\x08\n\x04
MEAN
\x10\x04\"\x8d\x01\n\r
ReLUParameter
\x12\x19\n\x0e
negative_slope
\x18\x01
\x01
(
\x02
:
\x01\x30\x12\x34\n\x06\x65
ngine
\x18\x02
\x01
(
\x0e\x32\x1b
.caffe.ReLUParameter.Engine:
\x07\x44\x45\x46\x41
ULT
\"
+
\n\x06\x45
ngine
\x12\x0b\n\x07\x44\x45\x46\x41
ULT
\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43
UDNN
\x10\x02\"
Z
\n\x10
ReshapeParameter
\x12\x1f\n\x05
shape
\x18\x01
\x01
(
\x0b\x32\x10
.caffe.BlobShape
\x12\x0f\n\x04\x61
xis
\x18\x02
\x01
(
\x05
:
\x01\x30\x12\x14\n\x08
num_axes
\x18\x03
\x01
(
\x05
:
\x02
-1
\"\xa5\x01\n\x0e
ScaleParameter
\x12\x0f\n\x04\x61
xis
\x18\x01
\x01
(
\x05
:
\x01\x31\x12\x13\n\x08
num_axes
\x18\x02
\x01
(
\x05
:
\x01\x31\x12
&
\n\x06\x66
iller
\x18\x03
\x01
(
\x0b\x32\x16
.caffe.FillerParameter
\x12\x18\n\t
bias_term
\x18\x04
\x01
(
\x08
:
\x05\x66\x61
lse
\x12
+
\n\x0b\x62
ias_filler
\x18\x05
\x01
(
\x0b\x32\x16
.caffe.FillerParameter
\"
x
\n\x10
SigmoidParameter
\x12\x37\n\x06\x65
ngine
\x18\x01
\x01
(
\x0e\x32\x1e
.caffe.SigmoidParameter.Engine:
\x07\x44\x45\x46\x41
ULT
\"
+
\n\x06\x45
ngine
\x12\x0b\n\x07\x44\x45\x46\x41
ULT
\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43
UDNN
\x10\x02\"
L
\n\x0e
SliceParameter
\x12\x0f\n\x04\x61
xis
\x18\x03
\x01
(
\x05
:
\x01\x31\x12\x13\n\x0b
slice_point
\x18\x02
\x03
(
\r\x12\x14\n\t
slice_dim
\x18\x01
\x01
(
\r
:
\x01\x31\"\x89\x01\n\x10
SoftmaxParameter
\x12\x37\n\x06\x65
ngine
\x18\x01
\x01
(
\x0e\x32\x1e
.caffe.SoftmaxParameter.Engine:
\x07\x44\x45\x46\x41
ULT
\x12\x0f\n\x04\x61
xis
\x18\x02
\x01
(
\x05
:
\x01\x31\"
+
\n\x06\x45
ngine
\x12\x0b\n\x07\x44\x45\x46\x41
ULT
\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43
UDNN
\x10\x02\"
r
\n\r
TanHParameter
\x12\x34\n\x06\x65
ngine
\x18\x01
\x01
(
\x0e\x32\x1b
.caffe.TanHParameter.Engine:
\x07\x44\x45\x46\x41
ULT
\"
+
\n\x06\x45
ngine
\x12\x0b\n\x07\x44\x45\x46\x41
ULT
\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43
UDNN
\x10\x02\"
T
\n\r
TileParameter
\x12\x0f\n\x04\x61
xis
\x18\x01
\x01
(
\x05
:
\x01\x31\x12\r\n\x05
tiles
\x18\x02
\x01
(
\x05\x12
#
\n\t
multiples
\x18\x03
\x01
(
\x0b\x32\x10
.caffe.BlobShape
\"
*
\n\x12
ThresholdParameter
\x12\x14\n\t
threshold
\x18\x01
\x01
(
\x02
:
\x01\x30\"\xc1\x02\n\x13
WindowDataParameter
\x12\x0e\n\x06
source
\x18\x01
\x01
(
\t\x12\x10\n\x05
scale
\x18\x02
\x01
(
\x02
:
\x01\x31\x12\x11\n\t
mean_file
\x18\x03
\x01
(
\t\x12\x12\n\n
batch_size
\x18\x04
\x01
(
\r\x12\x14\n\t
crop_size
\x18\x05
\x01
(
\r
:
\x01\x30\x12\x15\n\x06
mirror
\x18\x06
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\x19\n\x0c\x66
g_threshold
\x18\x07
\x01
(
\x02
:
\x03\x30
.5
\x12\x19\n\x0c\x62
g_threshold
\x18\x08
\x01
(
\x02
:
\x03\x30
.5
\x12\x19\n\x0b\x66
g_fraction
\x18\t
\x01
(
\x02
:
\x04\x30
.25
\x12\x16\n\x0b\x63
ontext_pad
\x18\n
\x01
(
\r
:
\x01\x30\x12\x17\n\t
crop_mode
\x18\x0b
\x01
(
\t
:
\x04
warp
\x12\x1b\n\x0c\x63\x61\x63
he_images
\x18\x0c
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\x15\n\x0b
root_folder
\x18\r
\x01
(
\t
:
\x00\"\xeb\x01\n\x0c
SPPParameter
\x12\x16\n\x0e
pyramid_height
\x18\x01
\x01
(
\r\x12\x31\n\x04
pool
\x18\x02
\x01
(
\x0e\x32\x1e
.caffe.SPPParameter.PoolMethod:
\x03
MAX
\x12\x33\n\x06\x65
ngine
\x18\x06
\x01
(
\x0e\x32\x1a
.caffe.SPPParameter.Engine:
\x07\x44\x45\x46\x41
ULT
\"
.
\n\n
PoolMethod
\x12\x07\n\x03
MAX
\x10\x00\x12\x07\n\x03\x41
VE
\x10\x01\x12\x0e\n\n
STOCHASTIC
\x10\x02\"
+
\n\x06\x45
ngine
\x12\x0b\n\x07\x44\x45\x46\x41
ULT
\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43
UDNN
\x10\x02\"\xe0\x13\n\x10
V1LayerParameter
\x12\x0e\n\x06\x62
ottom
\x18\x02
\x03
(
\t\x12\x0b\n\x03
top
\x18\x03
\x03
(
\t\x12\x0c\n\x04
name
\x18\x04
\x01
(
\t\x12
$
\n\x07
include
\x18
\x03
(
\x0b\x32\x13
.caffe.NetStateRule
\x12
$
\n\x07\x65
xclude
\x18
!
\x03
(
\x0b\x32\x13
.caffe.NetStateRule
\x12
/
\n\x04
type
\x18\x05
\x01
(
\x0e\x32
!.caffe.V1LayerParameter.LayerType
\x12\x1f\n\x05\x62
lobs
\x18\x06
\x03
(
\x0b\x32\x10
.caffe.BlobProto
\x12\x0e\n\x05
param
\x18\xe9\x07
\x03
(
\t\x12
>
\n\x0f\x62
lob_share_mode
\x18\xea\x07
\x03
(
\x0e\x32
$.caffe.V1LayerParameter.DimCheckMode
\x12\x10\n\x08\x62
lobs_lr
\x18\x07
\x03
(
\x02\x12\x14\n\x0c
weight_decay
\x18\x08
\x03
(
\x02\x12\x13\n\x0b
loss_weight
\x18
#
\x03
(
\x02\x12\x30\n\x0e\x61\x63\x63
uracy_param
\x18\x1b
\x01
(
\x0b\x32\x18
.caffe.AccuracyParameter
\x12
,
\n\x0c\x61
rgmax_param
\x18\x17
\x01
(
\x0b\x32\x16
.caffe.ArgMaxParameter
\x12
,
\n\x0c\x63
oncat_param
\x18\t
\x01
(
\x0b\x32\x16
.caffe.ConcatParameter
\x12
?
\n\x16\x63
ontrastive_loss_param
\x18
(
\x01
(
\x0b\x32\x1f
.caffe.ContrastiveLossParameter
\x12\x36\n\x11\x63
onvolution_param
\x18\n
\x01
(
\x0b\x32\x1b
.caffe.ConvolutionParameter
\x12
(
\n\n
data_param
\x18\x0b
\x01
(
\x0b\x32\x14
.caffe.DataParameter
\x12
.
\n\r
dropout_param
\x18\x0c
\x01
(
\x0b\x32\x17
.caffe.DropoutParameter
\x12\x33\n\x10\x64
ummy_data_param
\x18\x1a
\x01
(
\x0b\x32\x19
.caffe.DummyDataParameter
\x12
.
\n\r
eltwise_param
\x18\x18
\x01
(
\x0b\x32\x17
.caffe.EltwiseParameter
\x12
&
\n\t
exp_param
\x18
)
\x01
(
\x0b\x32\x13
.caffe.ExpParameter
\x12\x31\n\x0f
hdf5_data_param
\x18\r
\x01
(
\x0b\x32\x18
.caffe.HDF5DataParameter
\x12\x35\n\x11
hdf5_output_param
\x18\x0e
\x01
(
\x0b\x32\x1a
.caffe.HDF5OutputParameter
\x12\x33\n\x10
hinge_loss_param
\x18\x1d
\x01
(
\x0b\x32\x19
.caffe.HingeLossParameter
\x12\x33\n\x10
image_data_param
\x18\x0f
\x01
(
\x0b\x32\x19
.caffe.ImageDataParameter
\x12\x39\n\x13
infogain_loss_param
\x18\x10
\x01
(
\x0b\x32\x1c
.caffe.InfogainLossParameter
\x12\x39\n\x13
inner_product_param
\x18\x11
\x01
(
\x0b\x32\x1c
.caffe.InnerProductParameter
\x12
&
\n\t
lrn_param
\x18\x12
\x01
(
\x0b\x32\x13
.caffe.LRNParameter
\x12\x35\n\x11
memory_data_param
\x18\x16
\x01
(
\x0b\x32\x1a
.caffe.MemoryDataParameter
\x12
&
\n\t
mvn_param
\x18\"
\x01
(
\x0b\x32\x13
.caffe.MVNParameter
\x12
.
\n\r
pooling_param
\x18\x13
\x01
(
\x0b\x32\x17
.caffe.PoolingParameter
\x12
*
\n\x0b
power_param
\x18\x15
\x01
(
\x0b\x32\x15
.caffe.PowerParameter
\x12
(
\n\n
relu_param
\x18\x1e
\x01
(
\x0b\x32\x14
.caffe.ReLUParameter
\x12
.
\n\r
sigmoid_param
\x18
&
\x01
(
\x0b\x32\x17
.caffe.SigmoidParameter
\x12
.
\n\r
softmax_param
\x18\'
\x01
(
\x0b\x32\x17
.caffe.SoftmaxParameter
\x12
*
\n\x0b
slice_param
\x18\x1f
\x01
(
\x0b\x32\x15
.caffe.SliceParameter
\x12
(
\n\n
tanh_param
\x18
%
\x01
(
\x0b\x32\x14
.caffe.TanHParameter
\x12\x32\n\x0f
threshold_param
\x18\x19
\x01
(
\x0b\x32\x19
.caffe.ThresholdParameter
\x12\x35\n\x11
window_data_param
\x18\x14
\x01
(
\x0b\x32\x1a
.caffe.WindowDataParameter
\x12\x37\n\x0f
transform_param
\x18
$
\x01
(
\x0b\x32\x1e
.caffe.TransformationParameter
\x12
(
\n\n
loss_param
\x18
*
\x01
(
\x0b\x32\x14
.caffe.LossParameter
\x12
&
\n\x05
layer
\x18\x01
\x01
(
\x0b\x32\x17
.caffe.V0LayerParameter
\"\xd8\x04\n\t
LayerType
\x12\x08\n\x04
NONE
\x10\x00\x12\n\n\x06\x41\x42
SVAL
\x10
#
\x12\x0c\n\x08\x41\x43\x43
URACY
\x10\x01\x12\n\n\x06\x41
RGMAX
\x10\x1e\x12\x08\n\x04\x42
NLL
\x10\x02\x12\n\n\x06\x43
ONCAT
\x10\x03\x12\x14\n\x10\x43
ONTRASTIVE_LOSS
\x10
%
\x12\x0f\n\x0b\x43
ONVOLUTION
\x10\x04\x12\x08\n\x04\x44\x41
TA
\x10\x05\x12\x11\n\r
DECONVOLUTION
\x10\'\x12\x0b\n\x07\x44
ROPOUT
\x10\x06\x12\x0e\n\n
DUMMY_DATA
\x10
\x12\x12\n\x0e\x45
UCLIDEAN_LOSS
\x10\x07\x12\x0b\n\x07\x45
LTWISE
\x10\x19\x12\x07\n\x03\x45
XP
\x10
&
\x12\x0b\n\x07\x46
LATTEN
\x10\x08\x12\r\n\t
HDF5_DATA
\x10\t\x12\x0f\n\x0b
HDF5_OUTPUT
\x10\n\x12\x0e\n\n
HINGE_LOSS
\x10\x1c\x12\n\n\x06
IM2COL
\x10\x0b\x12\x0e\n\n
IMAGE_DATA
\x10\x0c\x12\x11\n\r
INFOGAIN_LOSS
\x10\r\x12\x11\n\r
INNER_PRODUCT
\x10\x0e\x12\x07\n\x03
LRN
\x10\x0f\x12\x0f\n\x0b
MEMORY_DATA
\x10\x1d\x12\x1d\n\x19
MULTINOMIAL_LOGISTIC_LOSS
\x10\x10\x12\x07\n\x03
MVN
\x10\"\x12\x0b\n\x07
POOLING
\x10\x11\x12\t\n\x05
POWER
\x10\x1a\x12\x08\n\x04
RELU
\x10\x12\x12\x0b\n\x07
SIGMOID
\x10\x13\x12\x1e\n\x1a
SIGMOID_CROSS_ENTROPY_LOSS
\x10\x1b\x12\x0b\n\x07
SILENCE
\x10
$
\x12\x0b\n\x07
SOFTMAX
\x10\x14\x12\x10\n\x0c
SOFTMAX_LOSS
\x10\x15\x12\t\n\x05
SPLIT
\x10\x16\x12\t\n\x05
SLICE
\x10
!
\x12\x08\n\x04
TANH
\x10\x17\x12\x0f\n\x0b
WINDOW_DATA
\x10\x18\x12\r\n\t
THRESHOLD
\x10\x1f\"
*
\n\x0c\x44
imCheckMode
\x12\n\n\x06
STRICT
\x10\x00\x12\x0e\n\n
PERMISSIVE
\x10\x01\"\xfd\x07\n\x10
V0LayerParameter
\x12\x0c\n\x04
name
\x18\x01
\x01
(
\t\x12\x0c\n\x04
type
\x18\x02
\x01
(
\t\x12\x12\n\n
num_output
\x18\x03
\x01
(
\r\x12\x16\n\x08\x62
iasterm
\x18\x04
\x01
(
\x08
:
\x04
true
\x12
-
\n\r
weight_filler
\x18\x05
\x01
(
\x0b\x32\x16
.caffe.FillerParameter
\x12
+
\n\x0b\x62
ias_filler
\x18\x06
\x01
(
\x0b\x32\x16
.caffe.FillerParameter
\x12\x0e\n\x03
pad
\x18\x07
\x01
(
\r
:
\x01\x30\x12\x12\n\n
kernelsize
\x18\x08
\x01
(
\r\x12\x10\n\x05
group
\x18\t
\x01
(
\r
:
\x01\x31\x12\x11\n\x06
stride
\x18\n
\x01
(
\r
:
\x01\x31\x12\x35\n\x04
pool
\x18\x0b
\x01
(
\x0e\x32\"
.caffe.V0LayerParameter.PoolMethod:
\x03
MAX
\x12\x1a\n\r
dropout_ratio
\x18\x0c
\x01
(
\x02
:
\x03\x30
.5
\x12\x15\n\n
local_size
\x18\r
\x01
(
\r
:
\x01\x35\x12\x10\n\x05\x61
lpha
\x18\x0e
\x01
(
\x02
:
\x01\x31\x12\x12\n\x04\x62\x65
ta
\x18\x0f
\x01
(
\x02
:
\x04\x30
.75
\x12\x0c\n\x01
k
\x18\x16
\x01
(
\x02
:
\x01\x31\x12\x0e\n\x06
source
\x18\x10
\x01
(
\t\x12\x10\n\x05
scale
\x18\x11
\x01
(
\x02
:
\x01\x31\x12\x10\n\x08
meanfile
\x18\x12
\x01
(
\t\x12\x11\n\t
batchsize
\x18\x13
\x01
(
\r\x12\x13\n\x08\x63
ropsize
\x18\x14
\x01
(
\r
:
\x01\x30\x12\x15\n\x06
mirror
\x18\x15
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\x1f\n\x05\x62
lobs
\x18\x32
\x03
(
\x0b\x32\x10
.caffe.BlobProto
\x12\x10\n\x08\x62
lobs_lr
\x18\x33
\x03
(
\x02\x12\x14\n\x0c
weight_decay
\x18\x34
\x03
(
\x02\x12\x14\n\t
rand_skip
\x18\x35
\x01
(
\r
:
\x01\x30\x12\x1d\n\x10\x64\x65
t_fg_threshold
\x18\x36
\x01
(
\x02
:
\x03\x30
.5
\x12\x1d\n\x10\x64\x65
t_bg_threshold
\x18\x37
\x01
(
\x02
:
\x03\x30
.5
\x12\x1d\n\x0f\x64\x65
t_fg_fraction
\x18\x38
\x01
(
\x02
:
\x04\x30
.25
\x12\x1a\n\x0f\x64\x65
t_context_pad
\x18
:
\x01
(
\r
:
\x01\x30\x12\x1b\n\r
det_crop_mode
\x18
;
\x01
(
\t
:
\x04
warp
\x12\x12\n\x07
new_num
\x18
<
\x01
(
\x05
:
\x01\x30\x12\x17\n\x0c
new_channels
\x18
=
\x01
(
\x05
:
\x01\x30\x12\x15\n\n
new_height
\x18
>
\x01
(
\x05
:
\x01\x30\x12\x14\n\t
new_width
\x18
?
\x01
(
\x05
:
\x01\x30\x12\x1d\n\x0e
shuffle_images
\x18
@
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\x15\n\n
concat_dim
\x18\x41
\x01
(
\r
:
\x01\x31\x12\x36\n\x11
hdf5_output_param
\x18\xe9\x07
\x01
(
\x0b\x32\x1a
.caffe.HDF5OutputParameter
\"
.
\n\n
PoolMethod
\x12\x07\n\x03
MAX
\x10\x00\x12\x07\n\x03\x41
VE
\x10\x01\x12\x0e\n\n
STOCHASTIC
\x10\x02\"
W
\n\x0e
PReLUParameter
\x12
&
\n\x06\x66
iller
\x18\x01
\x01
(
\x0b\x32\x16
.caffe.FillerParameter
\x12\x1d\n\x0e\x63
hannel_shared
\x18\x02
\x01
(
\x08
:
\x05\x66\x61
lse
\"
)
\n\x15
SmoothL1LossParameter
\x12\x10\n\x05
sigma
\x18\x01
\x01
(
\x02
:
\x01\x31\"
H
\n\x0c
MPIParameter
\x12\x0f\n\x04
root
\x18\x01
\x01
(
\r
:
\x01\x30\x12\x12\n\x07\x63
omm_id
\x18\x02
\x01
(
\x04
:
\x01\x30\x12\x13\n\x08
group_id
\x18\x03
\x01
(
\x04
:
\x01\x30\"
!
\n\x10
PermuteParameter
\x12\r\n\x05
order
\x18\x01
\x03
(
\r\"\x93\x01\n\x12
NormalizeParameter
\x12\x1c\n\x0e\x61\x63
ross_spatial
\x18\x01
\x01
(
\x08
:
\x04
true
\x12
,
\n\x0c
scale_filler
\x18\x02
\x01
(
\x0b\x32\x16
.caffe.FillerParameter
\x12\x1c\n\x0e\x63
hannel_shared
\x18\x03
\x01
(
\x08
:
\x04
true
\x12\x13\n\x03\x65
ps
\x18\x04
\x01
(
\x02
:
\x06\x31\x65
-010
\"
_
\n\x11
ParallelParameter
\x12\x16\n\x07
shuffle
\x18\x01
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\x18\n\t
node_step
\x18\x02
\x01
(
\x08
:
\x05\x66\x61
lse
\x12\x18\n\t
partition
\x18\x03
\x01
(
\x08
:
\x05\x66\x61
lse
\"
R
\n\x0f
ResizeParameter
\x12\x1f\n\x05
shape
\x18\x01
\x01
(
\x0b\x32\x10
.caffe.BlobShape
\x12\x0e\n\x02\x66
x
\x18\x02
\x01
(
\x02
:
\x02
-1
\x12\x0e\n\x02\x66
y
\x18\x03
\x01
(
\x02
:
\x02
-1
\"\'\n\x13\x45
xpandDimsParameter
\x12\x10\n\x04\x61
xis
\x18\x01
\x01
(
\x05
:
\x02
-1
\"\x90\x02\n\x11
ProposalParameter
\x12\x0e\n\x06
stride
\x18\x01
\x03
(
\x05\x12\r\n\x05
ratio
\x18\x02
\x03
(
\x02\x12\r\n\x05
scale
\x18\x03
\x03
(
\x02\x12\x1b\n\r
pre_nms_top_n
\x18\x04
\x01
(
\r
:
\x04\x36\x30\x30\x30\x12\x1b\n\x0e
post_nms_top_n
\x18\x05
\x01
(
\r
:
\x03\x33\x30\x30\x12\x17\n\n
nms_thresh
\x18\x06
\x01
(
\x02
:
\x03\x30
.7
\x12\x14\n\x08
min_size
\x18\x07
\x01
(
\r
:
\x02\x31\x36\x12\x14\n\t
min_level
\x18\x08
\x01
(
\x05
:
\x01\x32\x12\x14\n\t
max_level
\x18\t
\x01
(
\x05
:
\x01\x35\x12\x1c\n\x0f\x63\x61
nonical_scale
\x18\n
\x01
(
\x05
:
\x03\x32\x32\x34\x12\x1a\n\x0f\x63\x61
nonical_level
\x18\x0b
\x01
(
\x05
:
\x01\x34\"\xa6\x01\n\x14\x42\x61
tchRenormParameter
\x12\x18\n\x10
use_global_stats
\x18\x01
\x01
(
\x08\x12
$
\n\x17
moving_average_fraction
\x18\x02
\x01
(
\x02
:
\x03\x30
.9
\x12\x12\n\x03\x65
ps
\x18\x03
\x01
(
\x02
:
\x05\x30
.001
\x12\x10\n\x05
r_max
\x18\x04
\x01
(
\x02
:
\x01\x33\x12\x10\n\x05\x64
_max
\x18\x05
\x01
(
\x02
:
\x01\x35\x12\x16\n\x07
t_delta
\x18\x06
\x01
(
\x02
:
\x05\x30
.001
\"
?
\n\x14\x44\x65
nseConcatParameter
\x12\x0f\n\x04\x61
xis
\x18\x01
\x01
(
\x05
:
\x01\x31\x12\x16\n\x0b
growth_rate
\x18\x02
\x01
(
\x05
:
\x01\x30\"
c
\n\x12\x46
ocalLossParameter
\x12\x12\n\x05\x61
lpha
\x18\x01
\x01
(
\x02
:
\x03\x30
.5
\x12\x10\n\x05
gamma
\x18\x02
\x01
(
\x02
:
\x01\x30\x12\x13\n\x03\x65
ps
\x18\x03
\x01
(
\x02
:
\x06\x31\x65
-010
\x12\x12\n\x06
neg_id
\x18\x04
\x01
(
\x05
:
\x02
-1
\"\"\n\x0f
GatherParameter
\x12\x0f\n\x04\x61
xis
\x18\x01
\x01
(
\x05
:
\x01\x30\"
{
\n\x12
GroupNormParameter
\x12\x18\n\x10
use_global_stats
\x18\x01
\x01
(
\x08\x12
$
\n\x17
moving_average_fraction
\x18\x02
\x01
(
\x02
:
\x03\x30
.9
\x12\x12\n\x03\x65
ps
\x18\x03
\x01
(
\x02
:
\x05\x30
.001
\x12\x11\n\x05
group
\x18\x05
\x01
(
\r
:
\x02\x33\x32
*
\x1c\n\x05
Phase
\x12\t\n\x05
TRAIN
\x10\x00\x12\x08\n\x04
TEST
\x10\x01
'
)
)
_sym_db
.
RegisterFileDescriptor
(
DESCRIPTOR
)
...
...
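For readability, the message schema encoded in the regenerated bytes above, restated as a sketch (recovered from the serialized descriptor and confirmed by the FieldDescriptor entries further down; the authoritative definition is the caffe.proto change in this commit):

    # GroupNormParameter as encoded in the serialized descriptor above
    # (type 8 = bool, type 2 = float, type 13 = uint32, label 1 = optional):
    #
    #   message GroupNormParameter {
    #     optional bool   use_global_stats        = 1;
    #     optional float  moving_average_fraction = 2 [default = 0.9];
    #     optional float  eps                     = 3 [default = 0.001];
    #     optional uint32 group                   = 5 [default = 32];
    #   }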
[The following hunks only renumber serialized_start/serialized_end for the enum descriptors after the serialized descriptor grew; old range -> new range:]

  _PHASE                              17463-17491 -> 17642-17670
  _LOSSPARAMETER_NORMALIZATIONMODE     6525-6601  ->  6579-6655
  _CONVOLUTIONPARAMETER_ENGINE         7564-7607  ->  7618-7661
  _DATAPARAMETER_DB                    7925-7952  ->  7979-8006
  _ELTWISEPARAMETER_ELTWISEOP          8319-8358  ->  8373-8412
  _HINGELOSSPARAMETER_NORM             8893-8915  ->  8947-8969
  _LRNPARAMETER_NORMREGION             9782-9835  ->  9836-9889
  _LRNPARAMETER_ENGINE                 7564-7607  ->  7618-7661
  _MEMORYDATAPARAMETER_DATATYPE       10036-10072 -> 10090-10126
  _POOLINGPARAMETER_POOLMETHOD        10560-10606 -> 10614-10660
  _POOLINGPARAMETER_ENGINE             7564-7607  ->  7618-7661
  _REDUCTIONPARAMETER_REDUCTIONOP     11042-11095 -> 11096-11149
  _RELUPARAMETER_ENGINE                7564-7607  ->  7618-7661
  _SIGMOIDPARAMETER_ENGINE             7564-7607  ->  7618-7661
  _SOFTMAXPARAMETER_ENGINE             7564-7607  ->  7618-7661
  _TANHPARAMETER_ENGINE                7564-7607  ->  7618-7661
  _SPPPARAMETER_POOLMETHOD            10560-10606 -> 10614-10660
  _SPPPARAMETER_ENGINE                 7564-7607  ->  7618-7661
  _V1LAYERPARAMETER_LAYERTYPE         14534-15134 -> 14588-15188
  _V0LAYERPARAMETER_POOLMETHOD        10560-10606 -> 10614-10660

[Each hunk leaves the surrounding context ('],', containing_type=None, options=None, ')') and the matching _sym_db.RegisterEnumDescriptor(...) call unchanged.]
...
...
@@ -2261,6 +2261,13 @@ _LAYERPARAMETER = _descriptor.Descriptor(
     message_type=None, enum_type=None, containing_type=None,
     is_extension=False, extension_scope=None,
     options=None),
+  _descriptor.FieldDescriptor(
+    name='group_norm_param', full_name='caffe.LayerParameter.group_norm_param', index=71,
+    number=166, type=11, cpp_type=10, label=1,
+    has_default_value=False, default_value=None,
+    message_type=None, enum_type=None, containing_type=None,
+    is_extension=False, extension_scope=None,
+    options=None),
   ],
   extensions=[
   ],
...
...
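A small sketch of how the regenerated module exposes the field added above (not part of the diff; the import path is an assumption about this repository's layout):

    from dragon.vm.caffe.proto import caffe_pb2

    # Look up the new LayerParameter field through the generated descriptor pool.
    field = caffe_pb2.LayerParameter.DESCRIPTOR.fields_by_name['group_norm_param']
    print(field.number)              # 166, as declared in the FieldDescriptor
    print(field.message_type.name)   # 'GroupNormParameter'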
@@ -2273,7 +2280,7 @@ _LAYERPARAMETER = _descriptor.Descriptor(
oneofs
=
[
],
serialized_start
=
2834
,
serialized_end
=
6
055
,
serialized_end
=
6
109
,
)
...
...
@@ -2372,8 +2379,8 @@ _TRANSFORMATIONPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
6
058
,
serialized_end
=
6
353
,
serialized_start
=
6
112
,
serialized_end
=
6
407
,
)
...
...
@@ -2402,8 +2409,8 @@ _LOSSPARAMETER_EXPANDDIMSPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
6
484
,
serialized_end
=
65
23
,
serialized_start
=
6
538
,
serialized_end
=
65
77
,
)
_LOSSPARAMETER
=
_descriptor
.
Descriptor
(
...
...
@@ -2446,8 +2453,8 @@ _LOSSPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
6
356
,
serialized_end
=
66
01
,
serialized_start
=
6
410
,
serialized_end
=
66
55
,
)
...
...
@@ -2490,8 +2497,8 @@ _ACCURACYPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
66
03
,
serialized_end
=
6
679
,
serialized_start
=
66
57
,
serialized_end
=
6
733
,
)
...
...
@@ -2534,8 +2541,8 @@ _ARGMAXPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
6
681
,
serialized_end
=
6
758
,
serialized_start
=
6
735
,
serialized_end
=
6
812
,
)
...
...
@@ -2571,8 +2578,8 @@ _CONCATPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
6
760
,
serialized_end
=
68
17
,
serialized_start
=
6
814
,
serialized_end
=
68
71
,
)
...
...
@@ -2615,8 +2622,8 @@ _BATCHNORMPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
68
19
,
serialized_end
=
69
23
,
serialized_start
=
68
73
,
serialized_end
=
69
77
,
)
...
...
@@ -2659,8 +2666,8 @@ _BIASPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
69
25
,
serialized_end
=
70
18
,
serialized_start
=
69
79
,
serialized_end
=
70
72
,
)
...
...
@@ -2696,8 +2703,8 @@ _CONTRASTIVELOSSPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
70
20
,
serialized_end
=
7
096
,
serialized_start
=
70
74
,
serialized_end
=
7
150
,
)
...
...
@@ -2846,8 +2853,8 @@ _CONVOLUTIONPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
7
099
,
serialized_end
=
76
07
,
serialized_start
=
7
153
,
serialized_end
=
76
61
,
)
...
...
@@ -2883,8 +2890,8 @@ _CROPPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
76
09
,
serialized_end
=
7
657
,
serialized_start
=
76
63
,
serialized_end
=
7
711
,
)
...
...
@@ -2977,8 +2984,8 @@ _DATAPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
7
660
,
serialized_end
=
7952
,
serialized_start
=
7
714
,
serialized_end
=
8006
,
)
...
...
@@ -3014,8 +3021,8 @@ _DROPOUTPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
7954
,
serialized_end
=
80
27
,
serialized_start
=
8008
,
serialized_end
=
80
81
,
)
...
...
@@ -3079,8 +3086,8 @@ _DUMMYDATAPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
80
30
,
serialized_end
=
8
190
,
serialized_start
=
80
84
,
serialized_end
=
8
244
,
)
...
...
@@ -3124,8 +3131,8 @@ _ELTWISEPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
8
193
,
serialized_end
=
8
358
,
serialized_start
=
8
247
,
serialized_end
=
8
412
,
)
...
...
@@ -3154,8 +3161,8 @@ _ELUPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
8
360
,
serialized_end
=
8
392
,
serialized_start
=
8
414
,
serialized_end
=
8
446
,
)
...
...
@@ -3212,8 +3219,8 @@ _EMBEDPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
8
395
,
serialized_end
=
8
567
,
serialized_start
=
8
449
,
serialized_end
=
8
621
,
)
...
...
@@ -3256,8 +3263,8 @@ _EXPPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
8
569
,
serialized_end
=
86
37
,
serialized_start
=
8
623
,
serialized_end
=
86
91
,
)
...
...
@@ -3293,8 +3300,8 @@ _FLATTENPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
86
39
,
serialized_end
=
8
696
,
serialized_start
=
86
93
,
serialized_end
=
8
750
,
)
...
...
@@ -3337,8 +3344,8 @@ _HDF5DATAPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
8
698
,
serialized_end
=
8
777
,
serialized_start
=
8
752
,
serialized_end
=
8
831
,
)
...
...
@@ -3367,8 +3374,8 @@ _HDF5OUTPUTPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
8
779
,
serialized_end
=
88
19
,
serialized_start
=
8
833
,
serialized_end
=
88
73
,
)
...
...
@@ -3398,8 +3405,8 @@ _HINGELOSSPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
88
21
,
serialized_end
=
89
15
,
serialized_start
=
88
75
,
serialized_end
=
89
69
,
)
...
...
@@ -3505,8 +3512,8 @@ _IMAGEDATAPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
89
18
,
serialized_end
=
9
197
,
serialized_start
=
89
72
,
serialized_end
=
9
251
,
)
...
...
@@ -3535,8 +3542,8 @@ _INFOGAINLOSSPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
9
199
,
serialized_end
=
92
38
,
serialized_start
=
9
253
,
serialized_end
=
92
92
,
)
...
...
@@ -3600,8 +3607,8 @@ _INNERPRODUCTPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
92
41
,
serialized_end
=
94
44
,
serialized_start
=
92
95
,
serialized_end
=
94
98
,
)
...
...
@@ -3630,8 +3637,8 @@ _INPUTPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
9
446
,
serialized_end
=
9
495
,
serialized_start
=
9
500
,
serialized_end
=
9
549
,
)
...
...
@@ -3674,8 +3681,8 @@ _LOGPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
9
497
,
serialized_end
=
9
565
,
serialized_start
=
9
551
,
serialized_end
=
9
619
,
)
...
...
@@ -3741,8 +3748,8 @@ _LRNPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
9
568
,
serialized_end
=
9
880
,
serialized_start
=
9
622
,
serialized_end
=
9
934
,
)
...
...
@@ -3800,8 +3807,8 @@ _MEMORYDATAPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
9
883
,
serialized_end
=
10
072
,
serialized_start
=
9
937
,
serialized_end
=
10
126
,
)
...
...
@@ -3844,8 +3851,8 @@ _MVNPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
10
074
,
serialized_end
=
10
175
,
serialized_start
=
10
128
,
serialized_end
=
10
229
,
)
...
...
@@ -3874,8 +3881,8 @@ _PARAMETERPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
10
177
,
serialized_end
=
102
30
,
serialized_start
=
10
231
,
serialized_end
=
102
84
,
)
...
...
@@ -3983,8 +3990,8 @@ _POOLINGPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
102
33
,
serialized_end
=
10
651
,
serialized_start
=
102
87
,
serialized_end
=
10
705
,
)
...
...
@@ -4027,8 +4034,8 @@ _ROIPOOLINGPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
10
653
,
serialized_end
=
107
42
,
serialized_start
=
10
707
,
serialized_end
=
107
96
,
)
...
...
@@ -4071,8 +4078,8 @@ _POWERPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
107
44
,
serialized_end
=
108
14
,
serialized_start
=
107
98
,
serialized_end
=
108
68
,
)
...
...
@@ -4122,8 +4129,8 @@ _PYTHONPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
108
16
,
serialized_end
=
109
19
,
serialized_start
=
108
70
,
serialized_end
=
109
73
,
)
...
...
@@ -4167,8 +4174,8 @@ _REDUCTIONPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
109
22
,
serialized_end
=
11
095
,
serialized_start
=
109
76
,
serialized_end
=
11
149
,
)
...
...
@@ -4205,8 +4212,8 @@ _RELUPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
11
098
,
serialized_end
=
112
39
,
serialized_start
=
11
152
,
serialized_end
=
112
93
,
)
...
...
@@ -4249,8 +4256,8 @@ _RESHAPEPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
112
41
,
serialized_end
=
113
31
,
serialized_start
=
112
95
,
serialized_end
=
113
85
,
)
...
...
@@ -4307,8 +4314,8 @@ _SCALEPARAMETER = _descriptor.Descriptor(
extension_ranges
=
[],
oneofs
=
[
],
serialized_start
=
113
34
,
serialized_end
=
11
499
,
serialized_start
=
113
88
,
+  serialized_end=11553,
 )
...
@@ -4338,8 +4345,8 @@ _SIGMOIDPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=11501,
-  serialized_end=11621,
+  serialized_start=11555,
+  serialized_end=11675,
 )
...
@@ -4382,8 +4389,8 @@ _SLICEPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=11623,
-  serialized_end=11699,
+  serialized_start=11677,
+  serialized_end=11753,
 )
...
@@ -4420,8 +4427,8 @@ _SOFTMAXPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=11702,
-  serialized_end=11839,
+  serialized_start=11756,
+  serialized_end=11893,
 )
...
@@ -4451,8 +4458,8 @@ _TANHPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=11841,
-  serialized_end=11955,
+  serialized_start=11895,
+  serialized_end=12009,
 )
...
@@ -4495,8 +4502,8 @@ _TILEPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=11957,
-  serialized_end=12041,
+  serialized_start=12011,
+  serialized_end=12095,
 )
...
@@ -4525,8 +4532,8 @@ _THRESHOLDPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=12043,
-  serialized_end=12085,
+  serialized_start=12097,
+  serialized_end=12139,
 )
...
@@ -4639,8 +4646,8 @@ _WINDOWDATAPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=12088,
-  serialized_end=12409,
+  serialized_start=12142,
+  serialized_end=12463,
 )
...
@@ -4685,8 +4692,8 @@ _SPPPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=12412,
-  serialized_end=12647,
+  serialized_start=12466,
+  serialized_end=12701,
 )
...
@@ -5011,8 +5018,8 @@ _V1LAYERPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=12650,
-  serialized_end=15178,
+  serialized_start=12704,
+  serialized_end=15232,
 )
...
@@ -5301,8 +5308,8 @@ _V0LAYERPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=15181,
-  serialized_end=16202,
+  serialized_start=15235,
+  serialized_end=16256,
 )
...
@@ -5338,8 +5345,8 @@ _PRELUPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=16204,
-  serialized_end=16291,
+  serialized_start=16258,
+  serialized_end=16345,
 )
...
@@ -5368,8 +5375,8 @@ _SMOOTHL1LOSSPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=16293,
-  serialized_end=16334,
+  serialized_start=16347,
+  serialized_end=16388,
 )
...
@@ -5412,8 +5419,8 @@ _MPIPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=16336,
-  serialized_end=16408,
+  serialized_start=16390,
+  serialized_end=16462,
 )
...
@@ -5442,8 +5449,8 @@ _PERMUTEPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=16410,
-  serialized_end=16443,
+  serialized_start=16464,
+  serialized_end=16497,
 )
...
@@ -5493,8 +5500,8 @@ _NORMALIZEPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=16446,
-  serialized_end=16593,
+  serialized_start=16500,
+  serialized_end=16647,
 )
...
@@ -5537,8 +5544,8 @@ _PARALLELPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=16595,
-  serialized_end=16690,
+  serialized_start=16649,
+  serialized_end=16744,
 )
...
@@ -5581,8 +5588,8 @@ _RESIZEPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=16692,
-  serialized_end=16774,
+  serialized_start=16746,
+  serialized_end=16828,
 )
...
@@ -5611,8 +5618,8 @@ _EXPANDDIMSPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=6484,
-  serialized_end=6523,
+  serialized_start=6538,
+  serialized_end=6577,
 )
...
@@ -5711,8 +5718,8 @@ _PROPOSALPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=16818,
-  serialized_end=17090,
+  serialized_start=16872,
+  serialized_end=17144,
 )
...
@@ -5776,8 +5783,8 @@ _BATCHRENORMPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=17093,
-  serialized_end=17259,
+  serialized_start=17147,
+  serialized_end=17313,
 )
...
@@ -5813,8 +5820,8 @@ _DENSECONCATPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=17261,
-  serialized_end=17324,
+  serialized_start=17315,
+  serialized_end=17378,
 )
...
@@ -5864,8 +5871,8 @@ _FOCALLOSSPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=17326,
-  serialized_end=17425,
+  serialized_start=17380,
+  serialized_end=17479,
 )
...
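Every hunk in this run is the same mechanical edit: protoc regenerated caffe_pb2.py after caffe.proto gained the GroupNorm definitions, so each untouched descriptor keeps its size and only its byte offsets into the serialized file descriptor shift by one constant. A quick sanity check in Python, a sketch using (old, new) offset pairs copied from the hunks above and below:

    # (old, new) serialized_start offsets taken from the surrounding hunks
    pairs = [(11501, 11555), (11623, 11677), (12043, 12097),
             (16410, 16464), (17427, 17481)]
    deltas = {new - old for old, new in pairs}
    assert deltas == {54}    # every descriptor shifted by the same 54 bytes
    print(deltas)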
@@ -5894,8 +5901,59 @@ _GATHERPARAMETER = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=17427,
-  serialized_end=17461,
+  serialized_start=17481,
+  serialized_end=17515,
 )
+_GROUPNORMPARAMETER = _descriptor.Descriptor(
+  name='GroupNormParameter',
+  full_name='caffe.GroupNormParameter',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='use_global_stats', full_name='caffe.GroupNormParameter.use_global_stats', index=0,
+      number=1, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='moving_average_fraction', full_name='caffe.GroupNormParameter.moving_average_fraction', index=1,
+      number=2, type=2, cpp_type=6, label=1,
+      has_default_value=True, default_value=0.9,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='eps', full_name='caffe.GroupNormParameter.eps', index=2,
+      number=3, type=2, cpp_type=6, label=1,
+      has_default_value=True, default_value=0.001,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='group', full_name='caffe.GroupNormParameter.group', index=3,
+      number=5, type=13, cpp_type=3, label=1,
+      has_default_value=True, default_value=32,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=17517,
+  serialized_end=17640,
+)
 _BLOBPROTO.fields_by_name['shape'].message_type = _BLOBSHAPE
...
@@ -5986,6 +6044,7 @@ _LAYERPARAMETER.fields_by_name['batch_renorm_param'].message_type = _BATCHRENORM
 _LAYERPARAMETER.fields_by_name['dense_concat_param'].message_type = _DENSECONCATPARAMETER
 _LAYERPARAMETER.fields_by_name['focal_loss_param'].message_type = _FOCALLOSSPARAMETER
 _LAYERPARAMETER.fields_by_name['gather_param'].message_type = _GATHERPARAMETER
+_LAYERPARAMETER.fields_by_name['group_norm_param'].message_type = _GROUPNORMPARAMETER
 _LOSSPARAMETER_EXPANDDIMSPARAMETER.containing_type = _LOSSPARAMETER
 _LOSSPARAMETER.fields_by_name['normalization'].enum_type = _LOSSPARAMETER_NORMALIZATIONMODE
 _LOSSPARAMETER_NORMALIZATIONMODE.containing_type = _LOSSPARAMETER
...
@@ -6156,6 +6215,7 @@ DESCRIPTOR.message_types_by_name['BatchRenormParameter'] = _BATCHRENORMPARAMETER
 DESCRIPTOR.message_types_by_name['DenseConcatParameter'] = _DENSECONCATPARAMETER
 DESCRIPTOR.message_types_by_name['FocalLossParameter'] = _FOCALLOSSPARAMETER
 DESCRIPTOR.message_types_by_name['GatherParameter'] = _GATHERPARAMETER
+DESCRIPTOR.message_types_by_name['GroupNormParameter'] = _GROUPNORMPARAMETER
 DESCRIPTOR.enum_types_by_name['Phase'] = _PHASE
 BlobShape = _reflection.GeneratedProtocolMessageType('BlobShape', (_message.Message,), dict(
...
@@ -6677,6 +6737,13 @@ GatherParameter = _reflection.GeneratedProtocolMessageType('GatherParameter', (_
   ))
 _sym_db.RegisterMessage(GatherParameter)
+GroupNormParameter = _reflection.GeneratedProtocolMessageType('GroupNormParameter', (_message.Message,), dict(
+  DESCRIPTOR = _GROUPNORMPARAMETER,
+  __module__ = 'caffe_pb2'
+  # @@protoc_insertion_point(class_scope:caffe.GroupNormParameter)
+  ))
+_sym_db.RegisterMessage(GroupNormParameter)
 _BLOBSHAPE.fields_by_name['dim'].has_options = True
 _BLOBSHAPE.fields_by_name['dim']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
...
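With the descriptor built and the message class registered, GroupNormParameter behaves like any other generated parameter. A minimal sketch of the new fields and the defaults declared in the descriptor above; the import path is assumed from this repository's layout:

    from dragon.vm.caffe.proto import caffe_pb2

    param = caffe_pb2.GroupNormParameter()
    print(param.use_global_stats)         # False
    print(param.group)                    # 32
    print(param.moving_average_fraction)  # ~0.9  (stored as a float field)
    print(param.eps)                      # ~0.001
    param.group = 16                      # e.g. override the default group count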
Dragon/python/setup.py
@@ -36,7 +36,7 @@ find_packages('dragon')
 find_modules()
 setup(name = 'dragon',
-      version = '0.2.1.12',
+      version = '0.2.1.13',
       description = 'Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework',
       url='https://github.com/neopenx/Dragon',
       author='Ting Pan',
...
Dragon/src/operators/ndarray/reshape_op.cc

#include "operators/ndarray/reshape_op.h"
#include "core/workspace.h"

namespace dragon {

string dim_string(const vector<TIndex>& shape) {
    std::stringstream ss;
    ss << "(";
    for (int i = 0; i < shape.size() - 1; i++) ss << shape[i] << ",";
    ss << shape[shape.size() - 1] << ")";
    return ss.str();
}

template <class Context>
void ReshapeOp<Context>::RunOnDevice() {
    if (shape_desc.size() > 0 || shape_value.size() > 0) {
        require_shape.resize(std::max(shape_desc.size(), shape_value.size()));
        for (int i = 0; i < require_shape.size(); i++)
            require_shape[i] = shape(i);
    } else if (shape_like_desc.size() > 0) {
        Tensor* shape_like_tensor = ws()->GetTensor(shape_like_desc);
        require_shape.resize(shape_like_tensor->ndim());
        for (int i = 0; i < require_shape.size(); i++)
            require_shape[i] = shape_like_tensor->dim(i);
    } else { LOG(FATAL) << "Missing the require shape."; }

    vector<TIndex> Xdims = input(0).dims();
    new_shape.resize(require_shape.size());
    int infer_dim = -1;
    TIndex total_count = 1;
-    for (int i = 0; i < shape.size(); i++) {
-        if (shape[i] == 0) {
+    for (int i = 0; i < require_shape.size(); i++) {
+        if (require_shape[i] == 0) {
             //  handle unchanged dim
             CHECK_LT(i, (int)Xdims.size())
                 << "\nDim(" << i << ") is out of the Xdims range of (0, "
                 << Xdims.size() << ").";
             new_shape[i] = Xdims[i];
-        } else if (shape[i] > 0) {
+        } else if (require_shape[i] > 0) {
             //  handle reseted dim
-            new_shape[i] = shape[i];
+            new_shape[i] = require_shape[i];
         } else {
             //  handle inferred dim
             CHECK_EQ(infer_dim, -1)
                 << "\nDim(" << infer_dim << ") required infer before"
                 << "\ncould not infer for dim(" << i << ") both.";
...
@@ -35,7 +54,8 @@ void ReshapeOp<Context>::RunOnDevice() {
     for (int i = 0; i < new_shape.size(); i++) {
         if (new_shape[i] == -1) {
             CHECK_EQ(input(0).count() % total_count, 0)
-                << "\nCan not change the total size.";
+                << "\nCan not change the total size: "
+                << input(0).dim_string() << " -> " << dim_string(new_shape);
             new_shape[i] = input(0).count() / total_count;
             total_count *= new_shape[i];
             break;
...
@@ -43,7 +63,8 @@ void ReshapeOp<Context>::RunOnDevice() {
         }
     }
     CHECK_EQ(total_count, input(0).count())
-        << "\nCan not change the total size.";
+        << "\nCan not change the total size."
+        << input(0).dim_string() << " -> " << dim_string(new_shape);
     output(0)->Reshape(new_shape);
     output(0)->Share(input(0));
 }
...
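The loop above encodes Reshape's three per-dimension rules: 0 keeps the corresponding input dimension, a positive value resets it, and -1 defers it to be inferred from the remaining element count, with at most one inferred dimension allowed. A sketch of the same rule in Python, with a hypothetical helper name:

    def infer_reshape(x_dims, require_shape):
        new_shape, infer_dim, known = [], -1, 1
        for i, d in enumerate(require_shape):
            if d == 0:                      # keep the unchanged dim
                new_shape.append(x_dims[i])
            elif d > 0:                     # reset the dim
                new_shape.append(d)
            else:                           # -1: infer this dim later
                assert infer_dim == -1, "could not infer two dims"
                infer_dim = i
                new_shape.append(-1)
            if new_shape[-1] > 0:
                known *= new_shape[-1]
        count = 1
        for d in x_dims:
            count *= d
        if infer_dim >= 0:
            assert count % known == 0, "can not change the total size"
            new_shape[infer_dim] = count // known
            known *= new_shape[infer_dim]
        assert known == count, "can not change the total size"
        return new_shape

    assert infer_reshape([2, 3, 4], [0, -1]) == [2, 12]
    assert infer_reshape([6, 4], [3, -1, 2]) == [3, 4, 2]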
Dragon/src/operators/norm/batch_norm_op.cc
...
@@ -19,9 +19,9 @@ void BatchNormOp<Context>::TrainingRunWithType() {
     auto* tVar_data = var->template mutable_data<T, Context>();
     auto* Xdata = input(0).template data<T, Context>();
     auto* Ydata = output(0)->template mutable_data<T, Context>();
-    auto* NSMul_data = multiplier->template data<T, Context>();
-    auto* SMul_data = spatial_multiplier->template data<T, Context>();
+    auto* NMul_data = num_multiplier->template data<T, Context>();
+    auto* SMul_data = spatial_multiplier->template data<T, Context>();
+    auto* NSMul_data = multiplier->template data<T, Context>();
     auto* NC_data = num_by_chans.template mutable_data<T, Context>();
     auto* Std_data = stddev->template mutable_data<T, Context>();
     ctx().template Copy<T, Context, Context>(output(0)->count(), Ydata, Xdata);
...
@@ -127,9 +127,9 @@ void BatchNormOp<Context>::InferenceRunWithType() {
     auto* tVar_data = var->template mutable_data<T, Context>();
     auto* Xdata = input(0).template data<T, Context>();
     auto* Ydata = output(0)->template mutable_data<T, Context>();
-    auto* NSMul_data = multiplier->template data<T, Context>();
-    auto* SMul_data = spatial_multiplier->template data<T, Context>();
+    auto* NMul_data = num_multiplier->template data<T, Context>();
+    auto* SMul_data = spatial_multiplier->template data<T, Context>();
+    auto* NSMul_data = multiplier->template data<T, Context>();
     auto* NC_data = num_by_chans.template mutable_data<T, Context>();
     auto* Std_data = stddev->template mutable_data<T, Context>();
     ctx().template Copy<T, Context, Context>(input(0).count(), Ydata, Xdata);
...
@@ -248,9 +248,9 @@ void BatchNormGradientOp<Context>::TrainingRunWithType() {
     auto* dXdata = output(0)->template mutable_data<T, Context>();
     auto* Std_data = stddev->template mutable_data<T, Context>();
     auto* tVar_data = var->template mutable_data<T, Context>();
-    auto* NSMul_data = multiplier->template data<T, Context>();
-    auto* SMul_data = spatial_multiplier->template data<T, Context>();
+    auto* NMul_data = num_multiplier->template data<T, Context>();
+    auto* SMul_data = spatial_multiplier->template data<T, Context>();
+    auto* NSMul_data = multiplier->template data<T, Context>();
     auto* NC_data = num_by_chans.template mutable_data<T, Context>();
     if (data_format == "NCHW") {
...
@@ -337,9 +337,9 @@ void BatchNormGradientOp<Context>::InferenceRunWithType() {
     auto* dXdata = output(0)->template mutable_data<T, Context>();
     auto* Std_data = stddev->template mutable_data<T, Context>();
     auto* tVar_data = var->template mutable_data<T, Context>();
-    auto* NSMul_data = multiplier->template data<T, Context>();
-    auto* SMul_data = spatial_multiplier->template data<T, Context>();
+    auto* NMul_data = num_multiplier->template data<T, Context>();
+    auto* SMul_data = spatial_multiplier->template data<T, Context>();
+    auto* NSMul_data = multiplier->template data<T, Context>();
     auto* NC_data = num_by_chans.template mutable_data<T, Context>();
     if (data_format == "NCHW") {
...
Dragon/src/operators/norm/fused_batch_norm.cc
...
@@ -23,9 +23,9 @@ void FusedBatchNormOp<Context>::TrainingRunWithType() {
     auto* tVar_data = var->template mutable_data<T, Context>();
     auto* Xdata = input(0).template data<T, Context>();
     auto* Ydata = output(0)->template mutable_data<T, Context>();
-    auto* NSMul_data = multiplier->template data<T, Context>();
-    auto* SMul_data = spatial_multiplier->template data<T, Context>();
+    auto* NMul_data = num_multiplier->template data<T, Context>();
+    auto* SMul_data = spatial_multiplier->template data<T, Context>();
+    auto* NSMul_data = multiplier->template data<T, Context>();
     auto* NC_data = num_by_chans.template mutable_data<T, Context>();
     auto* Std_data = stddev->template mutable_data<T, Context>();
     ctx().template Copy<T, Context, Context>(output(0)->count(), Ydata, Xdata);
...
@@ -153,9 +153,9 @@ void FusedBatchNormOp<Context>::InferenceRunWithType() {
     auto* tVar_data = var->template mutable_data<T, Context>();
     auto* Xdata = input(0).template data<T, Context>();
     auto* Ydata = output(0)->template mutable_data<T, Context>();
-    auto* NSMul_data = multiplier->template data<T, Context>();
-    auto* SMul_data = spatial_multiplier->template data<T, Context>();
+    auto* NMul_data = num_multiplier->template data<T, Context>();
+    auto* SMul_data = spatial_multiplier->template data<T, Context>();
+    auto* NSMul_data = multiplier->template data<T, Context>();
     auto* NC_data = num_by_chans.template mutable_data<T, Context>();
     auto* Std_data = stddev->template mutable_data<T, Context>();
     ctx().template Copy<T, Context, Context>(input(0).count(), Ydata, Xdata);
...
@@ -296,9 +296,9 @@ void FusedBatchNormGradientOp<Context>::TrainingRunWithType() {
     auto* Std_data = stddev->template mutable_data<T, Context>();
     auto* tMean_data = mean->template mutable_data<T, Context>();
     auto* tVar_data = var->template mutable_data<T, Context>();
-    auto* NSMul_data = multiplier->template data<T, Context>();
-    auto* SMul_data = spatial_multiplier->template data<T, Context>();
+    auto* NMul_data = num_multiplier->template data<T, Context>();
+    auto* SMul_data = spatial_multiplier->template data<T, Context>();
+    auto* NSMul_data = multiplier->template data<T, Context>();
     auto* NC_data = num_by_chans.template mutable_data<T, Context>();
     auto* XNorm_data = x_norm->template data<T, Context>();
...
@@ -436,9 +436,9 @@ void FusedBatchNormGradientOp<Context>::InferenceRunWithType() {
     auto* dYdata = input(-1).template data<T, Context>();
     auto* Sdata = input(3).template data<T, Context>();
     auto* tVar_data = var->template mutable_data<T, Context>();
-    auto* NSMul_data = multiplier->template data<T, Context>();
-    auto* SMul_data = spatial_multiplier->template data<T, Context>();
+    auto* NMul_data = num_multiplier->template data<T, Context>();
+    auto* SMul_data = spatial_multiplier->template data<T, Context>();
+    auto* NSMul_data = multiplier->template data<T, Context>();
     auto* NC_data = num_by_chans.template mutable_data<T, Context>();
     //  gradient w.r.t. scale
...
Dragon/src/operators/norm/fused_group_norm.cc  (new file, 0 → 100644)

#include "operators/norm/group_norm_op.h"
#include "core/workspace.h"
#include "utils/math_functions.h"
#include "utils/filler.h"

namespace dragon {

template <class Context> template <typename T>
void FusedGroupNormOp<Context>::TrainingRunWithType() {
    INIT_MULTIPLIER(multiplier, NS);
    INIT_MULTIPLIER(num_multiplier, N);
    INIT_MULTIPLIER(spatial_multiplier, S);
    INIT_MULTIPLIER(cgs_multiplier, CGS);
    TENSOR_FILL(input(1), vector<TIndex>(1, NG));    //  history_mean
    TENSOR_FILL(input(2), vector<TIndex>(1, NG));    //  history_var
    TENSOR_FILL(input(3), vector<TIndex>(1, C));     //  scale
    TENSOR_FILL(input(4), vector<TIndex>(1, C));     //  bias
    auto* hMean_data = input(1).template mutable_data<T, Context>();
    auto* hVar_data = input(2).template mutable_data<T, Context>();
    auto* Sdata = input(3).template data<T, Context>();
    auto* Bdata = input(4).template data<T, Context>();
    auto* tMean_data = mean->template mutable_data<T, Context>();
    auto* tVar_data = var->template mutable_data<T, Context>();
    auto* Xdata = input(0).template data<T, Context>();
    auto* Ydata = output(0)->template mutable_data<T, Context>();
    auto* NMul_data = num_multiplier->template data<T, Context>();
    auto* SMul_data = spatial_multiplier->template data<T, Context>();
    auto* NSMul_data = multiplier->template data<T, Context>();
    auto* CGSMul_data = cgs_multiplier->template data<T, Context>();
    auto* NC_data = num_by_chans.template mutable_data<T, Context>();
    auto* Std_data = stddev->template mutable_data<T, Context>();
    ctx().template Copy<T, Context, Context>(output(0)->count(), Ydata, Xdata);

    //  compute mean
    if (data_format == "NCHW") {
        math::Gemv<T, Context>(CblasNoTrans, NG, CGS, 1.0 / CGS, Xdata, CGSMul_data, 0, tMean_data);
    } else if (data_format == "NHWC") {
        NOT_IMPLEMENTED;
    }

    //  subtract mean
    if (data_format == "NCHW") {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NG, CGS, 1, -1.0, tMean_data, CGSMul_data, 1.0, Ydata);
    } else if (data_format == "NHWC") {
        NOT_IMPLEMENTED;
    }

    //  compute variance
    //  note that we use VAR(X) = E((X - EX) ^ 2)
    math::Square<T, Context>(output(0)->count(), Ydata, Std_data);
    if (data_format == "NCHW") {
        math::Gemv<T, Context>(CblasNoTrans, NG, CGS, 1.0 / CGS, Std_data, CGSMul_data, 0.0, tVar_data);
    } else if (data_format == "NHWC") {
        NOT_IMPLEMENTED;
    }

    //  compute moving average
    if (!is_recomputing) {
        //  History(X) = (1 - momentum) * Cur(X) + momentum * History(X)
        math::Axpby<T, Context>(mean->count(), 1.0 - momentum, tMean_data, momentum, hMean_data);
        math::Axpby<T, Context>(var->count(), 1.0 - momentum, tVar_data, momentum, hVar_data);
    }

    //  compute stddev
    math::AddScalar<T, Context>(var->count(), eps, tVar_data);
    math::Sqrt<T, Context>(var->count(), tVar_data, tVar_data);

    //  divide by stddev
    if (data_format == "NCHW") {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NG, CGS, 1, 1.0, tVar_data, CGSMul_data, 0.0, Std_data);
    } else if (data_format == "NHWC") {
        NOT_IMPLEMENTED;
    }
    math::Div<T, Context>(output(0)->count(), Ydata, Std_data, Ydata);

    //  store x_norm for backward
    auto* XNorm_data = x_norm->template mutable_data<T, Context>();
    ctx().template Copy<T, Context, Context>(output(0)->count(), XNorm_data, Ydata);

    //  scale
    if (data_format == "NCHW") {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, Sdata, 0.0, NC_data);
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data);
    } else if (data_format == "NHWC") {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, Sdata, 0.0, Std_data);
    }
    math::Mul<T, Context>(output(0)->count(), Ydata, Std_data, Ydata);

    //  shift
    if (data_format == "NCHW") {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, Bdata, 0.0, NC_data);
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 1.0, Ydata);
    } else if (data_format == "NHWC") {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, Bdata, 1.0, Ydata);
    }
    ws()->ReleaseBuffer(stddev);
}

template <class Context> template <typename T>
void FusedGroupNormOp<Context>::InferenceRunWithType() {
    INIT_MULTIPLIER(multiplier, NS);
    INIT_MULTIPLIER(num_multiplier, N);
    INIT_MULTIPLIER(spatial_multiplier, S);
    INIT_MULTIPLIER(cgs_multiplier, CGS);
    TENSOR_FILL(input(1), vector<TIndex>(1, NG));    //  history_mean
    TENSOR_FILL(input(2), vector<TIndex>(1, NG));    //  history_var
    TENSOR_FILL(input(3), vector<TIndex>(1, C));     //  scale
    TENSOR_FILL(input(4), vector<TIndex>(1, C));     //  bias
    auto* hMean_data = input(1).template mutable_data<T, Context>();
    auto* hVar_data = input(2).template mutable_data<T, Context>();
    auto* Sdata = input(3).template data<T, Context>();
    auto* Bdata = input(4).template data<T, Context>();
    auto* tMean_data = mean->template mutable_data<T, Context>();
    auto* tVar_data = var->template mutable_data<T, Context>();
    auto* Xdata = input(0).template data<T, Context>();
    auto* Ydata = output(0)->template mutable_data<T, Context>();
    auto* NMul_data = num_multiplier->template data<T, Context>();
    auto* SMul_data = spatial_multiplier->template data<T, Context>();
    auto* NSMul_data = multiplier->template data<T, Context>();
    auto* CGSMul_data = cgs_multiplier->template data<T, Context>();
    auto* NC_data = num_by_chans.template mutable_data<T, Context>();
    auto* Std_data = stddev->template mutable_data<T, Context>();
    ctx().template Copy<T, Context, Context>(input(0).count(), Ydata, Xdata);
    ctx().template Copy<T, Context, Context>(mean->count(), tMean_data, hMean_data);
    ctx().template Copy<T, Context, Context>(var->count(), tVar_data, hVar_data);

    //  subtract mean
    if (data_format == "NCHW") {
        math::Gemv<T, Context>(CblasNoTrans, NG, CGS, 1.0 / CGS, Xdata, CGSMul_data, 0, tMean_data);
    } else if (data_format == "NHWC") {
        NOT_IMPLEMENTED;
    }

    //  compute stddev
    math::AddScalar<T, Context>(var->count(), eps, tVar_data);
    math::Sqrt<T, Context>(var->count(), tVar_data, tVar_data);

    //  divide by stddev
    if (data_format == "NCHW") {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NG, CGS, 1, 1.0, tVar_data, CGSMul_data, 0.0, Std_data);
    } else if (data_format == "NHWC") {
        NOT_IMPLEMENTED;
    }
    math::Div<T, Context>(output(0)->count(), Ydata, Std_data, Ydata);

    //  scale
    if (data_format == "NCHW") {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, Sdata, 0.0, NC_data);
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data);
    } else if (data_format == "NHWC") {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, Sdata, 0.0, Std_data);
    }
    math::Mul<T, Context>(output(0)->count(), Ydata, Std_data, Ydata);

    //  shift
    if (data_format == "NCHW") {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, Bdata, 0.0, NC_data);
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 1.0, Ydata);
    } else if (data_format == "NHWC") {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, Bdata, 1.0, Ydata);
    }
    ws()->ReleaseBuffer(stddev);
}

template <class Context>
void FusedGroupNormOp<Context>::Setup() {
    //  determine the mode
    if (use_stats == -1) use_global_stats = phase() == "TEST" ? true : false;
    else use_global_stats = use_stats == 1 ? true : false;
    is_recomputing = ws()->GetTensor("/opt/mirror_stage/recompute_flag")
                         ->template data<bool, CPUContext>()[0];

    //  determine the data format
    TIndex channel_axis = axis;
    data_format = "NCHW";
    if (channel_axis == -1) channel_axis += (int)input(0).ndim();
    if (channel_axis + 1 == (int)input(0).ndim()) data_format = "NHWC";
    if (input(0).ndim() == 2) data_format = "NCHW";
    N = input(0).dim(0);
    C = input(0).dim(channel_axis);
    CHECK_EQ(C % group, 0) << "\nThe " << C << " channels "
                           << "can not be split into " << group << " groups.";
    if (group == C && input(0).ndim() == 2)    //  InstanceNorm
        LOG(WARNING) << "The 2d input will output all zeros.";
    NC = N * C;
    NG = N * group;
    S = input(0).count() / NC;
    CGS = (C / group) * S;
    NS = N * S;

    //  make resource
    mean = ws()->CreateTensor("/mnt/" + anchor() + "/gn_mean");
    var = ws()->CreateTensor("/mnt/" + anchor() + "/gn_var");
    x_norm = ws()->CreateTensor("/mnt/" + anchor() + "/gn_x_norm");
    stddev = ws()->GetBuffer();
    stddev->ReshapeLike(input(0));

    //  reshape
    mean->Reshape(vector<TIndex>(1, NG));
    var->Reshape(vector<TIndex>(1, NG));
    num_by_chans.Reshape(vector<TIndex>(1, NC));
    x_norm->ReshapeLike(input(0));
    output(0)->ReshapeLike(input(0));
}

template <class Context>
void FusedGroupNormOp<Context>::RunOnDevice() {
    Setup();
    if (input(0).template IsType<float>()) {
        if (use_global_stats) InferenceRunWithType<float>();
        else TrainingRunWithType<float>();
    }
#ifdef WITH_CUDA_FP16
    else if (input(0).template IsType<float16>()) {
        if (use_global_stats) InferenceRunWithType<float16>();
        else TrainingRunWithType<float16>();
    }
#endif
    else LOG(FATAL) << "Unsupported input types.";
}

DEPLOY_CPU(FusedGroupNorm);
#ifdef WITH_CUDA
DEPLOY_CUDA(FusedGroupNorm);
#endif
OPERATOR_SCHEMA(FusedGroupNorm).NumInputs(5).NumOutputs(1);

template <class Context> template <typename T>
void FusedGroupNormGradientOp<Context>::TrainingRunWithType() {
    INIT_MULTIPLIER(multiplier, NS);
    INIT_MULTIPLIER(num_multiplier, N);
    INIT_MULTIPLIER(spatial_multiplier, S);
    INIT_MULTIPLIER(cgs_multiplier, CGS);
    auto* dYdata = input(-1).template data<T, Context>();
    auto* dXdata = output(0)->template mutable_data<T, Context>();
    auto* Sdata = input(3).template data<T, Context>();
    auto* Std_data = stddev->template mutable_data<T, Context>();
    auto* tMean_data = mean->template mutable_data<T, Context>();
    auto* tVar_data = var->template mutable_data<T, Context>();
    auto* NMul_data = num_multiplier->template data<T, Context>();
    auto* SMul_data = spatial_multiplier->template data<T, Context>();
    auto* NSMul_data = multiplier->template data<T, Context>();
    auto* CGSMul_data = cgs_multiplier->template data<T, Context>();
    auto* NC_data = num_by_chans.template mutable_data<T, Context>();
    auto* XNorm_data = x_norm->template data<T, Context>();

    //  gradient w.r.t. scale
    if (output(1)->name() != "ignore") {
        auto* dSdata = output(1)->template mutable_data<T, Context>();
        math::Mul<T, Context>(stddev->count(), XNorm_data, dYdata, Std_data);
        if (data_format == "NCHW") {
            math::Gemv<T, Context>(CblasNoTrans, NC, S, 1.0, Std_data, SMul_data, 0.0, NC_data);
            math::Gemv<T, Context>(CblasTrans, N, C, 1.0, NC_data, NMul_data, 1.0, dSdata);
        } else if (data_format == "NHWC") {
            math::Gemv<T, Context>(CblasTrans, NS, C, 1.0, Std_data, NSMul_data, 1.0, dSdata);
        }
    }

    //  gradient w.r.t. bias
    if (output(2)->name() != "ignore") {
        auto* dBdata = output(2)->template mutable_data<T, Context>();
        if (data_format == "NCHW") {
            math::Gemv<T, Context>(CblasNoTrans, NC, S, 1.0, dYdata, SMul_data, 0.0, NC_data);
            math::Gemv<T, Context>(CblasTrans, N, C, 1.0, NC_data, NMul_data, 1.0, dBdata);
        } else if (data_format == "NHWC") {
            math::Gemv<T, Context>(CblasTrans, NS, C, 1.0, dYdata, NSMul_data, 1.0, dBdata);
        }
    }

    //  gradient w.r.t. x
    if (output(0)->name() != "ignore") {
        //  scale * dY
        if (data_format == "NCHW") {
            math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, Sdata, 0.0, NC_data);
            math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data);
        } else if (data_format == "NHWC") {
            math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, Sdata, 0.0, Std_data);
        }
        math::Mul<T, Context>(stddev->count(), Std_data, dYdata, Std_data);

        //  sum of x_hat * (dl / dx_hat)
        math::Mul<T, Context>(stddev->count(), XNorm_data, Std_data, dXdata);
        if (data_format == "NCHW") {
            math::Gemv<T, Context>(CblasNoTrans, NG, CGS, 1.0, dXdata, CGSMul_data, 0.0, tMean_data);
        } else if (data_format == "NHWC") {
            NOT_IMPLEMENTED;
        }

        //  x_hat times the sum
        if (data_format == "NCHW") {
            math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NG, CGS, 1, 1.0, tMean_data, CGSMul_data, 0.0, dXdata);
        } else if (data_format == "NHWC") {
            NOT_IMPLEMENTED;
        }
        math::Mul<T, Context>(stddev->count(), XNorm_data, dXdata, dXdata);

        //  subtract the average of x_hat times the sum
        if (data_format == "NCHW") {
            math::Gemv<T, Context>(CblasNoTrans, NG, CGS, 1.0, Std_data, CGSMul_data, 0.0, tMean_data);
            math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NG, CGS, 1, 1.0, tMean_data, CGSMul_data, 1.0, dXdata);
        } else if (data_format == "NHWC") {
            NOT_IMPLEMENTED;
        }
        math::Axpby<T, Context>(stddev->count(), 1.0, Std_data, -1.0 / CGS, dXdata);

        //  multiply with the inverse std
        if (data_format == "NCHW") {
            math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NG, CGS, 1, 1.0, tVar_data, CGSMul_data, 0.0, Std_data);
        } else if (data_format == "NHWC") {
            NOT_IMPLEMENTED;
        }
        //  divide by stddev
        math::Div<T, Context>(output(0)->count(), dXdata, Std_data, dXdata);
    }
    ws()->ReleaseBuffer(stddev);
}

template <class Context> template <typename T>
void FusedGroupNormGradientOp<Context>::InferenceRunWithType() {
    INIT_MULTIPLIER(multiplier, NS);
    INIT_MULTIPLIER(num_multiplier, N);
    INIT_MULTIPLIER(spatial_multiplier, S);
    INIT_MULTIPLIER(cgs_multiplier, CGS);
    auto* dYdata = input(-1).template data<T, Context>();
    auto* Sdata = input(3).template data<T, Context>();
    auto* tVar_data = var->template mutable_data<T, Context>();
    auto* NMul_data = num_multiplier->template data<T, Context>();
    auto* SMul_data = spatial_multiplier->template data<T, Context>();
    auto* NSMul_data = multiplier->template data<T, Context>();
    auto* CGSMul_data = cgs_multiplier->template data<T, Context>();
    auto* NC_data = num_by_chans.template mutable_data<T, Context>();

    //  gradient w.r.t. scale
    if (output(1)->name() != "ignore")
        LOG(FATAL) << "The gamma should be fixed if using global stats.";

    //  gradient w.r.t. bias
    if (output(2)->name() != "ignore") {
        auto* dBdata = output(2)->template mutable_data<T, Context>();
        if (data_format == "NCHW") {
            math::Gemv<T, Context>(CblasNoTrans, NC, S, 1.0, dYdata, SMul_data, 0.0, NC_data);
            math::Gemv<T, Context>(CblasTrans, N, C, 1.0, NC_data, NMul_data, 1.0, dBdata);
        } else if (data_format == "NHWC") {
            math::Gemv<T, Context>(CblasTrans, NS, C, 1.0, dYdata, NSMul_data, 1.0, dBdata);
        }
    }

    //  gradient w.r.t. x
    if (output(0)->name() != "ignore") {
        auto* dXdata = output(0)->template mutable_data<T, Context>();
        auto* Std_data = stddev->template mutable_data<T, Context>();
        //  divide scale by stddev
        math::Div<T, Context>(var->count(), Sdata, tVar_data, tVar_data);
        //  compute dE/dY \cdot (scale / std(X))
        if (data_format == "NCHW") {
            math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NG, CGS, 1, 1.0, tVar_data, CGSMul_data, 0.0, Std_data);
        } else if (data_format == "NHWC") {
            NOT_IMPLEMENTED;
        }
        math::Mul<T, Context>(output(0)->count(), dYdata, Std_data, dXdata);
    }
    ws()->ReleaseBuffer(stddev);
}

template <class Context>
void FusedGroupNormGradientOp<Context>::Setup() {
    //  determine the mode
    if (use_stats == -1) use_global_stats = phase() == "TEST" ? true : false;
    else use_global_stats = use_stats == 1 ? true : false;

    //  determine the data format
    TIndex channel_axis = axis;
    data_format = "NCHW";
    if (channel_axis == -1) channel_axis += (int)input(0).ndim();
    if (channel_axis + 1 == (int)input(0).ndim()) data_format = "NHWC";
    if (input(0).ndim() == 2) data_format = "NCHW";
    N = input(0).dim(0);
    C = input(0).dim(channel_axis);
    CHECK_EQ(C % group, 0) << "\nThe " << C << " channels "
                           << "can not be split into " << group << " groups.";
    if (group == C && input(0).ndim() == 2)    //  InstanceNorm
        LOG(WARNING) << "The 2d input will output all zeros.";
    NC = N * C;
    NG = N * group;
    S = input(0).count() / NC;
    CGS = (C / group) * S;
    NS = N * S;

    //  make resource
    mean = ws()->GetTensor("/mnt/" + anchor() + "/gn_mean");
    var = ws()->GetTensor("/mnt/" + anchor() + "/gn_var");
    x_norm = ws()->GetTensor("/mnt/" + anchor() + "/gn_x_norm");
    stddev = ws()->GetBuffer();
    stddev->ReshapeLike(input(0));

    //  reshape
    num_by_chans.Reshape(vector<TIndex>(1, NC));
    output(0)->ReshapeLike(input(0));    //  dX
    output(1)->ReshapeLike(input(3));    //  dScale
    output(2)->ReshapeLike(input(3));    //  dBias
}

template <class Context>
void FusedGroupNormGradientOp<Context>::RunOnDevice() {
    Setup();
    if (input(0).template IsType<float>()) {
        if (use_global_stats) InferenceRunWithType<float>();
        else TrainingRunWithType<float>();
    }
#ifdef WITH_CUDA_FP16
    else if (input(0).template IsType<float16>()) {
        if (use_global_stats) InferenceRunWithType<float16>();
        else TrainingRunWithType<float16>();
    }
#endif
    else LOG(FATAL) << "Unsupported input types.";
}

template <class Context>
void FusedGroupNormGradientOp<Context>::ShareGradient() {
    if (use_global_stats) {
        if (output(0)->name() != "ignore") {
            Tensor* dX = ws()->GetBuffer("Grad");
            ws()->CreateAvatar(output(0), dX);
        }
    } else {
        if (output(0)->name() != "ignore" ||
            output(1)->name() != "ignore" ||
            output(2)->name() != "ignore") {
            Tensor* dX = ws()->GetBuffer("Grad");
            ws()->CreateAvatar(output(0), dX);
        }
    }
}

DEPLOY_CPU(FusedGroupNormGradient);
#ifdef WITH_CUDA
DEPLOY_CUDA(FusedGroupNormGradient);
#endif
OPERATOR_SCHEMA(FusedGroupNormGradient).NumInputs(5).NumOutputs(3);

class GetFusedGroupNormGradient final : public GradientMakerBase {
 public:
    GRADIENT_MAKER_CTOR(GetFusedGroupNormGradient);
    vector<OperatorDef> MakeDefs() override {
        return SingleDef(def.type() + "Gradient", "",
                         vector<string> {I(0), I(1), I(2), I(3), GO(0)},
                         vector<string> {GI(0), GI(3), GI(4)});
    }
};
REGISTER_GRADIENT(FusedGroupNorm, GetFusedGroupNormGradient);

}    //  namespace dragon
\ No newline at end of file
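The training path above is a long sequence of Gemv/Gemm broadcasts, but the math it implements is compact: view the input as NG rows of CGS elements, normalize each row by its own mean and variance, then apply the per-channel scale and bias. A hedged NumPy sketch of that forward pass (NCHW, training branch; the function name and shapes are illustrative, not Dragon API):

    import numpy as np

    def fused_group_norm(x, gamma, beta, group, eps=1e-3):
        N, C, H, W = x.shape                                  # S = H * W
        xg = x.reshape(N * group, (C // group) * H * W)       # NG x CGS
        mean = xg.mean(axis=1, keepdims=True)                 # per-group mean
        var = ((xg - mean) ** 2).mean(axis=1, keepdims=True)  # VAR(X) = E((X - EX)^2)
        y = ((xg - mean) / np.sqrt(var + eps)).reshape(N, C, H, W)
        return y * gamma.reshape(1, C, 1, 1) + beta.reshape(1, C, 1, 1)

    x = np.random.randn(2, 32, 4, 4).astype('float32')
    y = fused_group_norm(x, np.ones(32), np.zeros(32), group=8)
    print(y.shape)    # (2, 32, 4, 4)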
Dragon/src/operators/norm/group_norm_op.cc  (new file, 0 → 100644)

#include "operators/norm/group_norm_op.h"
#include "core/workspace.h"
#include "utils/math_functions.h"
#include "utils/filler.h"

namespace dragon {

template <class Context> template <typename T>
void GroupNormOp<Context>::TrainingRunWithType() {
    INIT_MULTIPLIER(multiplier, NS);
    INIT_MULTIPLIER(num_multiplier, N);
    INIT_MULTIPLIER(spatial_multiplier, S);
    INIT_MULTIPLIER(cgs_multiplier, CGS);
    TENSOR_FILL(input(1), vector<TIndex>(1, NG));    //  history_mean
    TENSOR_FILL(input(2), vector<TIndex>(1, NG));    //  history_var
    auto* hMean_data = input(1).template mutable_data<T, Context>();
    auto* hVar_data = input(2).template mutable_data<T, Context>();
    auto* tMean_data = mean.template mutable_data<T, Context>();
    auto* tVar_data = var->template mutable_data<T, Context>();
    auto* Xdata = input(0).template data<T, Context>();
    auto* Ydata = output(0)->template mutable_data<T, Context>();
    auto* NMul_data = num_multiplier->template data<T, Context>();
    auto* SMul_data = spatial_multiplier->template data<T, Context>();
    auto* NSMul_data = multiplier->template data<T, Context>();
    auto* CGSMul_data = cgs_multiplier->template data<T, Context>();
    auto* NC_data = num_by_chans.template mutable_data<T, Context>();
    auto* Std_data = stddev->template mutable_data<T, Context>();
    ctx().template Copy<T, Context, Context>(output(0)->count(), Ydata, Xdata);

    //  compute mean
    if (data_format == "NCHW") {
        math::Gemv<T, Context>(CblasNoTrans, NG, CGS, 1.0 / CGS, Xdata, CGSMul_data, 0, tMean_data);
    } else if (data_format == "NHWC") {
        NOT_IMPLEMENTED;
    }

    //  subtract mean
    if (data_format == "NCHW") {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NG, CGS, 1, -1.0, tMean_data, CGSMul_data, 1.0, Ydata);
    } else if (data_format == "NHWC") {
        NOT_IMPLEMENTED;
    }

    //  compute variance
    //  note that we use VAR(X) = E((X - EX) ^ 2)
    math::Square<T, Context>(output(0)->count(), Ydata, Std_data);
    if (data_format == "NCHW") {
        math::Gemv<T, Context>(CblasNoTrans, NG, CGS, 1.0 / CGS, Std_data, CGSMul_data, 0.0, tVar_data);
    } else if (data_format == "NHWC") {
        NOT_IMPLEMENTED;
    }

    //  compute moving average
    if (!is_recomputing) {
        if (mode == "CAFFE") {
            CHECK_EQ(InputSize(), 4)
                << "\nThe number of inputs should be 4 if use CAFFE mode.";
            TENSOR_FILL(input(3), vector<TIndex>(1, 1));
            auto* hFact_data = input(3).template mutable_data<T, CPUContext>();
            float factor = dragon_cast<float, T>(hFact_data[0]);
            factor *= momentum; factor += 1;
            hFact_data[0] = dragon_cast<T, float>(factor);
            int m = input(0).count() / C;
            float coeff = m > 1 ? float(m) / (m - 1) : 1;
            //  History(X) = Cur(X) + momentum * History(X)
            math::Axpby<T, Context>(mean.count(), 1.0, tMean_data, momentum, hMean_data);
            math::Axpby<T, Context>(var->count(), coeff, tVar_data, momentum, hVar_data);
        } else {
            //  History(X) = (1 - momentum) * Cur(X) + momentum * History(X)
            math::Axpby<T, Context>(mean.count(), 1.0 - momentum, tMean_data, momentum, hMean_data);
            math::Axpby<T, Context>(var->count(), 1.0 - momentum, tVar_data, momentum, hVar_data);
        }
    }

    //  compute stddev
    math::AddScalar<T, Context>(var->count(), eps, tVar_data);
    math::Sqrt<T, Context>(var->count(), tVar_data, tVar_data);

    //  divide by stddev
    if (data_format == "NCHW") {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NG, CGS, 1, 1.0, tVar_data, CGSMul_data, 0.0, Std_data);
    } else if (data_format == "NHWC") {
        NOT_IMPLEMENTED;
    }
    math::Div<T, Context>(output(0)->count(), Ydata, Std_data, Ydata);
    ws()->ReleaseBuffer(stddev);
}

template <class Context> template <typename T>
void GroupNormOp<Context>::InferenceRunWithType() {
    INIT_MULTIPLIER(multiplier, NS);
    INIT_MULTIPLIER(num_multiplier, N);
    INIT_MULTIPLIER(spatial_multiplier, S);
    INIT_MULTIPLIER(cgs_multiplier, CGS);
    TENSOR_FILL(input(1), vector<TIndex>(1, NG));    //  history_mean
    TENSOR_FILL(input(2), vector<TIndex>(1, NG));    //  history_var
    auto* hMean_data = input(1).template mutable_data<T, Context>();
    auto* hVar_data = input(2).template mutable_data<T, Context>();
    auto* tMean_data = mean.template mutable_data<T, Context>();
    auto* tVar_data = var->template mutable_data<T, Context>();
    auto* Xdata = input(0).template data<T, Context>();
    auto* Ydata = output(0)->template mutable_data<T, Context>();
    auto* NMul_data = num_multiplier->template data<T, Context>();
    auto* SMul_data = spatial_multiplier->template data<T, Context>();
    auto* NSMul_data = multiplier->template data<T, Context>();
    auto* CGSMul_data = cgs_multiplier->template data<T, Context>();
    auto* NC_data = num_by_chans.template mutable_data<T, Context>();
    auto* Std_data = stddev->template mutable_data<T, Context>();
    ctx().template Copy<T, Context, Context>(input(0).count(), Ydata, Xdata);

    //  scale the mean and variance if necessary
    if (mode == "CAFFE") {
        CHECK_EQ(InputSize(), 4)
            << "\nThe number of inputs should be 4 if use CAFFE mode.";
        TENSOR_FILL(input(3), vector<TIndex>(1, 1));
        auto* hFact_data = input(3).template mutable_data<T, CPUContext>();
        const float factor = dragon_cast<float, T>(hFact_data[0]);
        const float scale = factor == 0 ? 0 : 1.0 / factor;
        math::Scale<T, Context>(mean.count(), scale, hMean_data, tMean_data);
        math::Scale<T, Context>(var->count(), scale, hVar_data, tVar_data);
    } else {
        ctx().template Copy<T, Context, Context>(mean.count(), tMean_data, hMean_data);
        ctx().template Copy<T, Context, Context>(var->count(), tVar_data, hVar_data);
    }

    //  subtract mean
    if (data_format == "NCHW") {
        math::Gemv<T, Context>(CblasNoTrans, NG, CGS, 1.0 / CGS, Xdata, CGSMul_data, 0, tMean_data);
    } else if (data_format == "NHWC") {
        NOT_IMPLEMENTED;
    }

    //  compute stddev
    math::AddScalar<T, Context>(var->count(), eps, tVar_data);
    math::Sqrt<T, Context>(var->count(), tVar_data, tVar_data);

    //  divide by stddev
    if (data_format == "NCHW") {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NG, CGS, 1, 1.0, tVar_data, CGSMul_data, 0.0, Std_data);
    } else if (data_format == "NHWC") {
        NOT_IMPLEMENTED;
    }
    math::Div<T, Context>(output(0)->count(), Ydata, Std_data, Ydata);
    ws()->ReleaseBuffer(stddev);
}

template <class Context>
void GroupNormOp<Context>::Setup() {
    //  determine the mode
    if (use_stats == -1) use_global_stats = phase() == "TEST" ? true : false;
    else use_global_stats = use_stats == 1 ? true : false;
    is_recomputing = ws()->GetTensor("/opt/mirror_stage/recompute_flag")
                         ->template data<bool, CPUContext>()[0];

    //  determine the data format
    TIndex channel_axis = axis;
    data_format = "NCHW";
    if (channel_axis == -1) channel_axis += (int)input(0).ndim();
    if (channel_axis + 1 == (int)input(0).ndim()) data_format = "NHWC";
    if (input(0).ndim() == 2) data_format = "NCHW";
    N = input(0).dim(0);
    C = input(0).dim(channel_axis);
    CHECK_EQ(C % group, 0) << "\nThe " << C << " channels "
                           << "can not be split into " << group << " groups.";
    if (group == C && input(0).ndim() == 2)    //  InstanceNorm
        LOG(WARNING) << "The 2d input will output all zeros.";
    NC = N * C;
    NG = N * group;
    S = input(0).count() / NC;
    CGS = (C / group) * S;
    NS = N * S;

    //  make resource
    var = ws()->CreateTensor("/mnt/" + anchor() + "/gn_var");
    stddev = ws()->GetBuffer();
    stddev->ReshapeLike(input(0));

    //  reshape
    mean.Reshape(vector<TIndex>(1, NG));
    var->Reshape(vector<TIndex>(1, NG));
    num_by_chans.Reshape(vector<TIndex>(1, NC));
    output(0)->ReshapeLike(input(0));
}

template <class Context>
void GroupNormOp<Context>::RunOnDevice() {
    Setup();
    if (input(0).template IsType<float>()) {
        if (use_global_stats) InferenceRunWithType<float>();
        else TrainingRunWithType<float>();
    }
#ifdef WITH_CUDA_FP16
    else if (input(0).template IsType<float16>()) {
        if (use_global_stats) InferenceRunWithType<float16>();
        else TrainingRunWithType<float16>();
    }
#endif
    else LOG(FATAL) << "Unsupported input types.";
}

DEPLOY_CPU(GroupNorm);
#ifdef WITH_CUDA
DEPLOY_CUDA(GroupNorm);
#endif
OPERATOR_SCHEMA(GroupNorm).NumInputs(3, 4).NumOutputs(1);

template <class Context> template <typename T>
void GroupNormGradientOp<Context>::TrainingRunWithType() {
    INIT_MULTIPLIER(multiplier, NS);
    INIT_MULTIPLIER(num_multiplier, N);
    INIT_MULTIPLIER(spatial_multiplier, S);
    INIT_MULTIPLIER(cgs_multiplier, CGS);
    auto* dYdata = input(-1).template data<T, Context>();
    auto* dXdata = output(0)->template mutable_data<T, Context>();
    auto* Std_data = stddev->template mutable_data<T, Context>();
    auto* tVar_data = var->template mutable_data<T, Context>();
    auto* NMul_data = num_multiplier->template data<T, Context>();
    auto* SMul_data = spatial_multiplier->template data<T, Context>();
    auto* NSMul_data = multiplier->template data<T, Context>();
    auto* CGSMul_data = cgs_multiplier->template data<T, Context>();
    auto* NC_data = num_by_chans.template mutable_data<T, Context>();
    if (data_format == "NCHW") {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NG, CGS, 1, 1.0, tVar_data, CGSMul_data, 0.0, Std_data);
    } else if (data_format == "NHWC") {
        NOT_IMPLEMENTED;
    }
    auto* Ydata = input(1).template data<T, Context>();
    math::Mul<T, Context>(output(0)->count(), Ydata, dYdata, dXdata);

    //  sum(dE/dY \cdot Y)
    if (data_format == "NCHW") {
        math::Gemv<T, Context>(CblasNoTrans, NG, CGS, 1.0, dXdata, CGSMul_data, 0.0, tVar_data);
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NG, CGS, 1, 1.0, tVar_data, CGSMul_data, 0.0, dXdata);
    } else if (data_format == "NHWC") {
        NOT_IMPLEMENTED;
    }

    //  sum(dE/dY \cdot Y) \cdot Y
    math::Mul<T, Context>(output(0)->count(), Ydata, dXdata, dXdata);

    //  sum(dE/dY) + sum(dE/dY \cdot Y) \cdot Y
    if (data_format == "NCHW") {
        math::Gemv<T, Context>(CblasNoTrans, NG, CGS, 1.0, dYdata, CGSMul_data, 0.0, tVar_data);
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NG, CGS, 1, 1.0, tVar_data, CGSMul_data, 1.0, dXdata);
    } else if (data_format == "NHWC") {
        NOT_IMPLEMENTED;
    }

    //  dE/dY - mean(dE/dY) - mean(dE/dY \cdot Y) \cdot Y
    //  = dE/dY - mean(sum(dE/dY) + sum(dE/dY \cdot Y) \cdot Y)
    math::Axpby<T, Context>(output(0)->count(), 1.0, dYdata, -1.0 / CGS, dXdata);

    //  divide by stddev
    math::Div<T, Context>(output(0)->count(), dXdata, Std_data, dXdata);
    ws()->ReleaseBuffer(stddev);
}

template <class Context> template <typename T>
void GroupNormGradientOp<Context>::InferenceRunWithType() {
    INIT_MULTIPLIER(multiplier, NS);
    INIT_MULTIPLIER(num_multiplier, N);
    INIT_MULTIPLIER(spatial_multiplier, S);
    INIT_MULTIPLIER(cgs_multiplier, CGS);
    auto* dYdata = input(-1).template data<T, Context>();
    auto* dXdata = output(0)->template mutable_data<T, Context>();
    auto* Std_data = stddev->template mutable_data<T, Context>();
    auto* tVar_data = var->template mutable_data<T, Context>();
    auto* NMul_data = num_multiplier->template data<T, Context>();
    auto* SMul_data = spatial_multiplier->template data<T, Context>();
    auto* NSMul_data = multiplier->template data<T, Context>();
    auto* CGSMul_data = cgs_multiplier->template data<T, Context>();
    auto* NC_data = num_by_chans.template mutable_data<T, Context>();
    if (data_format == "NCHW") {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NG, CGS, 1, 1.0, tVar_data, NSMul_data, 0.0, Std_data);
    } else if (data_format == "NHWC") {
        NOT_IMPLEMENTED;
    }
    math::Div<T, Context>(output(0)->count(), dYdata, Std_data, dXdata);
    ws()->ReleaseBuffer(stddev);
}

template <class Context>
void GroupNormGradientOp<Context>::Setup() {
    //  determine the mode
    if (use_stats == -1) use_global_stats = phase() == "TEST" ? true : false;
    else use_global_stats = use_stats == 1 ? true : false;

    //  determine the data format
    TIndex channel_axis = axis;
    data_format = "NCHW";
    if (channel_axis == -1) channel_axis += (int)input(0).ndim();
    if (channel_axis + 1 == (int)input(0).ndim()) data_format = "NHWC";
    if (input(0).ndim() == 2) data_format = "NCHW";
    N = input(0).dim(0);
    C = input(0).dim(channel_axis);
    CHECK_EQ(C % group, 0) << "\nThe " << C << " channels "
                           << "can not be split into " << group << " groups.";
    if (group == C && input(0).ndim() == 2)    //  InstanceNorm
        LOG(WARNING) << "The 2d input will output all zeros.";
    NC = N * C;
    NG = N * group;
    S = input(0).count() / NC;
    CGS = (C / group) * S;
    NS = N * S;

    //  make resource
    var = ws()->GetTensor("/mnt/" + anchor() + "/gn_var");
    stddev = ws()->GetBuffer();
    stddev->ReshapeLike(input(0));

    //  reshape
    num_by_chans.Reshape(vector<TIndex>(1, NC));
    output(0)->ReshapeLike(input(0));
}

template <class Context>
void GroupNormGradientOp<Context>::RunOnDevice() {
    Setup();
    if (input(0).template IsType<float>()) {
        if (use_global_stats) InferenceRunWithType<float>();
        else TrainingRunWithType<float>();
    }
#ifdef WITH_CUDA_FP16
    else if (input(0).template IsType<float16>()) {
        if (use_global_stats) InferenceRunWithType<float16>();
        else TrainingRunWithType<float16>();
    }
#endif
    else LOG(FATAL) << "Unsupported input types.";
}

DEPLOY_CPU(GroupNormGradient);
#ifdef WITH_CUDA
DEPLOY_CUDA(GroupNormGradient);
#endif
OPERATOR_SCHEMA(GroupNormGradient).NumInputs(3).NumOutputs(1);

class GetGroupNormGradient final : public GradientMakerBase {
 public:
    GRADIENT_MAKER_CTOR(GetGroupNormGradient);
    vector<OperatorDef> MakeDefs() override {
        return SingleDef(def.type() + "Gradient", "",
                         vector<string> {I(0), O(0), GO(0)},
                         vector<string> {GI(0)});
    }
};
REGISTER_GRADIENT(GroupNorm, GetGroupNormGradient);

}    //  namespace dragon
\ No newline at end of file
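The comment block inside TrainingRunWithType compresses the whole backward rule: dX = (dY - mean(dY) - Y * mean(dY . Y)) / std, with both means taken per group over the CGS elements. A hedged NumPy sketch of that rule (names are illustrative; std is passed already broadcast to the input shape, as the leading Gemm above produces it):

    import numpy as np

    def group_norm_grad(dy, y, std, group):
        N, C, H, W = dy.shape
        g = lambda t: t.reshape(N * group, -1)     # NG x CGS view
        m = g(dy).shape[1]                         # CGS
        term = g(dy).sum(1, keepdims=True) \
             + (g(dy) * g(y)).sum(1, keepdims=True) * g(y)
        dx = (g(dy) - term / m) / g(std)           # dE/dY - mean(...), then / std
        return dx.reshape(N, C, H, W)

    dy = np.random.randn(2, 8, 3, 3)
    y = np.random.randn(2, 8, 3, 3)
    std = np.full_like(dy, 1.5)
    print(group_norm_grad(dy, y, std, group=4).shape)    # (2, 8, 3, 3)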