Commit 6f2751b1 by Ting PAN

Refactor Norm Module

1 parent 5bd1f6b5
Showing with 1783 additions and 1299 deletions
...@@ -97,6 +97,7 @@ link_directories(${UINX_CUDNN_LIBS}) ...@@ -97,6 +97,7 @@ link_directories(${UINX_CUDNN_LIBS})
# ---[ Install # ---[ Install
set(CMAKE_INSTALL_PREFIX ${PROJECT_SOURCE_DIR} CACHE STRING "set install prefix" FORCE) set(CMAKE_INSTALL_PREFIX ${PROJECT_SOURCE_DIR} CACHE STRING "set install prefix" FORCE)
set(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_RPATH} ${3RDPARTY_LIBS})
# ---[ defines # ---[ defines
if (WITH_PYTHON3) if (WITH_PYTHON3)
......
...@@ -26,19 +26,8 @@ class OperatorBase { ...@@ -26,19 +26,8 @@ class OperatorBase {
public: public:
OperatorBase(const OperatorDef& op_def, Workspace* ws); OperatorBase(const OperatorDef& op_def, Workspace* ws);
inline Tensor& input(int idx) { Tensor& input(int idx);
CHECK_LT(idx, (int)inputs_.size()); Tensor* output(int idx);
CHECK_GE(idx, -(int)inputs_.size());
if (idx >= 0) return *inputs_[idx];
else return *inputs_[idx + inputs_.size()];
}
inline Tensor* output(int idx) {
CHECK_LT(idx, (int)outputs_.size());
CHECK_GE(idx, -(int)outputs_.size());
if (idx >= 0) return outputs_[idx];
else return outputs_[idx + outputs_.size()];
}
inline size_t InputSize() { return inputs_.size(); } inline size_t InputSize() { return inputs_.size(); }
inline size_t OutputSize() { return outputs_.size(); } inline size_t OutputSize() { return outputs_.size(); }
...@@ -46,7 +35,6 @@ class OperatorBase { ...@@ -46,7 +35,6 @@ class OperatorBase {
inline void SwitchToPhase(const string& phase) { this->phase_ = phase; } inline void SwitchToPhase(const string& phase) { this->phase_ = phase; }
virtual void Run() { NOT_IMPLEMENTED; } virtual void Run() { NOT_IMPLEMENTED; }
inline const string& name() const { return op_def_.name(); } inline const string& name() const { return op_def_.name(); }
inline const string& type() const { return op_def_.type(); } inline const string& type() const { return op_def_.type(); }
inline const string& phase() const { return phase_; } inline const string& phase() const { return phase_; }
...@@ -171,7 +159,7 @@ DECLARE_REGISTRY(CUDNNOperatorRegistry, OperatorBase, const OperatorDef&, Worksp ...@@ -171,7 +159,7 @@ DECLARE_REGISTRY(CUDNNOperatorRegistry, OperatorBase, const OperatorDef&, Worksp
} }
#define INIT_MULTIPLIER(ptr_tensor, size) { \ #define INIT_MULTIPLIER(ptr_tensor, size) { \
ptr_tensor = ws()->CreateTensor("_t_multiplier"); \ ptr_tensor = ws()->CreateTensor("/share/multiplier"); \
if (size > ptr_tensor->count()) { \ if (size > ptr_tensor->count()) { \
ptr_tensor->Reshape(vector<TIndex>(1, size)); \ ptr_tensor->Reshape(vector<TIndex>(1, size)); \
math::Set<T, Context>(size, dragon_cast<T, float>(1.0f), \ math::Set<T, Context>(size, dragon_cast<T, float>(1.0f), \
......
...@@ -153,6 +153,7 @@ class Tensor { ...@@ -153,6 +153,7 @@ class Tensor {
void* data_ptr; void* data_ptr;
mutable_data_ptr<Context>(&data_ptr); mutable_data_ptr<Context>(&data_ptr);
if (meta_ == meta && data_ptr) return data_ptr; if (meta_ == meta && data_ptr) return data_ptr;
if (meta_ != meta && data_ptr && !own_mem_) delete ex_memory_;
meta_ = meta; meta_ = meta;
CHECK_GT(size_, 0); CHECK_GT(size_, 0);
if (own_mem_) memory_.reset(new MixedMemory(meta, size_* meta_.itemsize())); if (own_mem_) memory_.reset(new MixedMemory(meta, size_* meta_.itemsize()));
...@@ -196,14 +197,6 @@ class Tensor { ...@@ -196,14 +197,6 @@ class Tensor {
capacity_ = other.capacity_; capacity_ = other.capacity_;
} }
inline void Replace(const Tensor& other) {
memory_ = other.memory_;
meta_ = other.meta_;
capacity_ = other.capacity_;
size_ = other.size_;
dims_ = other.dims_;
}
inline void Move(MixedMemory* mem) { inline void Move(MixedMemory* mem) {
if (mem != nullptr) ex_memory_ = mem; if (mem != nullptr) ex_memory_ = mem;
else ex_memory_ = new MixedMemory(TypeMeta::Make<float>(), 4); else ex_memory_ = new MixedMemory(TypeMeta::Make<float>(), 4);
......
...@@ -26,17 +26,18 @@ class Workspace { ...@@ -26,17 +26,18 @@ class Workspace {
typedef Map<string, unique_ptr<GraphBase> > GraphMap; typedef Map<string, unique_ptr<GraphBase> > GraphMap;
typedef Map<string, TensorFiller> FillerMap; typedef Map<string, TensorFiller> FillerMap;
typedef Map<string, string> RenameMap; typedef Map<string, string> RenameMap;
typedef Map<string, string> AvatarMap;
Workspace(const string& name) : name_(name) { init(); } Workspace(const string& name) : name_(name) { Init(); }
~Workspace(); ~Workspace();
void init() { void Init() {
CreateTensor("ignore"); CreateTensor("ignore");
CreateBuffer("Common", WORKSPACE_COMMON_BUFFER_SIZE); CreateBuffer("Common", WORKSPACE_COMMON_BUFFER_SIZE);
CreateBuffer("Grad", WORKSPACE_GRAD_BUFFER_SIZE); CreateBuffer("Grad", WORKSPACE_GRAD_BUFFER_SIZE);
} }
const string& name() { return name_; } inline const string& name() { return name_; }
/******************** Workspace ********************/ /******************** Workspace ********************/
...@@ -55,7 +56,7 @@ class Workspace { ...@@ -55,7 +56,7 @@ class Workspace {
} else { return name; } } else { return name; }
} }
inline bool HasTensor(const string& name, bool use_remote=true) { bool HasTensor(const string& name, bool use_remote=true) {
// search local workspace // search local workspace
string query = GetTensorName(name); string query = GetTensorName(name);
bool result = tensor_map_.count(query) > 0; bool result = tensor_map_.count(query) > 0;
...@@ -74,7 +75,7 @@ class Workspace { ...@@ -74,7 +75,7 @@ class Workspace {
return tensor_map_[query].get(); return tensor_map_[query].get();
} }
inline Tensor* GetTensor(const string& name, bool use_remote=true) { Tensor* GetTensor(const string& name, bool use_remote=true) {
string query = GetTensorName(name); string query = GetTensorName(name);
// search local workspace // search local workspace
if (tensor_map_.count(query) > 0) if (tensor_map_.count(query) > 0)
...@@ -113,7 +114,7 @@ class Workspace { ...@@ -113,7 +114,7 @@ class Workspace {
tensor_map_[query]->Reset(); tensor_map_[query]->Reset();
} }
inline vector<string> GetTensors() { vector<string> GetTensors() {
vector<string> names; vector<string> names;
// search local workspace // search local workspace
for (auto& it : tensor_map_) for (auto& it : tensor_map_)
...@@ -140,13 +141,28 @@ class Workspace { ...@@ -140,13 +141,28 @@ class Workspace {
else return nullptr; else return nullptr;
} }
/******************** Avatar ********************/
inline void CreateAvatar(Tensor* orig, Tensor* avatar) {
CHECK(tensor_map_.count(orig->name()) > 0)
<< "\nFailed to create avatar for Tensor(" << orig->name() << ")."
<< "\nAs it has not been registered in the current workspace.";
avatar_map_[orig->name()] = avatar->name();
}
inline Tensor* SearchAvatar(Tensor* orig) {
if (avatar_map_.count(orig->name()) > 0)
return tensor_map_[avatar_map_[orig->name()]].get();
return orig;
}
/******************** Buffer ********************/ /******************** Buffer ********************/
inline void CreateBuffer(string category, int num) { void CreateBuffer(string category, int num) {
CHECK(!buffer_map_.count(category)); CHECK(!buffer_map_.count(category));
buffer_map_[category] = stack<string>(); buffer_map_[category] = stack<string>();
for (int i = 1; i <= num; i++) { for (int i = 1; i <= num; i++) {
string name = "_t_" + category + "_buffer_" + dragon_cast<string, int>(i); string name = "/share/buffer/" + category + "_" + dragon_cast<string, int>(i);
buffer_map_[category].push(name); buffer_map_[category].push(name);
CreateTensor(name); CreateTensor(name);
} }
...@@ -163,17 +179,18 @@ class Workspace { ...@@ -163,17 +179,18 @@ class Workspace {
return nullptr; return nullptr;
} }
inline void ReleaseBuffer(Tensor* tensor, void ReleaseBuffer(Tensor* tensor,
string category = "Common", string category = "Common",
bool enforce = false) { bool enforce = false) {
static Map<string, int> limits = { static Map<string, int> limits = {
{ "Common", WORKSPACE_COMMON_BUFFER_SIZE }, { "Common", WORKSPACE_COMMON_BUFFER_SIZE },
{ "Grad", WORKSPACE_GRAD_BUFFER_SIZE }}; { "Grad", WORKSPACE_GRAD_BUFFER_SIZE }
};
if (buffer_map_[category].size() >= limits[category] || enforce) { if (buffer_map_[category].size() >= limits[category] || enforce) {
// release directly
ReleaseTensor(tensor->name()); ReleaseTensor(tensor->name());
if (buffer_map_[category].empty())
buffer_map_[category].push(tensor->name());
} else { } else {
// recover as a available buffer
buffer_map_[category].push(tensor->name()); buffer_map_[category].push(tensor->name());
} }
} }
...@@ -182,7 +199,7 @@ class Workspace { ...@@ -182,7 +199,7 @@ class Workspace {
GraphBase* CreateGraph(const GraphDef& meta_graph); GraphBase* CreateGraph(const GraphDef& meta_graph);
inline bool RunGraph(const string& graph_name, bool RunGraph(const string& graph_name,
const string& include, const string& include,
const string& exclude) { const string& exclude) {
if (!graph_map_.count(graph_name)) { if (!graph_map_.count(graph_name)) {
...@@ -192,7 +209,7 @@ class Workspace { ...@@ -192,7 +209,7 @@ class Workspace {
return graph_map_[graph_name]->Run(include, exclude); return graph_map_[graph_name]->Run(include, exclude);
} }
inline vector<string> GetGraphs() { vector<string> GetGraphs() {
vector<string> names; vector<string> names;
for (auto& it : graph_map_) names.push_back(it.first); for (auto& it : graph_map_) names.push_back(it.first);
return names; return names;
...@@ -214,6 +231,7 @@ class Workspace { ...@@ -214,6 +231,7 @@ class Workspace {
GraphMap graph_map_; GraphMap graph_map_;
FillerMap filler_map_; FillerMap filler_map_;
RenameMap rename_map_; RenameMap rename_map_;
AvatarMap avatar_map_;
}; };
} // namespace dragon } // namespace dragon
......
...@@ -47,7 +47,6 @@ class DropoutGradientOp final : public Operator<Context> { ...@@ -47,7 +47,6 @@ class DropoutGradientOp final : public Operator<Context> {
} }
void RunOnDevice() override; void RunOnDevice() override;
void CleanResource() override;
template <typename T> void RunWithType(); template <typename T> void RunWithType();
protected: protected:
......
...@@ -32,7 +32,9 @@ class BiasAddGradientOp final : public Operator<Context> { ...@@ -32,7 +32,9 @@ class BiasAddGradientOp final : public Operator<Context> {
public: public:
BiasAddGradientOp(const OperatorDef& op_def, Workspace* ws) BiasAddGradientOp(const OperatorDef& op_def, Workspace* ws)
: Operator<Context>(op_def, ws), : Operator<Context>(op_def, ws),
data_format(OperatorBase::GetSingleArg<string>("data_format", "NCHW")) {} data_format(OperatorBase::GetSingleArg<string>("data_format", "NCHW")) {
DISABLE_SHARE_GRADIENT;
}
void RunOnDevice() override; void RunOnDevice() override;
template <typename T> void RunWithType(); template <typename T> void RunWithType();
...@@ -45,4 +47,4 @@ class BiasAddGradientOp final : public Operator<Context> { ...@@ -45,4 +47,4 @@ class BiasAddGradientOp final : public Operator<Context> {
} // namespace dragon } // namespace dragon
#endif // DRAGON_OPERATORS_ARITHMETIC_BIAS_OP_H_ #endif // DRAGON_OPERATORS_ARITHMETIC_BIAS_ADD_OP_H_
\ No newline at end of file \ No newline at end of file
...@@ -17,13 +17,14 @@ class ScaleOp : public Operator<Context> { ...@@ -17,13 +17,14 @@ class ScaleOp : public Operator<Context> {
ScaleOp(const OperatorDef& op_def, Workspace* ws) ScaleOp(const OperatorDef& op_def, Workspace* ws)
: Operator<Context>(op_def, ws), : Operator<Context>(op_def, ws),
axis(OperatorBase::GetSingleArg<int>("axis", 1)), axis(OperatorBase::GetSingleArg<int>("axis", 1)),
num_axes(OperatorBase::GetSingleArg<int>("num_axes", -1)) {} num_axes(OperatorBase::GetSingleArg<int>("num_axes", 1)) {}
void RunOnDevice() override; void RunOnDevice() override;
template <typename T> void RunWithType(); template <typename T> void RunWithType();
protected: protected:
TIndex axis, num_axes, inner_dim; TIndex axis, start_axis, num_axes;
TIndex inner_dim;
Tensor* bias_multiplier; Tensor* bias_multiplier;
}; };
...@@ -41,7 +42,7 @@ class ScaleGradientOp final : public Operator<Context> { ...@@ -41,7 +42,7 @@ class ScaleGradientOp final : public Operator<Context> {
template <typename T> void RunWithType(); template <typename T> void RunWithType();
protected: protected:
TIndex axis, num_axes; TIndex axis, start_axis, num_axes;
TIndex outer_dim, inner_dim, scale_dim, sum_dim, dim; TIndex outer_dim, inner_dim, scale_dim, sum_dim, dim;
Tensor* bias_multiplier, *sum_multiplier; Tensor* bias_multiplier, *sum_multiplier;
Tensor sum_result; Tensor sum_result;
......
...@@ -16,7 +16,8 @@ class SmoothL1LossOp final : public Operator<Context> { ...@@ -16,7 +16,8 @@ class SmoothL1LossOp final : public Operator<Context> {
public: public:
SmoothL1LossOp(const OperatorDef& op_def, Workspace* ws) SmoothL1LossOp(const OperatorDef& op_def, Workspace* ws)
: Operator<Context>(op_def, ws), : Operator<Context>(op_def, ws),
sigma2(OperatorBase::GetSingleArg<float>("sigma", 1.0)) { sigma2(OperatorBase::GetSingleArg<float>("sigma", 1.0)),
normalization(OperatorBase::GetSingleArg<string>("normalization", "BATCH_SIZE")) {
sigma2 *= sigma2; sigma2 *= sigma2;
} }
...@@ -26,6 +27,7 @@ class SmoothL1LossOp final : public Operator<Context> { ...@@ -26,6 +27,7 @@ class SmoothL1LossOp final : public Operator<Context> {
protected: protected:
float sigma2; float sigma2;
Tensor* diff, *error; Tensor* diff, *error;
string normalization;
}; };
template <class Context> template <class Context>
...@@ -33,7 +35,8 @@ class SmoothL1LossGradientOp final : public Operator<Context> { ...@@ -33,7 +35,8 @@ class SmoothL1LossGradientOp final : public Operator<Context> {
public: public:
SmoothL1LossGradientOp(const OperatorDef& op_def, Workspace* ws) SmoothL1LossGradientOp(const OperatorDef& op_def, Workspace* ws)
: Operator<Context>(op_def, ws), : Operator<Context>(op_def, ws),
sigma2(OperatorBase::GetSingleArg<float>("sigma", 1.0)) { sigma2(OperatorBase::GetSingleArg<float>("sigma", 1.0)),
normalization(OperatorBase::GetSingleArg<string>("normalization", "BATCH_SIZE")) {
sigma2 *= sigma2; sigma2 *= sigma2;
} }
...@@ -43,6 +46,7 @@ class SmoothL1LossGradientOp final : public Operator<Context> { ...@@ -43,6 +46,7 @@ class SmoothL1LossGradientOp final : public Operator<Context> {
protected: protected:
float sigma2; float sigma2;
Tensor* diff; Tensor* diff;
string normalization;
}; };
} // namespace dragon } // namespace dragon
......
...@@ -20,7 +20,7 @@ class SoftmaxCrossEntropyOp final : public Operator<Context> { ...@@ -20,7 +20,7 @@ class SoftmaxCrossEntropyOp final : public Operator<Context> {
normalization(OperatorBase::GetSingleArg<string>("normalization", "FULL")) { normalization(OperatorBase::GetSingleArg<string>("normalization", "FULL")) {
OperatorDef softmax_def = MakeOperatorDef("Softmax", "", OperatorDef softmax_def = MakeOperatorDef("Softmax", "",
vector<string>({ input(0).name() }), vector<string>({ input(0).name() }),
vector<string>({ "_t_" + anchor() + "_softmax_prob" })); vector<string>({ "/mnt/" + anchor() + "/softmax_prob" }));
softmax_def.add_arg()->CopyFrom(this->arg("axis")); softmax_def.add_arg()->CopyFrom(this->arg("axis"));
if (op_def.has_device_option()) if (op_def.has_device_option())
softmax_def.mutable_device_option()->CopyFrom(op_def.device_option()); softmax_def.mutable_device_option()->CopyFrom(op_def.device_option());
......
...@@ -26,7 +26,7 @@ class SparseSoftmaxCrossEntropyOp : public Operator<Context> { ...@@ -26,7 +26,7 @@ class SparseSoftmaxCrossEntropyOp : public Operator<Context> {
} }
OperatorDef softmax_def = MakeOperatorDef("Softmax", "", OperatorDef softmax_def = MakeOperatorDef("Softmax", "",
vector<string>({ input(0).name() }), vector<string>({ input(0).name() }),
vector<string>({ "_t_" + anchor() + "_softmax_prob" })); vector<string>({ "/mnt/" + anchor() + "/softmax_prob" }));
softmax_def.add_arg()->CopyFrom(this->arg("axis")); softmax_def.add_arg()->CopyFrom(this->arg("axis"));
if (op_def.has_device_option()) if (op_def.has_device_option())
softmax_def.mutable_device_option()->CopyFrom(op_def.device_option()); softmax_def.mutable_device_option()->CopyFrom(op_def.device_option());
......
...@@ -19,7 +19,7 @@ class SparseSoftmaxFocalLossOp final : public SparseSoftmaxCrossEntropyOp<Contex ...@@ -19,7 +19,7 @@ class SparseSoftmaxFocalLossOp final : public SparseSoftmaxCrossEntropyOp<Contex
axis(OperatorBase::GetSingleArg<int>("axis", 1)), axis(OperatorBase::GetSingleArg<int>("axis", 1)),
normalization(OperatorBase::GetSingleArg<string>("normalization", "VALID")), normalization(OperatorBase::GetSingleArg<string>("normalization", "VALID")),
alpha(OperatorBase::GetSingleArg<float>("alpha", 0.5)), alpha(OperatorBase::GetSingleArg<float>("alpha", 0.5)),
gamma(OperatorBase::GetSingleArg<float>("gamma", 2.0)), gamma(OperatorBase::GetSingleArg<float>("gamma", 0.0)),
neg_id(OperatorBase::GetSingleArg<int>("neg_id", -1)) { neg_id(OperatorBase::GetSingleArg<int>("neg_id", -1)) {
pos_alpha = alpha * 2.0; pos_alpha = alpha * 2.0;
neg_alpha = (1 - alpha) * 2.0; neg_alpha = (1 - alpha) * 2.0;
...@@ -44,7 +44,7 @@ class SparseSoftmaxFocalLossGradientOp final : public SparseSoftmaxCrossEntropyG ...@@ -44,7 +44,7 @@ class SparseSoftmaxFocalLossGradientOp final : public SparseSoftmaxCrossEntropyG
: SparseSoftmaxCrossEntropyGradientOp<Context>(op_def, ws), : SparseSoftmaxCrossEntropyGradientOp<Context>(op_def, ws),
axis(OperatorBase::GetSingleArg<int>("axis", 1)), axis(OperatorBase::GetSingleArg<int>("axis", 1)),
normalization(OperatorBase::GetSingleArg<string>("normalization", "VALID")), normalization(OperatorBase::GetSingleArg<string>("normalization", "VALID")),
gamma(OperatorBase::GetSingleArg<float>("gamma", 2.0)), gamma(OperatorBase::GetSingleArg<float>("gamma", 0.0)),
eps(OperatorBase::GetSingleArg<float>("eps", float(1e-10))), eps(OperatorBase::GetSingleArg<float>("eps", float(1e-10))),
neg_id(OperatorBase::GetSingleArg<int>("neg_id", -1)) {} neg_id(OperatorBase::GetSingleArg<int>("neg_id", -1)) {}
......
...@@ -28,9 +28,8 @@ template <class Context> ...@@ -28,9 +28,8 @@ template <class Context>
class ExpandDimsGradientOp final : public Operator<Context> { class ExpandDimsGradientOp final : public Operator<Context> {
public: public:
ExpandDimsGradientOp(const OperatorDef& op_def, Workspace* ws) ExpandDimsGradientOp(const OperatorDef& op_def, Workspace* ws)
: Operator<Context>(op_def, ws) { : Operator<Context>(op_def, ws) {}
DISABLE_SHARE_GRADIENT;
}
void RunOnDevice() override; void RunOnDevice() override;
}; };
......
...@@ -32,9 +32,8 @@ template <class Context> ...@@ -32,9 +32,8 @@ template <class Context>
class FlattenGradientOp final : public Operator<Context> { class FlattenGradientOp final : public Operator<Context> {
public: public:
FlattenGradientOp(const OperatorDef& op_def, Workspace* ws) FlattenGradientOp(const OperatorDef& op_def, Workspace* ws)
: Operator<Context>(op_def, ws) { : Operator<Context>(op_def, ws) {}
DISABLE_SHARE_GRADIENT;
}
void RunOnDevice() override; void RunOnDevice() override;
}; };
......
...@@ -31,9 +31,8 @@ template <class Context> ...@@ -31,9 +31,8 @@ template <class Context>
class ReshapeGradientOp final : public Operator<Context> { class ReshapeGradientOp final : public Operator<Context> {
public: public:
ReshapeGradientOp(const OperatorDef& op_def, Workspace* ws) ReshapeGradientOp(const OperatorDef& op_def, Workspace* ws)
: Operator<Context>(op_def, ws) { : Operator<Context>(op_def, ws) {}
DISABLE_SHARE_GRADIENT;
}
void RunOnDevice() override; void RunOnDevice() override;
}; };
......
...@@ -16,21 +16,31 @@ class BatchNormOp : public Operator<Context> { ...@@ -16,21 +16,31 @@ class BatchNormOp : public Operator<Context> {
public: public:
BatchNormOp(const OperatorDef& op_def, Workspace* ws) BatchNormOp(const OperatorDef& op_def, Workspace* ws)
: Operator<Context>(op_def, ws), : Operator<Context>(op_def, ws),
axis(OperatorBase::GetSingleArg<int>("axis", -1)),
momentum(OperatorBase::GetSingleArg<float>("momentum", float(0.9))), momentum(OperatorBase::GetSingleArg<float>("momentum", float(0.9))),
eps(OperatorBase::GetSingleArg<float>("eps", float(1e-3))), eps(OperatorBase::GetSingleArg<float>("eps", float(1e-3))),
use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)), use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)),
inplace(OperatorBase::GetSingleArg<bool>("inplace", false)) {} mode(OperatorBase::GetSingleArg<string>("mode", "DEFAULT")) {
if (axis != -1)
CHECK_EQ(axis, 1)
<< "\nThe axis can only be set to 1.";
}
void Setup();
void RunOnDevice() override; void RunOnDevice() override;
template <typename T> void RunWithType(); template <typename T> void TrainingRunWithType();
template <typename T> void InferenceRunWithType();
protected: protected:
float momentum, eps; float momentum, eps;
Tensor mean, num_by_chans; Tensor mean, num_by_chans;
Tensor* num_multiplier, *spatial_multiplier, *stddev, *var; Tensor* multiplier, *num_multiplier, *spatial_multiplier;
TIndex num, channels, spatial_dim, nbychans; Tensor* stddev, *var;
TIndex axis, N, C, S, NC, NS;
string data_format, mode;
int use_stats; int use_stats;
bool use_global_stats, inplace, is_recomputing; bool use_global_stats, is_recomputing;
}; };
template <class Context> template <class Context>
...@@ -38,51 +48,72 @@ class BatchNormGradientOp final : public Operator<Context> { ...@@ -38,51 +48,72 @@ class BatchNormGradientOp final : public Operator<Context> {
public: public:
BatchNormGradientOp(const OperatorDef& op_def, Workspace *ws) BatchNormGradientOp(const OperatorDef& op_def, Workspace *ws)
: Operator<Context>(op_def, ws), : Operator<Context>(op_def, ws),
use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)) {} axis(OperatorBase::GetSingleArg<int>("axis", -1)),
use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)) {
if (axis != -1)
CHECK_EQ(axis, 1)
<< "\nThe axis can only be set to 1.";
}
void Setup();
void RunOnDevice() override; void RunOnDevice() override;
template <typename T> void RunWithType(); template <typename T> void TrainingRunWithType();
template <typename T> void InferenceRunWithType();
protected: protected:
Tensor num_by_chans; Tensor num_by_chans;
Tensor* num_multiplier, *spatial_multiplier, *stddev, *var; Tensor* multiplier, *num_multiplier, *spatial_multiplier;
TIndex num, channels, spatial_dim, nbychans; Tensor* stddev, *var;
TIndex axis, N, C, S, NC, NS;
string data_format;
int use_stats; int use_stats;
bool use_global_stats; bool use_global_stats;
}; };
template <class Context> template <class Context>
class BNOp : public Operator<Context> { class FusedBatchNormOp : public Operator<Context> {
public: public:
BNOp(const OperatorDef& op_def, Workspace* ws) FusedBatchNormOp(const OperatorDef& op_def, Workspace* ws)
: Operator<Context>(op_def, ws), : Operator<Context>(op_def, ws),
axis(OperatorBase::GetSingleArg<int>("axis", -1)),
momentum(OperatorBase::GetSingleArg<float>("momentum", float(0.9))), momentum(OperatorBase::GetSingleArg<float>("momentum", float(0.9))),
eps(OperatorBase::GetSingleArg<float>("eps", float(1e-3))), eps(OperatorBase::GetSingleArg<float>("eps", float(1e-3))),
use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)) { } use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)) {}
void Setup() { NOT_IMPLEMENTED; }
void RunOnDevice() override { NOT_IMPLEMENTED; } void RunOnDevice() override { NOT_IMPLEMENTED; }
template <typename T> void RunWithType() { NOT_IMPLEMENTED; } template <typename T> void RunWithType() { NOT_IMPLEMENTED; }
protected: protected:
float momentum, eps; float momentum, eps;
TIndex axis, N, C, S, NC, NS;
string data_format;
int use_stats; int use_stats;
bool use_global_stats, is_recomputing; bool use_global_stats, is_recomputing;
}; };
template <class Context> template <class Context>
class BNGradientOp : public Operator<Context> { class FusedBatchNormGradientOp : public Operator<Context> {
public: public:
BNGradientOp(const OperatorDef& op_def, Workspace* ws) FusedBatchNormGradientOp(const OperatorDef& op_def, Workspace* ws)
: Operator<Context>(op_def, ws), : Operator<Context>(op_def, ws),
axis(OperatorBase::GetSingleArg<int>("axis", -1)),
eps(OperatorBase::GetSingleArg<float>("eps", float(1e-3))), eps(OperatorBase::GetSingleArg<float>("eps", float(1e-3))),
use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)) { } use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)) { }
void Setup() { NOT_IMPLEMENTED; }
void ShareGradient() override; void ShareGradient() override;
void RunOnDevice() override { NOT_IMPLEMENTED; } void RunOnDevice() override { NOT_IMPLEMENTED; }
template <typename T> void RunWithType() { NOT_IMPLEMENTED; } template <typename T> void RunWithType() { NOT_IMPLEMENTED; }
protected: protected:
float eps; float eps;
TIndex axis, N, C, S, NC, NS;
string data_format;
int use_stats; int use_stats;
bool use_global_stats; bool use_global_stats;
}; };
...@@ -94,49 +125,54 @@ class BNGradientOp : public Operator<Context> { ...@@ -94,49 +125,54 @@ class BNGradientOp : public Operator<Context> {
#include "utils/cudnn_device.h" #include "utils/cudnn_device.h"
template <class Context> template <class Context>
class CuDNNBNOp final : public BNOp<Context> { class CuDNNBatchNormOp final : public FusedBatchNormOp<Context> {
public: public:
CuDNNBNOp(const OperatorDef& op_def, Workspace* ws) CuDNNBatchNormOp(const OperatorDef& op_def, Workspace* ws)
: BNOp<Context>(op_def, ws) { : FusedBatchNormOp<Context>(op_def, ws) {
CUDNN_CHECK(cudnnCreateTensorDescriptor(&input_desc)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&input_desc));
CUDNN_CHECK(cudnnCreateTensorDescriptor(&output_desc)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&output_desc));
CUDNN_CHECK(cudnnCreateTensorDescriptor(&bn_desc)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&bn_desc));
this->eps = std::max(this->eps, float(CUDNN_BN_MIN_EPSILON)); this->eps = std::max(this->eps, float(CUDNN_BN_MIN_EPSILON));
} }
void Setup();
void RunOnDevice() override; void RunOnDevice() override;
template <typename T> void SpatialRunWithType(); template <typename T> void RunWithType();
template <typename T> void PerActivationRunWithType();
protected: protected:
cudnnTensorDescriptor_t input_desc, output_desc, bn_desc; cudnnTensorDescriptor_t input_desc, output_desc, bn_desc;
TIndex num, channels, spatial_dim; cudnnBatchNormMode_t bn_mode;
TIndex N, C;
string data_format;
Tensor* mean, *var; Tensor* mean, *var;
bool use_global_stats, is_recomputing;
}; };
template <class Context> template <class Context>
class CuDNNBNGradientOp final : public BNGradientOp<Context> { class CuDNNBatchNormGradientOp final : public FusedBatchNormGradientOp<Context> {
public: public:
CuDNNBNGradientOp(const OperatorDef& op_def, Workspace* ws) CuDNNBatchNormGradientOp(const OperatorDef& op_def, Workspace* ws)
: BNGradientOp<Context>(op_def, ws) { : FusedBatchNormGradientOp<Context>(op_def, ws) {
CUDNN_CHECK(cudnnCreateTensorDescriptor(&input_desc)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&input_desc));
CUDNN_CHECK(cudnnCreateTensorDescriptor(&output_desc)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&output_desc));
CUDNN_CHECK(cudnnCreateTensorDescriptor(&bn_desc)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&bn_desc));
this->eps = std::max(this->eps, float(CUDNN_BN_MIN_EPSILON)); this->eps = std::max(this->eps, float(CUDNN_BN_MIN_EPSILON));
} }
void Setup();
void RunOnDevice() override; void RunOnDevice() override;
template <typename T> void SpatialRunWithType(); template <typename T> void TrainingRunWithType();
template <typename T> void PerActivationRunWithType(); template <typename T> void InferenceRunWithType();
protected: protected:
cudnnTensorDescriptor_t input_desc, output_desc, bn_desc; cudnnTensorDescriptor_t input_desc, output_desc, bn_desc;
cudnnBatchNormMode_t bn_mode;
TIndex N, C, S, NC, NS;
string data_format;
Tensor num_by_chans; Tensor num_by_chans;
Tensor* num_multiplier, *spatial_multiplier; Tensor* multiplier, *num_multiplier, *spatial_multiplier;
Tensor* mean, *var, *stddev; Tensor* mean, *var, *stddev;
TIndex num, channels, spatial_dim, nbychans;
bool use_global_stats;
}; };
#endif #endif
......
...@@ -16,27 +16,36 @@ class BatchRenormOp : public Operator<Context> { ...@@ -16,27 +16,36 @@ class BatchRenormOp : public Operator<Context> {
public: public:
BatchRenormOp(const OperatorDef& op_def, Workspace* ws) BatchRenormOp(const OperatorDef& op_def, Workspace* ws)
: Operator<Context>(op_def, ws), : Operator<Context>(op_def, ws),
axis(OperatorBase::GetSingleArg<int>("axis", -1)),
momentum(OperatorBase::GetSingleArg<float>("momentum", float(0.9))), momentum(OperatorBase::GetSingleArg<float>("momentum", float(0.9))),
eps(OperatorBase::GetSingleArg<float>("eps", float(1e-3))), eps(OperatorBase::GetSingleArg<float>("eps", float(1e-3))),
r_max(OperatorBase::GetSingleArg<float>("r_max", float(3.0))), r_max(OperatorBase::GetSingleArg<float>("r_max", float(3.0))),
d_max(OperatorBase::GetSingleArg<float>("d_max", float(5.0))), d_max(OperatorBase::GetSingleArg<float>("d_max", float(5.0))),
t_delta(OperatorBase::GetSingleArg<float>("t_delta", float(1.0))), t_delta(OperatorBase::GetSingleArg<float>("t_delta", float(1.0))),
use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)), use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)),
inplace(OperatorBase::GetSingleArg<bool>("inplace", false)), t_r_max(float(1.0)), t_d_max(float(0.0)), t_val(float(0.0)),
t_r_max(float(1.0)), t_d_max(float(0.0)), t_val(float(0.0)) {} mode(OperatorBase::GetSingleArg<string>("mode", "DEFAULT")) {
if (axis != -1)
CHECK_EQ(axis, 1)
<< "\nThe axis can only be set to 1.";
}
void Setup();
void RunOnDevice() override; void RunOnDevice() override;
template <typename T> void RunWithType(); template <typename T> void TrainingRunWithType();
template <typename T> void InferenceRunWithType();
protected: protected:
float momentum, eps, r_max, d_max, t_delta; float momentum, eps, r_max, d_max, t_delta;
float t_r_max, t_d_max, t_val; float t_r_max, t_d_max, t_val;
Tensor mean, d, t_h_mean, t_h_var, num_by_chans; Tensor mean, d, t_h_mean, t_h_var, num_by_chans;
Tensor* num_multiplier, *spatial_multiplier; Tensor* multiplier, *num_multiplier, *spatial_multiplier;
Tensor* stddev, *r, *var, *x_norm; Tensor* stddev, *r, *var, *x_norm;
TIndex num, channels, spatial_dim, nbychans; TIndex axis, N, C, S, NC, NS;
string data_format, mode;
int use_stats; int use_stats;
bool use_global_stats, inplace, is_recomputing; bool use_global_stats, is_recomputing;
}; };
template <class Context> template <class Context>
...@@ -44,16 +53,27 @@ class BatchRenormGradientOp final : public Operator<Context> { ...@@ -44,16 +53,27 @@ class BatchRenormGradientOp final : public Operator<Context> {
public: public:
BatchRenormGradientOp(const OperatorDef& op_def, Workspace *ws) BatchRenormGradientOp(const OperatorDef& op_def, Workspace *ws)
: Operator<Context>(op_def, ws), : Operator<Context>(op_def, ws),
use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)) {} axis(OperatorBase::GetSingleArg<int>("axis", -1)),
use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)) {
if (axis != -1)
CHECK_EQ(axis, 1)
<< "\nThe axis can only be set to 1.";
}
void Setup();
void RunOnDevice() override; void RunOnDevice() override;
template <typename T> void TrainingRunWithType();
template <typename T> void InferenceRunWithType();
template <typename T> void RunWithType(); template <typename T> void RunWithType();
protected: protected:
Tensor mean, num_by_chans; Tensor mean, num_by_chans;
Tensor* num_multiplier, *spatial_multiplier; Tensor* multiplier, *num_multiplier, *spatial_multiplier;
Tensor* stddev, *r, *var, *x_norm; Tensor* stddev, *r, *var, *x_norm;
TIndex num, channels, spatial_dim, nbychans; TIndex axis, N, C, S, NC, NS;
string data_format;
int use_stats; int use_stats;
bool use_global_stats; bool use_global_stats;
}; };
......
...@@ -16,8 +16,14 @@ class InstanceNormOp : public Operator<Context> { ...@@ -16,8 +16,14 @@ class InstanceNormOp : public Operator<Context> {
public: public:
InstanceNormOp(const OperatorDef& op_def, Workspace* ws) InstanceNormOp(const OperatorDef& op_def, Workspace* ws)
: Operator<Context>(op_def, ws), : Operator<Context>(op_def, ws),
eps(OperatorBase::GetSingleArg<float>("eps", float(1e-3))), axis(OperatorBase::GetSingleArg<int>("axis", -1)),
inplace(OperatorBase::GetSingleArg<bool>("inplace", false)) {} eps(OperatorBase::GetSingleArg<float>("eps", float(1e-3))) {
if (axis != -1)
CHECK_EQ(axis, 1)
<< "\nThe axis can only be set to 1.";
}
void Setup();
void RunOnDevice() override; void RunOnDevice() override;
template <typename T> void RunWithType(); template <typename T> void RunWithType();
...@@ -26,22 +32,30 @@ class InstanceNormOp : public Operator<Context> { ...@@ -26,22 +32,30 @@ class InstanceNormOp : public Operator<Context> {
float eps; float eps;
Tensor mean; Tensor mean;
Tensor* spatial_multiplier, *stddev, *var; Tensor* spatial_multiplier, *stddev, *var;
TIndex num, channels, spatial_dim, nbychans; TIndex axis, N, C, S, NC, CS;
bool inplace; string data_format;
}; };
template <class Context> template <class Context>
class InstanceNormGradientOp final : public Operator<Context> { class InstanceNormGradientOp final : public Operator<Context> {
public: public:
InstanceNormGradientOp(const OperatorDef& op_def, Workspace *ws) InstanceNormGradientOp(const OperatorDef& op_def, Workspace *ws)
: Operator<Context>(op_def, ws) {} : Operator<Context>(op_def, ws),
axis(OperatorBase::GetSingleArg<int>("axis", -1)) {
if (axis != -1)
CHECK_EQ(axis, 1)
<< "\nThe axis can only be set to 1.";
}
void Setup();
void RunOnDevice() override; void RunOnDevice() override;
template <typename T> void RunWithType(); template <typename T> void RunWithType();
protected: protected:
Tensor* spatial_multiplier, *stddev, *var; Tensor* spatial_multiplier, *stddev, *var;
TIndex num, channels, spatial_dim, nbychans; TIndex axis, N, C, S, NC, CS;
string data_format;
}; };
} // namespace dragon } // namespace dragon
......
...@@ -45,7 +45,6 @@ class ROIAlignGradientOp : public Operator<Context> { ...@@ -45,7 +45,6 @@ class ROIAlignGradientOp : public Operator<Context> {
} }
void RunOnDevice() override; void RunOnDevice() override;
void CleanResource() override;
template <typename T> void RunWithType(); template <typename T> void RunWithType();
protected: protected:
......
...@@ -42,7 +42,6 @@ class ROIPoolingGradientOp final : public Operator<Context> { ...@@ -42,7 +42,6 @@ class ROIPoolingGradientOp final : public Operator<Context> {
spatial_scale(OperatorBase::GetSingleArg<float>("spatial_scale", 1.0)) {} spatial_scale(OperatorBase::GetSingleArg<float>("spatial_scale", 1.0)) {}
void RunOnDevice() override; void RunOnDevice() override;
void CleanResource() override;
template <typename T> void RunWithType(); template <typename T> void RunWithType();
protected: protected:
......
...@@ -109,7 +109,7 @@ class DeviceGuard { ...@@ -109,7 +109,7 @@ class DeviceGuard {
#else #else
#define CUDA_NOT_COMPILED \ #define CUDA_NOT_COMPILED \
LOG(FATAL) << "CUDA was not compiled."; LOG(FATAL) << "CUDA was not compiled."
#endif // WITH_CUDA #endif // WITH_CUDA
......
...@@ -13,8 +13,8 @@ _ENGINE_SCOPE = '' ...@@ -13,8 +13,8 @@ _ENGINE_SCOPE = ''
SEPARATOR = '/' SEPARATOR = '/'
_CURRENT_OP_IDX = 0 _CURRENT_OP_UID = 0
_SCOPE_TENSOR_IDX = defaultdict(int) _CURRENT_TENSOR_UID = 0
__all__ = [ __all__ = [
'GetTensorIdx', 'GetTensorIdx',
...@@ -35,9 +35,9 @@ def GetOperatorIdx(): ...@@ -35,9 +35,9 @@ def GetOperatorIdx():
The operator index. The operator index.
""" """
global _CURRENT_OP_IDX global _CURRENT_OP_UID
_CURRENT_OP_IDX = _CURRENT_OP_IDX + 1 _CURRENT_OP_UID += 1
return _CURRENT_OP_IDX - 1 return _CURRENT_OP_UID - 1
def GetTensorIdx(): def GetTensorIdx():
...@@ -49,9 +49,9 @@ def GetTensorIdx(): ...@@ -49,9 +49,9 @@ def GetTensorIdx():
The tensor index. The tensor index.
""" """
global _SCOPE_TENSOR_IDX global _CURRENT_TENSOR_UID
_SCOPE_TENSOR_IDX[_TENSOR_SCOPE] += 1 _CURRENT_TENSOR_UID += 1
return _SCOPE_TENSOR_IDX[_TENSOR_SCOPE] - 1 return _CURRENT_TENSOR_UID - 1
def GetOperatorName(name=None): def GetOperatorName(name=None):
...@@ -104,7 +104,11 @@ class TensorScope(object): ...@@ -104,7 +104,11 @@ class TensorScope(object):
def __init__(self, prefix): def __init__(self, prefix):
assert isinstance(prefix, type('str')), \ assert isinstance(prefix, type('str')), \
"TensorScope takes in a string as its argument." "TensorScope takes in a string as its argument."
if prefix != '':
self.prefix = prefix + SEPARATOR self.prefix = prefix + SEPARATOR
else:
# avoid duplicated separators
self.prefix = ''
def __enter__(self): def __enter__(self):
global _TENSOR_SCOPE global _TENSOR_SCOPE
...@@ -114,9 +118,15 @@ class TensorScope(object): ...@@ -114,9 +118,15 @@ class TensorScope(object):
def __exit__(self, type, value, traceback): def __exit__(self, type, value, traceback):
global _TENSOR_SCOPE global _TENSOR_SCOPE
assert _TENSOR_SCOPE.endswith(self.prefix) assert _TENSOR_SCOPE.endswith(self.prefix)
if self.prefix != '':
_TENSOR_SCOPE = _TENSOR_SCOPE[:-len(self.prefix)] _TENSOR_SCOPE = _TENSOR_SCOPE[:-len(self.prefix)]
def get_tensor_scope():
global _TENSOR_SCOPE
return _TENSOR_SCOPE
def set_tensor_scope(name_scope): def set_tensor_scope(name_scope):
global _TENSOR_SCOPE global _TENSOR_SCOPE
_TENSOR_SCOPE = name_scope _TENSOR_SCOPE = name_scope
......
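As a quick sanity check of the scope behavior above, a minimal sketch (assuming ``__enter__`` appends the prefix to ``_TENSOR_SCOPE``, as the ``__exit__`` logic implies; ``get_tensor_scope`` is the helper added in this commit):

.. code-block:: python

    from dragon.core.scope import TensorScope, get_tensor_scope

    # Nested scopes concatenate prefixes with the '/' separator;
    # an empty prefix adds no separator and leaves the scope untouched.
    with TensorScope('block1'):
        print(get_tensor_scope())        # 'block1/'
        with TensorScope('conv1'):
            print(get_tensor_scope())    # 'block1/conv1/'
        with TensorScope(''):
            print(get_tensor_scope())    # still 'block1/'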
...@@ -12,6 +12,7 @@ from dragon.core.utils import MakeOperatorDef ...@@ -12,6 +12,7 @@ from dragon.core.utils import MakeOperatorDef
from dragon.core.scope import GetOperatorName, GetTensorName from dragon.core.scope import GetOperatorName, GetTensorName
from six.moves import range as xrange from six.moves import range as xrange
class Tensor(object): class Tensor(object):
""" """
Tensor is generally used to represent a n-dim array, Tensor is generally used to represent a n-dim array,
...@@ -228,8 +229,11 @@ class Tensor(object): ...@@ -228,8 +229,11 @@ class Tensor(object):
@name.setter @name.setter
def name(self, value): def name(self, value):
from .scope import _TENSOR_SCOPE from .scope import _TENSOR_SCOPE
if value is None: self._name = _TENSOR_SCOPE + GetTensorName() if value is None:
else: self._name = _TENSOR_SCOPE + value # ignore the scope for the name generated by uid
self._name = GetTensorName()
else:
self._name = _TENSOR_SCOPE + value
@property @property
def grad_wrts(self): def grad_wrts(self):
...@@ -837,27 +841,7 @@ class Tensor(object): ...@@ -837,27 +841,7 @@ class Tensor(object):
>>> [1, 2, 3, 4] >>> [1, 2, 3, 4]
""" """
raise NotImplementedError('Implemented in <vm.tensorflow.framework.tensor_shape>')
class TensorShape(object):
class Dimension(object):
def __init__(self, dim):
self.dim = dim
def __str__(self):
return 'Dimension({})'.format(self.dim)
def __init__(self, shape):
self.dims = [self.Dimension(dim) for dim in shape]
self.shape = shape
def __str__(self):
dims = [str(dim) for dim in self.dims]
return 'TensorShape([{}])'.format(', '.join(dims))
def as_list(self):
return self.shape
return TensorShape(self.shape) if self.shape is not None else None
############################################ ############################################
# # # #
......
...@@ -20,7 +20,9 @@ ...@@ -20,7 +20,9 @@
\sigma_{B}^{2} = \frac{1}{m} \sum_{i=1}^{m}(x_{i} - \mu_{B})^{2} \\ \sigma_{B}^{2} = \frac{1}{m} \sum_{i=1}^{m}(x_{i} - \mu_{B})^{2} \\
\hat{x}_{i} = \frac{x_{i} - \mu_{B}}{\sqrt{\sigma_{B}^{2} + \epsilon}} \cdot r + d \\ \, \hat{x}_{i} = \frac{x_{i} - \mu_{B}}{\sqrt{\sigma_{B}^{2} + \epsilon}} \cdot r + d \\ \,
.. |moving_average_function| mathmacro:: \\ \, \\ x_{moving} = Momentum * x_{moving} + x_{stat} .. |default_moving_average_function| mathmacro:: \\ \, \\ x_{moving} \leftarrow Momentum * x_{moving} + (1 - Momentum) * x_{stat} \\ \,
.. |caffe_moving_average_function| mathmacro:: \\ \, \\ x_{moving} \leftarrow Momentum * x_{moving} + x_{stat} \\ \,
.. _ops.Scale(*args, **kwargs): arithmetic.html#dragon.operators.arithmetic.Scale .. _ops.Scale(*args, **kwargs): arithmetic.html#dragon.operators.arithmetic.Scale
......
...@@ -107,15 +107,15 @@ List Brief ...@@ -107,15 +107,15 @@ List Brief
Normalization Normalization
------------- -------------
=============== ====================================================================== ================== ======================================================================
List Brief List Brief
=============== ====================================================================== ================== ======================================================================
`BatchNorm`_ Batch Normalization, introduced by `[Ioffe & Szegedy, 2015] <https://arxiv.org/abs/1502.03167>`_. `BatchNorm`_ Batch Normalization, introduced by `[Ioffe & Szegedy, 2015] <https://arxiv.org/abs/1502.03167>`_.
`BatchRenorm`_ Batch Renormalization, introduced by `[Ioffe, 2017] <https://arxiv.org/abs/1702.03275>`_. `BatchRenorm`_ Batch Renormalization, introduced by `[Ioffe, 2017] <https://arxiv.org/abs/1702.03275>`_.
`BN`_ Batch Normalization, with scale procedure after normalization. `FusedBatchNorm`_ Batch Normalization, with scale procedure after normalization.
`InstanceNorm`_ Instance Normalization, introduced by `[Ulyanov et.al, 2016] <https://arxiv.org/abs/1607.08022>`_. `InstanceNorm`_ Instance Normalization, introduced by `[Ulyanov et.al, 2016] <https://arxiv.org/abs/1607.08022>`_.
`L2Norm`_ L2 Normalization, introduced by `[Liu et.al, 2015] <https://arxiv.org/abs/1506.04579>`_. `L2Norm`_ L2 Normalization, introduced by `[Liu et.al, 2015] <https://arxiv.org/abs/1506.04579>`_.
=============== ====================================================================== ================== ======================================================================
NDArray NDArray
------- -------
...@@ -244,7 +244,7 @@ List Brief ...@@ -244,7 +244,7 @@ List Brief
.. _BatchNorm: operators/norm.html#dragon.operators.norm.BatchNorm .. _BatchNorm: operators/norm.html#dragon.operators.norm.BatchNorm
.. _BatchRenorm: operators/norm.html#dragon.operators.norm.BatchRenorm .. _BatchRenorm: operators/norm.html#dragon.operators.norm.BatchRenorm
.. _BN: operators/norm.html#dragon.operators.norm.BN .. _FusedBatchNorm: operators/norm.html#dragon.operators.norm.FusedBatchNorm
.. _InstanceNorm: operators/norm.html#dragon.operators.norm.InstanceNorm .. _InstanceNorm: operators/norm.html#dragon.operators.norm.InstanceNorm
.. _L2Norm: operators/norm.html#dragon.operators.norm.L2Norm .. _L2Norm: operators/norm.html#dragon.operators.norm.L2Norm
......
...@@ -170,34 +170,15 @@ Installation - Linux (Distributed, CPU) ...@@ -170,34 +170,15 @@ Installation - Linux (Distributed, CPU)
**$** Set ``PYTHON_INCLUDE_DIR`` / ``ANACONDA_ROOT_DIR`` and ``NUMPY_ROOT_DIR`` **$** Set ``PYTHON_INCLUDE_DIR`` / ``ANACONDA_ROOT_DIR`` and ``NUMPY_ROOT_DIR``
**Step 5:** Set Environment Variables **Step 5:** Setup MPI
**$** Create dragon.conf
.. code-block:: shell
sudo vim /etc/ld.so.conf.d/dragon.conf
**$** Append 1 line for ``REPO_ROOT/3rdparty/lib``
.. code-block:: shell
/xyz/Dragon/3rdparty/lib
**$** Rebuild the scanning cache
.. code-block:: shell
sudo ldconfig
**Step 6:** Setup MPI
.. code-block:: shell .. code-block:: shell
cd $REPO_ROOT/3rdparty cd $REPO_ROOT/3rdparty
bash ./setup_mpi.sh bash ./setup_mpi.sh
sudo cp openmpi/install/bin/mpirun /usr/bin
**Step 7:** Compile Dragon **Step 6:** Compile Dragon
**$** Install CMake **$** Install CMake
...@@ -215,7 +196,7 @@ Installation - Linux (Distributed, CPU) ...@@ -215,7 +196,7 @@ Installation - Linux (Distributed, CPU)
cmake .. cmake ..
make install -j16 make install -j16
**Step 8:** Install Dragon **Step 7:** Install Dragon
.. code-block:: shell .. code-block:: shell
...@@ -275,34 +256,15 @@ Installation - Linux (Distributed, GPU) ...@@ -275,34 +256,15 @@ Installation - Linux (Distributed, GPU)
**$** OpenMPI can take ``NCCL`` and our ``CUDA-AWARE`` communications at the same time. **$** OpenMPI can take ``NCCL`` and our ``CUDA-AWARE`` communications at the same time.
**Step 6:** Set Environment Variables **Step 6:** Setup MPI
**$** Create dragon.conf
.. code-block:: shell
sudo vim /etc/ld.so.conf.d/dragon.conf
**$** Append 1 line for ``REPO_ROOT/3rdparty/lib``
.. code-block:: shell
/xyz/Dragon/3rdparty/lib
**$** Rebuild the scanning cache
.. code-block:: shell
sudo ldconfig
**Step 7:** Setup MPI
.. code-block:: shell .. code-block:: shell
cd $REPO_ROOT/3rdparty cd $REPO_ROOT/3rdparty
bash ./setup_mpi.sh bash ./setup_mpi.sh
sudo cp openmpi/install/bin/mpirun /usr/bin
**Step 8:** Compile Dragon **Step 7:** Compile Dragon
**$** Install CMake **$** Install CMake
...@@ -320,7 +282,7 @@ Installation - Linux (Distributed, GPU) ...@@ -320,7 +282,7 @@ Installation - Linux (Distributed, GPU)
cmake .. cmake ..
make install -j16 make install -j16
**Step 9:** Install Dragon **Step 8:** Install Dragon
.. code-block:: shell .. code-block:: shell
...@@ -379,7 +341,7 @@ Add ``REPO_ROOT/3rdparty/bin`` to system environment variables ...@@ -379,7 +341,7 @@ Add ``REPO_ROOT/3rdparty/bin`` to system environment variables
**$** Open ``DRAGON_ROOT/build/Dragon.sln`` **$** Open ``DRAGON_ROOT/build/Dragon.sln``
**$** Compile and generate for ``INSTAL`` solution **$** Compile and generate for ``INSTALL`` solution
**Step 6:** Install Dragon **Step 6:** Install Dragon
......
...@@ -165,7 +165,20 @@ def Matmul(inputs, TransA=False, TransB=False, **kwargs): ...@@ -165,7 +165,20 @@ def Matmul(inputs, TransA=False, TransB=False, **kwargs):
if inputs[0].shape is not None and \ if inputs[0].shape is not None and \
inputs[1].shape is not None: inputs[1].shape is not None:
pass if len(inputs[0].shape) < 2 or \
len(inputs[1].shape) < 2:
raise ValueError('The rank of A and B should be at least 2.')
if len(inputs[0].shape) != len(inputs[1].shape):
raise ValueError('Both A and B should have the same number of dimensions.')
M = inputs[0].shape[-1] if TransA else inputs[0].shape[-2]
K1 = inputs[0].shape[-2] if TransA else inputs[0].shape[-1]
K2 = inputs[1].shape[-1] if TransB else inputs[1].shape[-2]
N = inputs[1].shape[-2] if TransB else inputs[1].shape[-1]
if K1 != K2:
raise ValueError('Cannot multiply A: ({}, {}) with B: ({}, {})'.format(M, K1, K2, N))
output.shape = inputs[0].shape[:]
output.shape[-2] = M
output.shape[-1] = N
return output return output
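A standalone mirror of the shape rule above, kept in plain Python so it can be checked in isolation (illustration only, not part of the commit):

.. code-block:: python

    def matmul_out_shape(a_shape, b_shape, trans_a=False, trans_b=False):
        # Both operands need rank >= 2 and the same rank.
        assert len(a_shape) >= 2 and len(b_shape) >= 2
        assert len(a_shape) == len(b_shape)
        M  = a_shape[-1] if trans_a else a_shape[-2]
        K1 = a_shape[-2] if trans_a else a_shape[-1]
        K2 = b_shape[-1] if trans_b else b_shape[-2]
        N  = b_shape[-2] if trans_b else b_shape[-1]
        assert K1 == K2, 'Cannot multiply A: ({}, {}) with B: ({}, {})'.format(M, K1, K2, N)
        return a_shape[:-2] + [M, N]

    print(matmul_out_shape([8, 32, 64], [8, 64, 16]))   # [8, 32, 16]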
...@@ -412,6 +425,10 @@ def Scale(inputs, axis=1, num_axes=1, **kwargs): ...@@ -412,6 +425,10 @@ def Scale(inputs, axis=1, num_axes=1, **kwargs):
The scale ranges are: |scale_function| The scale ranges are: |scale_function|
Set ``axis`` to specify the start axis (can be negative).
Set ``num_axes`` to ``-1`` to scale all remaining axes.
Parameters Parameters
---------- ----------
inputs : list of Tensor inputs : list of Tensor
......
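For intuition, a NumPy sketch of the scaling range described above, assuming the Caffe-style semantics the docstring points to (the scale parameter spans ``[axis, axis + num_axes)`` and broadcasts over the remaining axes):

.. code-block:: python

    import numpy as np

    x = np.random.randn(2, 3, 4, 5)        # e.g. (N, C, H, W)
    scale = np.random.randn(3, 4)          # covers axes [1, 3) for axis=1, num_axes=2
    y = x * scale.reshape(1, 3, 4, 1)      # broadcast back over the untouched axes
    print(y.shape)                         # (2, 3, 4, 5)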
...@@ -118,7 +118,7 @@ def SoftmaxCrossEntropy(inputs, axis=1, normalization='FULL', **kwargs): ...@@ -118,7 +118,7 @@ def SoftmaxCrossEntropy(inputs, axis=1, normalization='FULL', **kwargs):
return output return output
def SmoothL1Loss(inputs, sigma=1.0, **kwargs): def SmoothL1Loss(inputs, sigma=1.0, normalization='BATCH_SIZE', **kwargs):
"""SmoothL1Loss, introduced by `[Girshick, 2015] <https://arxiv.org/abs/1504.08083>`_. """SmoothL1Loss, introduced by `[Girshick, 2015] <https://arxiv.org/abs/1504.08083>`_.
Parameters Parameters
...@@ -127,6 +127,8 @@ def SmoothL1Loss(inputs, sigma=1.0, **kwargs): ...@@ -127,6 +127,8 @@ def SmoothL1Loss(inputs, sigma=1.0, **kwargs):
The inputs, represent [input, targets, inside_w, outside_w]. The inputs, represent [input, targets, inside_w, outside_w].
sigma : float sigma : float
The sigma of L1 bound. The sigma of L1 bound.
normalization : str
The normalization, ``FULL``, ``BATCH_SIZE``, or ``NONE``.
Returns Returns
------- -------
...@@ -203,7 +205,7 @@ def L2Loss(inputs, normalization='BATCH_SIZE', **kwargs): ...@@ -203,7 +205,7 @@ def L2Loss(inputs, normalization='BATCH_SIZE', **kwargs):
def SparseSoftmaxFocalLoss(inputs, axis=1, normalization='VALID', ignore_labels=(), def SparseSoftmaxFocalLoss(inputs, axis=1, normalization='VALID', ignore_labels=(),
alpha=0.5, gamma=2.0, eps=1e-10, neg_id=-1, **kwargs): alpha=0.5, gamma=0.0, eps=1e-10, neg_id=-1, **kwargs):
"""SoftmaxFocalLoss with sparse labels, introduced by `[Lin et.al, 2017] <https://arxiv.org/abs/1708.02002>`_. """SoftmaxFocalLoss with sparse labels, introduced by `[Lin et.al, 2017] <https://arxiv.org/abs/1708.02002>`_.
Parameters Parameters
...@@ -219,7 +221,7 @@ def SparseSoftmaxFocalLoss(inputs, axis=1, normalization='VALID', ignore_labels= ...@@ -219,7 +221,7 @@ def SparseSoftmaxFocalLoss(inputs, axis=1, normalization='VALID', ignore_labels=
alpha : float alpha : float
The scale factor on the rare class. Default is ``0.5``. The scale factor on the rare class. Default is ``0.5``.
gamma : float gamma : float
The exponential decay factor on the easy examples. Default is ``2.0``. The exponential decay factor on the easy examples. Default is ``0.0``.
eps : float eps : float
The eps. The eps.
neg_id : int neg_id : int
......
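The new ``gamma`` default of ``0.0`` disables the focal modulation entirely; a small NumPy sketch of the weighting from the cited paper (an illustration, not the operator's exact kernel):

.. code-block:: python

    import numpy as np

    # Focal term from Lin et al., 2017: -(1 - p)^gamma * log(p), per example.
    # With gamma = 0.0 the modulating factor is 1, i.e. plain cross-entropy.
    p = np.array([0.9, 0.6, 0.2])          # probability assigned to the true class
    for gamma in (0.0, 2.0):
        print(gamma, -np.power(1.0 - p, gamma) * np.log(p))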
...@@ -612,22 +612,24 @@ def Flatten(inputs, axis=0, num_axes=-1, keep_axes=None, **kwargs): ...@@ -612,22 +612,24 @@ def Flatten(inputs, axis=0, num_axes=-1, keep_axes=None, **kwargs):
output = Tensor.CreateOperator(nout=1, op_type='Flatten', **arguments) output = Tensor.CreateOperator(nout=1, op_type='Flatten', **arguments)
if inputs.shape is not None: if inputs.shape is not None:
fake_shape = inputs.shape[:]
fake_shape = [1 if dim is None else dim for dim in fake_shape]
if keep_axes is not None: if keep_axes is not None:
if keep_axes > len(inputs.shape): if keep_axes > len(inputs.shape):
raise ValueError('The total number of axes is {}, can not keep {}.' raise ValueError('The total number of axes is {}, can not keep {}.'
.format(len(inputs.shape), keep_axes)) .format(len(inputs.shape), keep_axes))
total_count = np.prod(inputs.shape) total_count = np.prod(fake_shape)
output.shape = [] output.shape = []
for i in xrange(keep_axes - 1): for i in xrange(keep_axes - 1):
output.shape.append(inputs.shape[i]) output.shape.append(inputs.shape[i])
total_count *= inputs.shape[i] total_count *= fake_shape[i]
if total_count != 1: if total_count != 1:
output.shape.append(np.long(total_count)) output.shape.append(total_count)
else: else:
if num_axes == -1: num_axes = len(inputs.shape) - axis if num_axes == -1: num_axes = len(inputs.shape) - axis
elif num_axes == 0: elif num_axes == 0:
raise ValueError('num_axes must > 0 or be -1.') raise ValueError('num_axes must > 0 or be -1.')
num_flatten = np.prod(inputs.shape[axis : axis + num_axes]) num_flatten = np.prod(fake_shape[axis : axis + num_axes])
output.shape = inputs.shape[: axis] + [num_flatten] + inputs.shape[axis + num_axes :] output.shape = inputs.shape[: axis] + [num_flatten] + inputs.shape[axis + num_axes :]
return output return output
......
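The ``fake_shape`` trick above lets partially-known shapes (``None`` dims) flow through the element count; a plain-Python mirror of the ``axis``/``num_axes`` branch:

.. code-block:: python

    import numpy as np

    def flatten_shape(shape, axis=0, num_axes=-1):
        # Unknown (None) dims count as 1 so the product stays well-defined.
        fake = [1 if d is None else d for d in shape]
        if num_axes == -1: num_axes = len(shape) - axis
        num_flatten = int(np.prod(fake[axis:axis + num_axes]))
        return shape[:axis] + [num_flatten] + shape[axis + num_axes:]

    print(flatten_shape([None, 3, 4, 5], axis=1))   # [None, 60]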
...@@ -6,21 +6,28 @@ ...@@ -6,21 +6,28 @@
from . import * from . import *
def BatchNorm(inputs, momentum=0.9, eps=1e-3, use_stats=-1, inplace=False, **kwargs): def BatchNorm(inputs, axis=-1, momentum=0.9, eps=1e-3,
"""Batch Normalization, introduced by `[Ioffe & Szegedy, 2015] <https://arxiv.org/abs/1502.03167>`_ use_stats=-1, mode='DEFAULT', **kwargs):
"""Batch Normalization, introduced by `[Ioffe & Szegedy, 2015] <https://arxiv.org/abs/1502.03167>`_.
It follows the implementation of `Caffe`_; the scale procedure is moved to `ops.Scale(*args, **kwargs)`_.
The number of inputs varies from ``3`` to ``4`` (``DEFAULT`` or ``CAFFE`` mode).
Parameters Parameters
---------- ----------
inputs : list of Tensor inputs : list of Tensor
The inputs, represent [input, mean, var, factor]. The inputs, represent [input, mean, var] or [input, mean, var, factor].
axis : int
The channel axis.
momentum : float momentum : float
The momentum of moving average. The momentum of moving average.
eps : float eps : float
The eps. The eps.
use_stats : int use_stats : int
Whether to use global stats. Default is ``-1`` (Auto). Whether to use global stats. Default is ``-1`` (Auto).
inplace : boolean mode : str
Whether to share input for the output. The moving average mode. ``DEFAULT`` or ``CAFFE``.
Returns Returns
------- -------
...@@ -29,20 +36,22 @@ def BatchNorm(inputs, momentum=0.9, eps=1e-3, use_stats=-1, inplace=False, **kwa ...@@ -29,20 +36,22 @@ def BatchNorm(inputs, momentum=0.9, eps=1e-3, use_stats=-1, inplace=False, **kwa
|batchnorm_function| |batchnorm_function|
The moving average of mean/var, calculated as: The ``DEFAULT`` moving average of mean/var, calculated as:
|moving_average_function| |default_moving_average_function|
Notes The ``CAFFE`` moving average of mean/var, calculated as:
-----
This operator follows the implementation of `Caffe`_, without scale after normalization.
The scale procedure is moved to `ops.Scale(*args, **kwargs)`_. |caffe_moving_average_function|
""" """
CheckInputs(inputs, 4) CheckInputs(inputs, 3, 4)
arguments = ParseArguments(locals()) arguments = ParseArguments(locals())
if len(inputs) > 3:
if mode != 'CAFFE':
raise ValueError('Only the CAFFE mode takes 4 inputs.')
output = Tensor.CreateOperator(nout=1, op_type='BatchNorm', **arguments) output = Tensor.CreateOperator(nout=1, op_type='BatchNorm', **arguments)
if inputs[0].shape is not None: if inputs[0].shape is not None:
...@@ -51,14 +60,21 @@ def BatchNorm(inputs, momentum=0.9, eps=1e-3, use_stats=-1, inplace=False, **kwa ...@@ -51,14 +60,21 @@ def BatchNorm(inputs, momentum=0.9, eps=1e-3, use_stats=-1, inplace=False, **kwa
return output return output
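A minimal NumPy sketch of the two moving-average rules referenced in the docstring (``DEFAULT`` blends the batch statistic, ``CAFFE`` accumulates it and relies on the extra ``factor`` input for renormalization):

.. code-block:: python

    import numpy as np

    momentum = 0.9
    moving = np.zeros(3)
    batch_stat = np.array([1.0, 2.0, 3.0])

    # DEFAULT: x_moving <- m * x_moving + (1 - m) * x_stat
    moving_default = momentum * moving + (1.0 - momentum) * batch_stat

    # CAFFE:   x_moving <- m * x_moving + x_stat  (the 4th input tracks the factor)
    moving_caffe = momentum * moving + batch_stat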
def BatchRenorm(inputs, momentum=0.9, eps=1e-3, r_max=3.0, d_max=5.0, def BatchRenorm(inputs, axis=-1, momentum=0.9, eps=1e-3,
t_delta=1.0, use_stats=-1, inplace=False, **kwargs): r_max=3.0, d_max=5.0, t_delta=0.001,
"""Batch Renormalization, introduced by `[Ioffe, 2017] <https://arxiv.org/abs/1702.03275>`_ use_stats=-1, mode='DEFAULT', **kwargs):
"""Batch Renormalization, introduced by `[Ioffe, 2017] <https://arxiv.org/abs/1702.03275>`_.
It follows the implementation of `Caffe`_; the scale procedure is moved to `ops.Scale(*args, **kwargs)`_.
The number of inputs varies from ``3`` to ``4`` (``DEFAULT`` or ``CAFFE`` mode).
Parameters Parameters
---------- ----------
inputs : list of Tensor inputs : list of Tensor
The inputs, represent [input, mean, var, factor]. The inputs, represent [input, mean, var, factor].
axis : int
The channel axis.
momentum : float momentum : float
The momentum of moving average. The momentum of moving average.
eps : float eps : float
...@@ -71,8 +87,8 @@ def BatchRenorm(inputs, momentum=0.9, eps=1e-3, r_max=3.0, d_max=5.0, ...@@ -71,8 +87,8 @@ def BatchRenorm(inputs, momentum=0.9, eps=1e-3, r_max=3.0, d_max=5.0,
The magnitude of incrementing after each iteration. The magnitude of incrementing after each iteration.
use_stats : int use_stats : int
Whether to use global stats. Default is ``-1`` (Auto). Whether to use global stats. Default is ``-1`` (Auto).
inplace : boolean mode : str
Whether to share input for the output. The moving average mode. ``DEFAULT`` or ``CAFFE``.
Returns Returns
------- -------
...@@ -81,20 +97,22 @@ def BatchRenorm(inputs, momentum=0.9, eps=1e-3, r_max=3.0, d_max=5.0, ...@@ -81,20 +97,22 @@ def BatchRenorm(inputs, momentum=0.9, eps=1e-3, r_max=3.0, d_max=5.0,
|batchrenorm_function| |batchrenorm_function|
The moving average of mean/var, calculated as: The ``DEFAULT`` moving average of mean/var, calculated as:
|moving_average_function| |default_moving_average_function|
Notes The ``CAFFE`` moving average of mean/var, calculated as:
-----
This operator follows the implementation of `Caffe`_, without scale after normalization.
The scale procedure is moved to `ops.Scale(*args, **kwargs)`_. |caffe_moving_average_function|
""" """
CheckInputs(inputs, 4) CheckInputs(inputs, 3, 4)
arguments = ParseArguments(locals()) arguments = ParseArguments(locals())
if len(inputs) > 3:
if mode != 'CAFFE':
raise ValueError('Only the CAFFE mode takes 4 inputs.')
output = Tensor.CreateOperator(nout=1, op_type='BatchRenorm', **arguments) output = Tensor.CreateOperator(nout=1, op_type='BatchRenorm', **arguments)
if inputs[0].shape is not None: if inputs[0].shape is not None:
...@@ -103,13 +121,15 @@ def BatchRenorm(inputs, momentum=0.9, eps=1e-3, r_max=3.0, d_max=5.0, ...@@ -103,13 +121,15 @@ def BatchRenorm(inputs, momentum=0.9, eps=1e-3, r_max=3.0, d_max=5.0,
return output return output
def BN(inputs, momentum=0.9, eps=1e-3, use_stats=-1, **kwargs): def FusedBatchNorm(inputs, axis=-1, momentum=0.9, eps=1e-3, use_stats=-1, **kwargs):
"""Batch Normalization, with scale procedure after normalization. """Batch Normalization, with scale procedure after normalization.
Parameters Parameters
---------- ----------
inputs : list of Tensor inputs : list of Tensor
The inputs, represent [input, mean, var, scale, bias]. The inputs, represent [input, mean, var, scale, bias].
axis : int
The channel axis.
momentum : float momentum : float
The momentum of moving average. The momentum of moving average.
eps : float eps : float
...@@ -126,13 +146,13 @@ def BN(inputs, momentum=0.9, eps=1e-3, use_stats=-1, **kwargs): ...@@ -126,13 +146,13 @@ def BN(inputs, momentum=0.9, eps=1e-3, use_stats=-1, **kwargs):
The moving average of mean/var, calculated as: The moving average of mean/var, calculated as:
|moving_average_function| |default_moving_average_function|
""" """
CheckInputs(inputs, 5) CheckInputs(inputs, 5)
arguments = ParseArguments(locals()) arguments = ParseArguments(locals())
output = Tensor.CreateOperator(nout=1, op_type='BN', **arguments) output = Tensor.CreateOperator(nout=1, op_type='FusedBatchNorm', **arguments)
if inputs[0].shape is not None: if inputs[0].shape is not None:
output.shape = inputs[0].shape[:] output.shape = inputs[0].shape[:]
...@@ -140,17 +160,17 @@ def BN(inputs, momentum=0.9, eps=1e-3, use_stats=-1, **kwargs): ...@@ -140,17 +160,17 @@ def BN(inputs, momentum=0.9, eps=1e-3, use_stats=-1, **kwargs):
return output return output
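A matching sketch for the fused op (same assumed import paths as above): the five inputs carry [input, mean, var, scale, bias], so no separate `ops.Scale(*args, **kwargs)`_ call is needed afterwards.

import dragon.ops as ops                          # assumed module path
from dragon.core.tensor import Tensor             # assumed module path

data  = Tensor('data')
mean  = Tensor('bn/mean').Constant(value=0.0)
var   = Tensor('bn/var').Constant(value=0.0)
scale = Tensor('bn/scale').Constant(value=1.0)
bias  = Tensor('bn/bias').Constant(value=0.0)

y = ops.FusedBatchNorm([data, mean, var, scale, bias],
                       axis=1, momentum=0.9, eps=1e-3, use_stats=-1)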
def InstanceNorm(inputs, eps=1e-3, inplace=False, **kwargs): def InstanceNorm(inputs, axis=-1, eps=1e-3, **kwargs):
"""Instance Normalization, introduced by `[Ulyanov et.al, 2016] <https://arxiv.org/abs/1607.08022>`_ """Instance Normalization, introduced by `[Ulyanov et.al, 2016] <https://arxiv.org/abs/1607.08022>`_
Parameters Parameters
---------- ----------
inputs : Tensor inputs : Tensor
The input tensor. The input tensor.
axis : int
The channel axis.
eps : float eps : float
The eps. The eps.
inplace : boolean
Whether to share input for the output.
Returns Returns
------- -------
......
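Per the docstring above, InstanceNorm now takes the channel axis explicitly and the ``inplace`` switch is gone; a one-line usage sketch (same assumed imports as the sketches above):

import dragon.ops as ops                          # assumed module path
from dragon.core.tensor import Tensor             # assumed module path

y = ops.InstanceNorm(Tensor('data'), axis=1, eps=1e-3)   # stats per instance and channel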
...@@ -74,7 +74,9 @@ def Conv2d(inputs, num_output, kernel_size, ...@@ -74,7 +74,9 @@ def Conv2d(inputs, num_output, kernel_size,
if inputs[0].shape is not None: if inputs[0].shape is not None:
output.shape = inputs[0].shape[:] output.shape = inputs[0].shape[:]
output.shape[1] = num_output channel_axis = 1 if data_format == 'NCHW' else -1
spatial_axis = 2 if data_format == 'NCHW' else 1
output.shape[channel_axis] = num_output
for i in xrange(2): for i in xrange(2):
k = arguments['kernel_size'][i] if i < len(arguments['kernel_size']) \ k = arguments['kernel_size'][i] if i < len(arguments['kernel_size']) \
else arguments['kernel_size'][-1] else arguments['kernel_size'][-1]
...@@ -85,7 +87,12 @@ def Conv2d(inputs, num_output, kernel_size, ...@@ -85,7 +87,12 @@ def Conv2d(inputs, num_output, kernel_size,
d = arguments['dilation'][i] if i < len(arguments['dilation']) \ d = arguments['dilation'][i] if i < len(arguments['dilation']) \
else arguments['dilation'][-1] else arguments['dilation'][-1]
dk = d * (k - 1) + 1 dk = d * (k - 1) + 1
output.shape[i + 2] = (output.shape[i + 2] + 2 * p - dk) / s + 1 dp = 2 * p
if padding == 'SAME':
input_size = output.shape[i + spatial_axis]
output_size = (input_size + s - 1) / float(s)
dp = int(max(0, (output_size - 1) * s + k - input_size))
output.shape[i + spatial_axis] = (output.shape[i + spatial_axis] + dp - dk) / s + 1
return output return output
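For clarity, the per-dimension arithmetic of the new shape inference above, written as a stand-alone function (floor division made explicit; the helper name is ad hoc):

def conv2d_out_dim(input_size, k, s, p, d, padding='VALID'):
    # Mirrors the loop body above for one spatial dimension.
    dk = d * (k - 1) + 1                          # dilated kernel extent
    dp = 2 * p                                    # explicit padding, both sides
    if padding == 'SAME':                         # pad so that out = ceil(in / s)
        out = (input_size + s - 1) // s
        dp = max(0, (out - 1) * s + k - input_size)
    return (input_size + dp - dk) // s + 1

assert conv2d_out_dim(32, k=3, s=1, p=1, d=1) == 32                    # VALID with pad 1
assert conv2d_out_dim(32, k=3, s=1, p=0, d=1, padding='SAME') == 32    # SAME keeps the size
assert conv2d_out_dim(32, k=3, s=2, p=0, d=1, padding='SAME') == 16    # SAME with stride 2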
...@@ -226,7 +233,7 @@ def Pool2d(inputs, kernel_size, stride, pad=0, padding='VALID', ...@@ -226,7 +233,7 @@ def Pool2d(inputs, kernel_size, stride, pad=0, padding='VALID',
if inputs.shape is not None: if inputs.shape is not None:
output.shape = inputs.shape[:] output.shape = inputs.shape[:]
axis = 2 if data_format == 'NCHW' else 1 spatial_axis = 2 if data_format == 'NCHW' else 1
for i in xrange(2): for i in xrange(2):
k = arguments['kernel_size'][i] if i < len(arguments['kernel_size']) \ k = arguments['kernel_size'][i] if i < len(arguments['kernel_size']) \
else arguments['kernel_size'][-1] else arguments['kernel_size'][-1]
...@@ -234,17 +241,18 @@ def Pool2d(inputs, kernel_size, stride, pad=0, padding='VALID', ...@@ -234,17 +241,18 @@ def Pool2d(inputs, kernel_size, stride, pad=0, padding='VALID',
else arguments['stride'][-1] else arguments['stride'][-1]
p = arguments['pad'][i] if i < len(arguments['pad']) \ p = arguments['pad'][i] if i < len(arguments['pad']) \
else arguments['pad'][-1] else arguments['pad'][-1]
if padding == 'SAME':
input_size = output.shape[i + axis]
output_size = (input_size + s - 1) / float(s)
padding_needed = max(0, (output_size - 1) * s + k - input_size)
p_l = padding_needed / 2
p_r = padding_needed - p_l
p = min(p_l, p_r)
if not global_pooling: if not global_pooling:
output.shape[i + axis] = int(math.ceil(float(output.shape[i + axis] + 2 * p - k) / s) + 1) if padding != 'SAME':
input_size = output.shape[i + spatial_axis]
output_size = int(math.ceil(float(output.shape[i + spatial_axis] + 2 * p - k) / s) + 1)
if ((output_size - 1) * s >= input_size + p):
output_size = output_size - 1
output.shape[i + spatial_axis] = output_size
else:
output.shape[i + spatial_axis] = \
int((output.shape[i + spatial_axis] + s - 1) / float(s))
else: else:
output.shape[i + axis] = 1 output.shape[i + spatial_axis] = 1
return output return output
......
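Likewise for pooling, the new per-dimension rule above (ceil formula for explicit padding with a clip when the last window would start inside the padding, ceil(in / s) for ``SAME``, and 1 for global pooling) condenses to:

import math

def pool2d_out_dim(input_size, k, s, p, padding='VALID', global_pooling=False):
    # Mirrors the loop body above for one spatial dimension.
    if global_pooling:
        return 1
    if padding == 'SAME':                         # output covers the input: ceil(in / s)
        return (input_size + s - 1) // s
    out = int(math.ceil(float(input_size + 2 * p - k) / s)) + 1
    if (out - 1) * s >= input_size + p:           # drop a window that starts in the padding
        out -= 1
    return out

assert pool2d_out_dim(32, k=2, s=2, p=0) == 16
assert pool2d_out_dim(7, k=3, s=2, p=0) == 3
assert pool2d_out_dim(7, k=3, s=2, p=0, padding='SAME') == 4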
...@@ -87,7 +87,7 @@ GramMatrix = math.GramMatrix ...@@ -87,7 +87,7 @@ GramMatrix = math.GramMatrix
# normalization # normalization
BatchNorm = norm.BatchNorm BatchNorm = norm.BatchNorm
BatchRenorm = norm.BatchRenorm BatchRenorm = norm.BatchRenorm
BN = norm.BN FusedBatchNorm = norm.FusedBatchNorm
InstanceNorm = norm.InstanceNorm InstanceNorm = norm.InstanceNorm
L2Norm = norm.L2Norm L2Norm = norm.L2Norm
......
...@@ -329,7 +329,9 @@ class BatchNormLayer(Layer): ...@@ -329,7 +329,9 @@ class BatchNormLayer(Layer):
self._param = {'use_stats': int(param.use_global_stats) self._param = {'use_stats': int(param.use_global_stats)
if param.HasField('use_global_stats') else -1, if param.HasField('use_global_stats') else -1,
'momentum': param.moving_average_fraction, 'momentum': param.moving_average_fraction,
'eps': param.eps} 'eps': param.eps,
'axis': 1,
'mode': 'CAFFE'}
# mean, var, factor are set to 0 in order to do statistics # mean, var, factor are set to 0 in order to do statistics
mean = Tensor(LayerParameter.name + '@param0').Constant(value=0.0) mean = Tensor(LayerParameter.name + '@param0').Constant(value=0.0)
var = Tensor(LayerParameter.name + '@param1').Constant(value=0.0) var = Tensor(LayerParameter.name + '@param1').Constant(value=0.0)
...@@ -373,7 +375,9 @@ class BatchRenormLayer(Layer): ...@@ -373,7 +375,9 @@ class BatchRenormLayer(Layer):
'eps': param.eps, 'eps': param.eps,
'r_max': float(param.r_max), 'r_max': float(param.r_max),
'd_max': float(param.d_max), 'd_max': float(param.d_max),
't_delta': float(param.t_delta)} 't_delta': float(param.t_delta),
'axis': 1,
'mode': 'CAFFE'}
mean = Tensor(LayerParameter.name + '@param0').Constant(value=0.0) mean = Tensor(LayerParameter.name + '@param0').Constant(value=0.0)
var = Tensor(LayerParameter.name + '@param1').Constant(value=0.0) var = Tensor(LayerParameter.name + '@param1').Constant(value=0.0)
factor = Tensor(LayerParameter.name + '@param2').Constant(value=0.0) factor = Tensor(LayerParameter.name + '@param2').Constant(value=0.0)
...@@ -394,6 +398,7 @@ class InstanceNormLayer(Layer): ...@@ -394,6 +398,7 @@ class InstanceNormLayer(Layer):
""" """
def __init__(self, LayerParameter): def __init__(self, LayerParameter):
super(InstanceNormLayer, self).__init__(LayerParameter) super(InstanceNormLayer, self).__init__(LayerParameter)
self._param = {'axis': 1}
def Setup(self, bottom): def Setup(self, bottom):
super(InstanceNormLayer, self).Setup(bottom) super(InstanceNormLayer, self).Setup(bottom)
...@@ -464,7 +469,8 @@ class BNLayer(Layer): ...@@ -464,7 +469,8 @@ class BNLayer(Layer):
self._param = {'use_stats': int(bn_param.use_global_stats) self._param = {'use_stats': int(bn_param.use_global_stats)
if bn_param.HasField('use_global_stats') else -1, if bn_param.HasField('use_global_stats') else -1,
'momentum': bn_param.moving_average_fraction, 'momentum': bn_param.moving_average_fraction,
'eps': bn_param.eps} 'eps': bn_param.eps,
'axis': 1}
mean = Tensor(LayerParameter.name + '@param0').Constant(value=0.0) mean = Tensor(LayerParameter.name + '@param0').Constant(value=0.0)
var = Tensor(LayerParameter.name + '@param1').Constant(value=0.0) var = Tensor(LayerParameter.name + '@param1').Constant(value=0.0)
scale = Tensor(LayerParameter.name + '@param2') scale = Tensor(LayerParameter.name + '@param2')
...@@ -485,7 +491,7 @@ class BNLayer(Layer): ...@@ -485,7 +491,7 @@ class BNLayer(Layer):
def Setup(self, bottom): def Setup(self, bottom):
super(BNLayer, self).Setup(bottom) super(BNLayer, self).Setup(bottom)
return ops.BN(bottom + [blob['data'] for blob in self._blobs], **self._param) return ops.FusedBatchNorm(bottom + [blob['data'] for blob in self._blobs], **self._param)
class NormalizeLayer(Layer): class NormalizeLayer(Layer):
......
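The Caffe-facing layers now pin the channel axis to 1 (NCHW) and, where moving averages are involved, select the ``CAFFE`` mode explicitly. Condensed, the translation performed in BatchNormLayer.__init__ above reads as follows (hypothetical helper name; ``param`` is a caffe ``BatchNormParameter``):

def batch_norm_args(param):
    # What BatchNormLayer passes down to the norm op.
    return {'use_stats': int(param.use_global_stats)
                         if param.HasField('use_global_stats') else -1,
            'momentum': param.moving_average_fraction,
            'eps': param.eps,
            'axis': 1,                            # caffe blobs are NCHW
            'mode': 'CAFFE'}                      # 4-input mode: [input, mean, var, factor]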
...@@ -20,7 +20,7 @@ class SoftmaxWithLossLayer(Layer): ...@@ -20,7 +20,7 @@ class SoftmaxWithLossLayer(Layer):
normalization : NormalizationMode normalization : NormalizationMode
The normalization. Refer `LossParameter.normalization`_. The normalization. Refer `LossParameter.normalization`_.
normalize : boolean normalize : boolean
Wheter to normalize. Refer `LossParameter.normalize`_. Whether to normalize. Refer `LossParameter.normalize`_.
""" """
def __init__(self, LayerParameter): def __init__(self, LayerParameter):
...@@ -51,16 +51,16 @@ class SigmoidCrossEntropyLossLayer(Layer): ...@@ -51,16 +51,16 @@ class SigmoidCrossEntropyLossLayer(Layer):
normalization : NormalizationMode normalization : NormalizationMode
The normalization. Refer `LossParameter.normalization`_. The normalization. Refer `LossParameter.normalization`_.
normalize : boolean normalize : boolean
Wheter to normalize. Refer `LossParameter.normalize`_. Whether to normalize. Refer `LossParameter.normalize`_.
""" """
def __init__(self, LayerParameter): def __init__(self, LayerParameter):
super(SigmoidCrossEntropyLossLayer, self).__init__(LayerParameter) super(SigmoidCrossEntropyLossLayer, self).__init__(LayerParameter)
param = LayerParameter.loss_param param = LayerParameter.loss_param
norm_mode = {0: 'FULL', 1: 'FULL', 2: 'BATCH_SIZE', 3: 'NONE'} norm_mode = {0: 'FULL', 1: 'BATCH_SIZE', 2: 'BATCH_SIZE', 3: 'NONE'}
normalization = 'FULL' normalization = 'BATCH_SIZE'
if param.HasField('normalize'): if param.HasField('normalize'):
if not param.normalize: normalization = 'BATCH_SIZE' if param.normalize: normalization = 'FULL'
else: normalization = norm_mode[param.normalization] else: normalization = norm_mode[param.normalization]
self._param = { 'normalization': normalization } self._param = { 'normalization': normalization }
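The same resolution of ``normalize`` versus ``normalization`` recurs in the L2 and smooth-L1 layers below; pulled out, it reads (hypothetical helper name; ``loss_param`` is a caffe ``LossParameter``):

def resolve_normalization(loss_param):
    norm_mode = {0: 'FULL', 1: 'BATCH_SIZE', 2: 'BATCH_SIZE', 3: 'NONE'}
    normalization = 'BATCH_SIZE'                  # default when neither field is set
    if loss_param.HasField('normalize'):          # legacy boolean takes precedence
        if loss_param.normalize: normalization = 'FULL'
    else:                                         # otherwise map the enum
        normalization = norm_mode[loss_param.normalization]
    return normalization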
...@@ -78,14 +78,18 @@ class L2LossLayer(Layer): ...@@ -78,14 +78,18 @@ class L2LossLayer(Layer):
normalization : NormalizationMode normalization : NormalizationMode
The normalization. Refer `LossParameter.normalization`_. The normalization. Refer `LossParameter.normalization`_.
normalize : boolean normalize : boolean
Wheter to normalize. Refer `LossParameter.normalize`_. Whether to normalize. Refer `LossParameter.normalize`_.
""" """
def __init__(self, LayerParameter): def __init__(self, LayerParameter):
super(L2LossLayer, self).__init__(LayerParameter) super(L2LossLayer, self).__init__(LayerParameter)
param = LayerParameter.loss_param param = LayerParameter.loss_param
self._param = {'normalize': param.normalize norm_mode = {0: 'FULL', 1: 'BATCH_SIZE', 2: 'BATCH_SIZE', 3: 'NONE'}
if param.HasField('normalize') else True} normalization = 'BATCH_SIZE'
if param.HasField('normalize'):
if param.normalize: normalization = 'FULL'
else: normalization = norm_mode[param.normalization]
self._param = {'normalization': normalization}
def Setup(self, bottom): def Setup(self, bottom):
super(L2LossLayer, self).Setup(bottom) super(L2LossLayer, self).Setup(bottom)
...@@ -104,13 +108,20 @@ class SmoothL1LossLayer(Layer): ...@@ -104,13 +108,20 @@ class SmoothL1LossLayer(Layer):
normalization : NormalizationMode normalization : NormalizationMode
The normalization. Refer `LossParameter.normalization`_. The normalization. Refer `LossParameter.normalization`_.
normalize : boolean normalize : boolean
Wheter to normalize. Refer `LossParameter.normalize`_. Whether to normalize. Refer `LossParameter.normalize`_.
""" """
def __init__(self, LayerParameter): def __init__(self, LayerParameter):
super(SmoothL1LossLayer, self).__init__(LayerParameter) super(SmoothL1LossLayer, self).__init__(LayerParameter)
param = LayerParameter.smooth_l1_loss_param param = LayerParameter.loss_param
self._param = {'sigma': float(param.sigma)} smooth_l1_param = LayerParameter.smooth_l1_loss_param
norm_mode = {0: 'FULL', 1: 'BATCH_SIZE', 2: 'BATCH_SIZE', 3: 'NONE'}
normalization = 'BATCH_SIZE'
if param.HasField('normalize'):
if param.normalize: normalization = 'FULL'
else: normalization = norm_mode[param.normalization]
self._param = {'sigma': float(smooth_l1_param.sigma),
'normalization': normalization}
def Setup(self, bottom): def Setup(self, bottom):
super(SmoothL1LossLayer, self).Setup(bottom) super(SmoothL1LossLayer, self).Setup(bottom)
...@@ -129,11 +140,15 @@ class SoftmaxWithFocalLossLayer(Layer): ...@@ -129,11 +140,15 @@ class SoftmaxWithFocalLossLayer(Layer):
alpha : float alpha : float
The scale on the rare class. Refer `FocalLossParameter.alpha`_. The scale on the rare class. Refer `FocalLossParameter.alpha`_.
gamma : float gamma : float
The exponetial decay. Refer `FocalLossParameter.gamma`_. The exponential decay. Refer `FocalLossParameter.gamma`_.
eps : float eps : float
The eps. Refer `FocalLossParameter.eps`_. The eps. Refer `FocalLossParameter.eps`_.
neg_id : int neg_id : int
The negative id. Refer `FocalLossParameter.neg_id`_. The negative id. Refer `FocalLossParameter.neg_id`_.
normalization : NormalizationMode
The normalization. Refer `LossParameter.normalization`_.
normalize : boolean
Whether to normalize. Refer `LossParameter.normalize`_.
""" """
def __init__(self, LayerParameter): def __init__(self, LayerParameter):
...@@ -144,7 +159,7 @@ class SoftmaxWithFocalLossLayer(Layer): ...@@ -144,7 +159,7 @@ class SoftmaxWithFocalLossLayer(Layer):
norm_mode = {0: 'FULL', 1: 'VALID', 2: 'BATCH_SIZE', 3: 'NONE'} norm_mode = {0: 'FULL', 1: 'VALID', 2: 'BATCH_SIZE', 3: 'NONE'}
normalization = 'VALID' normalization = 'VALID'
if param.HasField('normalize'): if param.HasField('normalize'):
if not param.normalize: normalization='BATCH_SIZE' if not param.normalize: normalization = 'BATCH_SIZE'
else: normalization = norm_mode[param.normalization] else: normalization = norm_mode[param.normalization]
self._param = {'axis': softmax_param.axis, self._param = {'axis': softmax_param.axis,
'normalization': normalization, 'normalization': normalization,
......
...@@ -1487,7 +1487,7 @@ message BatchRenormParameter { ...@@ -1487,7 +1487,7 @@ message BatchRenormParameter {
optional float eps = 3 [default = 1e-3]; optional float eps = 3 [default = 1e-3];
optional float r_max = 4 [default = 3.0]; optional float r_max = 4 [default = 3.0];
optional float d_max = 5 [default = 5.0]; optional float d_max = 5 [default = 5.0];
optional float t_delta = 6 [default = 1.0]; optional float t_delta = 6 [default = 0.001];
} }
message DenseConcatParameter { message DenseConcatParameter {
...@@ -1497,7 +1497,7 @@ message DenseConcatParameter { ...@@ -1497,7 +1497,7 @@ message DenseConcatParameter {
message FocalLossParameter { message FocalLossParameter {
optional float alpha = 1 [default = 0.5]; optional float alpha = 1 [default = 0.5];
optional float gamma = 2 [default = 2.0]; optional float gamma = 2 [default = 0.0];
optional float eps = 3 [default = 1e-10]; optional float eps = 3 [default = 1e-10];
optional int32 neg_id = 4 [default = -1]; optional int32 neg_id = 4 [default = -1];
} }
......
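On the changed defaults: lowering ``t_delta`` to ``0.001`` matches the new BatchRenorm signature above, and setting ``gamma`` to ``0.0`` makes the focal-loss modulating factor ``(1 - p)**gamma`` identically 1, so the layer behaves like (alpha-weighted) cross entropy unless ``gamma`` is raised explicitly (this assumes ``gamma`` is used as the focusing exponent of the focal loss).

assert (1 - 0.5) ** 0.0 == 1.0                    # gamma = 0: every example keeps full weight
assert (1 - 0.9) ** 2.0 < 0.02                    # gamma = 2: easy examples are down-weighted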
...@@ -19,7 +19,7 @@ _sym_db = _symbol_database.Default() ...@@ -19,7 +19,7 @@ _sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor( DESCRIPTOR = _descriptor.FileDescriptor(
name='caffe.proto', name='caffe.proto',
package='caffe', package='caffe',
serialized_pb=_b('...')  # machine-generated descriptor for caffe.proto (old and new binary blobs omitted here); it is regenerated to pick up the proto changes above, i.e. BatchRenormParameter.t_delta default 0.001 and FocalLossParameter.gamma default 0.0.
\x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0f\n\x04\x61xis\x18\x05 \x01(\x05:\x01\x31\x12\x18\n\ttranspose\x18\x06 \x01(\x08:\x05\x66\x61lse\"1\n\x0eInputParameter\x12\x1f\n\x05shape\x18\x01 \x03(\x0b\x32\x10.caffe.BlobShape\"D\n\x0cLogParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"\xb8\x02\n\x0cLRNParameter\x12\x15\n\nlocal_size\x18\x01 \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x02 \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x03 \x01(\x02:\x04\x30.75\x12\x44\n\x0bnorm_region\x18\x04 \x01(\x0e\x32\x1e.caffe.LRNParameter.NormRegion:\x0f\x41\x43ROSS_CHANNELS\x12\x0c\n\x01k\x18\x05 \x01(\x02:\x01\x31\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.LRNParameter.Engine:\x07\x44\x45\x46\x41ULT\"5\n\nNormRegion\x12\x13\n\x0f\x41\x43ROSS_CHANNELS\x10\x00\x12\x12\n\x0eWITHIN_CHANNEL\x10\x01\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xbd\x01\n\x13MemoryDataParameter\x12\x12\n\nbatch_size\x18\x01 \x01(\r\x12\x10\n\x08\x63hannels\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\r\n\x05width\x18\x04 \x01(\r\x12;\n\x05\x64type\x18\x05 \x01(\x0e\x32#.caffe.MemoryDataParameter.DataType:\x07\x46LOAT32\"$\n\x08\x44\x61taType\x12\x0b\n\x07\x46LOAT32\x10\x00\x12\x0b\n\x07\x46LOAT16\x10\x01\"e\n\x0cMVNParameter\x12 \n\x12normalize_variance\x18\x01 \x01(\x08:\x04true\x12\x1e\n\x0f\x61\x63ross_channels\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x13\n\x03\x65ps\x18\x03 \x01(\x02:\x06\x31\x65-009\"5\n\x12ParameterParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\"\xa2\x03\n\x10PoolingParameter\x12\x35\n\x04pool\x18\x01 \x01(\x0e\x32\".caffe.PoolingParameter.PoolMethod:\x03MAX\x12\x0e\n\x03pad\x18\x04 \x01(\r:\x01\x30\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x02 \x01(\r\x12\x10\n\x08kernel_h\x18\x05 \x01(\r\x12\x10\n\x08kernel_w\x18\x06 \x01(\r\x12\x11\n\x06stride\x18\x03 \x01(\r:\x01\x31\x12\x10\n\x08stride_h\x18\x07 \x01(\r\x12\x10\n\x08stride_w\x18\x08 \x01(\r\x12\x37\n\x06\x65ngine\x18\x0b \x01(\x0e\x32\x1e.caffe.PoolingParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x1d\n\x0eglobal_pooling\x18\x0c \x01(\x08:\x05\x66\x61lse\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Y\n\x13ROIPoolingParameter\x12\x13\n\x08pooled_h\x18\x01 \x01(\r:\x01\x30\x12\x13\n\x08pooled_w\x18\x02 \x01(\r:\x01\x30\x12\x18\n\rspatial_scale\x18\x03 \x01(\x02:\x01\x31\"F\n\x0ePowerParameter\x12\x10\n\x05power\x18\x01 \x01(\x02:\x01\x31\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"g\n\x0fPythonParameter\x12\x0e\n\x06module\x18\x01 \x01(\t\x12\r\n\x05layer\x18\x02 \x01(\t\x12\x13\n\tparam_str\x18\x03 \x01(\t:\x00\x12 \n\x11share_in_parallel\x18\x04 \x01(\x08:\x05\x66\x61lse\"\xad\x01\n\x12ReductionParameter\x12=\n\toperation\x18\x01 \x01(\x0e\x32%.caffe.ReductionParameter.ReductionOp:\x03SUM\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x10\n\x05\x63oeff\x18\x03 
\x01(\x02:\x01\x31\"5\n\x0bReductionOp\x12\x07\n\x03SUM\x10\x01\x12\x08\n\x04\x41SUM\x10\x02\x12\t\n\x05SUMSQ\x10\x03\x12\x08\n\x04MEAN\x10\x04\"\x8d\x01\n\rReLUParameter\x12\x19\n\x0enegative_slope\x18\x01 \x01(\x02:\x01\x30\x12\x34\n\x06\x65ngine\x18\x02 \x01(\x0e\x32\x1b.caffe.ReLUParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Z\n\x10ReshapeParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\x08num_axes\x18\x03 \x01(\x05:\x02-1\"\xa5\x01\n\x0eScaleParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x18\n\tbias_term\x18\x04 \x01(\x08:\x05\x66\x61lse\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"x\n\x10SigmoidParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SigmoidParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"L\n\x0eSliceParameter\x12\x0f\n\x04\x61xis\x18\x03 \x01(\x05:\x01\x31\x12\x13\n\x0bslice_point\x18\x02 \x03(\r\x12\x14\n\tslice_dim\x18\x01 \x01(\r:\x01\x31\"\x89\x01\n\x10SoftmaxParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SoftmaxParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"r\n\rTanHParameter\x12\x34\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1b.caffe.TanHParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"T\n\rTileParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\r\n\x05tiles\x18\x02 \x01(\x05\x12#\n\tmultiples\x18\x03 \x01(\x0b\x32\x10.caffe.BlobShape\"*\n\x12ThresholdParameter\x12\x14\n\tthreshold\x18\x01 \x01(\x02:\x01\x30\"\xc1\x02\n\x13WindowDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0c\x66g_threshold\x18\x07 \x01(\x02:\x03\x30.5\x12\x19\n\x0c\x62g_threshold\x18\x08 \x01(\x02:\x03\x30.5\x12\x19\n\x0b\x66g_fraction\x18\t \x01(\x02:\x04\x30.25\x12\x16\n\x0b\x63ontext_pad\x18\n \x01(\r:\x01\x30\x12\x17\n\tcrop_mode\x18\x0b \x01(\t:\x04warp\x12\x1b\n\x0c\x63\x61\x63he_images\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\r \x01(\t:\x00\"\xeb\x01\n\x0cSPPParameter\x12\x16\n\x0epyramid_height\x18\x01 \x01(\r\x12\x31\n\x04pool\x18\x02 \x01(\x0e\x32\x1e.caffe.SPPParameter.PoolMethod:\x03MAX\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.SPPParameter.Engine:\x07\x44\x45\x46\x41ULT\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xe0\x13\n\x10V1LayerParameter\x12\x0e\n\x06\x62ottom\x18\x02 \x03(\t\x12\x0b\n\x03top\x18\x03 \x03(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12$\n\x07include\x18 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18! 
\x03(\x0b\x32\x13.caffe.NetStateRule\x12/\n\x04type\x18\x05 \x01(\x0e\x32!.caffe.V1LayerParameter.LayerType\x12\x1f\n\x05\x62lobs\x18\x06 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x0e\n\x05param\x18\xe9\x07 \x03(\t\x12>\n\x0f\x62lob_share_mode\x18\xea\x07 \x03(\x0e\x32$.caffe.V1LayerParameter.DimCheckMode\x12\x10\n\x08\x62lobs_lr\x18\x07 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x08 \x03(\x02\x12\x13\n\x0bloss_weight\x18# \x03(\x02\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x1b \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18\x17 \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12,\n\x0c\x63oncat_param\x18\t \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18( \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18\n \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12(\n\ndata_param\x18\x0b \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18\x0c \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18\x1a \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18\x18 \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12&\n\texp_param\x18) \x01(\x0b\x32\x13.caffe.ExpParameter\x12\x31\n\x0fhdf5_data_param\x18\r \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18\x0e \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18\x1d \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18\x0f \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18\x10 \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18\x11 \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12&\n\tlrn_param\x18\x12 \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18\x16 \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18\" \x01(\x0b\x32\x13.caffe.MVNParameter\x12.\n\rpooling_param\x18\x13 \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18\x15 \x01(\x0b\x32\x15.caffe.PowerParameter\x12(\n\nrelu_param\x18\x1e \x01(\x0b\x32\x14.caffe.ReLUParameter\x12.\n\rsigmoid_param\x18& \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18\' \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12*\n\x0bslice_param\x18\x1f \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18% \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x32\n\x0fthreshold_param\x18\x19 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12\x35\n\x11window_data_param\x18\x14 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\x12\x37\n\x0ftransform_param\x18$ \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18* \x01(\x0b\x32\x14.caffe.LossParameter\x12&\n\x05layer\x18\x01 \x01(\x0b\x32\x17.caffe.V0LayerParameter\"\xd8\x04\n\tLayerType\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06\x41\x42SVAL\x10#\x12\x0c\n\x08\x41\x43\x43URACY\x10\x01\x12\n\n\x06\x41RGMAX\x10\x1e\x12\x08\n\x04\x42NLL\x10\x02\x12\n\n\x06\x43ONCAT\x10\x03\x12\x14\n\x10\x43ONTRASTIVE_LOSS\x10%\x12\x0f\n\x0b\x43ONVOLUTION\x10\x04\x12\x08\n\x04\x44\x41TA\x10\x05\x12\x11\n\rDECONVOLUTION\x10\'\x12\x0b\n\x07\x44ROPOUT\x10\x06\x12\x0e\n\nDUMMY_DATA\x10 
\x12\x12\n\x0e\x45UCLIDEAN_LOSS\x10\x07\x12\x0b\n\x07\x45LTWISE\x10\x19\x12\x07\n\x03\x45XP\x10&\x12\x0b\n\x07\x46LATTEN\x10\x08\x12\r\n\tHDF5_DATA\x10\t\x12\x0f\n\x0bHDF5_OUTPUT\x10\n\x12\x0e\n\nHINGE_LOSS\x10\x1c\x12\n\n\x06IM2COL\x10\x0b\x12\x0e\n\nIMAGE_DATA\x10\x0c\x12\x11\n\rINFOGAIN_LOSS\x10\r\x12\x11\n\rINNER_PRODUCT\x10\x0e\x12\x07\n\x03LRN\x10\x0f\x12\x0f\n\x0bMEMORY_DATA\x10\x1d\x12\x1d\n\x19MULTINOMIAL_LOGISTIC_LOSS\x10\x10\x12\x07\n\x03MVN\x10\"\x12\x0b\n\x07POOLING\x10\x11\x12\t\n\x05POWER\x10\x1a\x12\x08\n\x04RELU\x10\x12\x12\x0b\n\x07SIGMOID\x10\x13\x12\x1e\n\x1aSIGMOID_CROSS_ENTROPY_LOSS\x10\x1b\x12\x0b\n\x07SILENCE\x10$\x12\x0b\n\x07SOFTMAX\x10\x14\x12\x10\n\x0cSOFTMAX_LOSS\x10\x15\x12\t\n\x05SPLIT\x10\x16\x12\t\n\x05SLICE\x10!\x12\x08\n\x04TANH\x10\x17\x12\x0f\n\x0bWINDOW_DATA\x10\x18\x12\r\n\tTHRESHOLD\x10\x1f\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\xfd\x07\n\x10V0LayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x12\n\nnum_output\x18\x03 \x01(\r\x12\x16\n\x08\x62iasterm\x18\x04 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x06 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0e\n\x03pad\x18\x07 \x01(\r:\x01\x30\x12\x12\n\nkernelsize\x18\x08 \x01(\r\x12\x10\n\x05group\x18\t \x01(\r:\x01\x31\x12\x11\n\x06stride\x18\n \x01(\r:\x01\x31\x12\x35\n\x04pool\x18\x0b \x01(\x0e\x32\".caffe.V0LayerParameter.PoolMethod:\x03MAX\x12\x1a\n\rdropout_ratio\x18\x0c \x01(\x02:\x03\x30.5\x12\x15\n\nlocal_size\x18\r \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x0e \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x0f \x01(\x02:\x04\x30.75\x12\x0c\n\x01k\x18\x16 \x01(\x02:\x01\x31\x12\x0e\n\x06source\x18\x10 \x01(\t\x12\x10\n\x05scale\x18\x11 \x01(\x02:\x01\x31\x12\x10\n\x08meanfile\x18\x12 \x01(\t\x12\x11\n\tbatchsize\x18\x13 \x01(\r\x12\x13\n\x08\x63ropsize\x18\x14 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x15 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x05\x62lobs\x18\x32 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x10\n\x08\x62lobs_lr\x18\x33 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x34 \x03(\x02\x12\x14\n\trand_skip\x18\x35 \x01(\r:\x01\x30\x12\x1d\n\x10\x64\x65t_fg_threshold\x18\x36 \x01(\x02:\x03\x30.5\x12\x1d\n\x10\x64\x65t_bg_threshold\x18\x37 \x01(\x02:\x03\x30.5\x12\x1d\n\x0f\x64\x65t_fg_fraction\x18\x38 \x01(\x02:\x04\x30.25\x12\x1a\n\x0f\x64\x65t_context_pad\x18: \x01(\r:\x01\x30\x12\x1b\n\rdet_crop_mode\x18; \x01(\t:\x04warp\x12\x12\n\x07new_num\x18< \x01(\x05:\x01\x30\x12\x17\n\x0cnew_channels\x18= \x01(\x05:\x01\x30\x12\x15\n\nnew_height\x18> \x01(\x05:\x01\x30\x12\x14\n\tnew_width\x18? 
\x01(\x05:\x01\x30\x12\x1d\n\x0eshuffle_images\x18@ \x01(\x08:\x05\x66\x61lse\x12\x15\n\nconcat_dim\x18\x41 \x01(\r:\x01\x31\x12\x36\n\x11hdf5_output_param\x18\xe9\x07 \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"W\n\x0ePReLUParameter\x12&\n\x06\x66iller\x18\x01 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x1d\n\x0e\x63hannel_shared\x18\x02 \x01(\x08:\x05\x66\x61lse\")\n\x15SmoothL1LossParameter\x12\x10\n\x05sigma\x18\x01 \x01(\x02:\x01\x31\"H\n\x0cMPIParameter\x12\x0f\n\x04root\x18\x01 \x01(\r:\x01\x30\x12\x12\n\x07\x63omm_id\x18\x02 \x01(\x04:\x01\x30\x12\x13\n\x08group_id\x18\x03 \x01(\x04:\x01\x30\"!\n\x10PermuteParameter\x12\r\n\x05order\x18\x01 \x03(\r\"\x93\x01\n\x12NormalizeParameter\x12\x1c\n\x0e\x61\x63ross_spatial\x18\x01 \x01(\x08:\x04true\x12,\n\x0cscale_filler\x18\x02 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x1c\n\x0e\x63hannel_shared\x18\x03 \x01(\x08:\x04true\x12\x13\n\x03\x65ps\x18\x04 \x01(\x02:\x06\x31\x65-010\"_\n\x11ParallelParameter\x12\x16\n\x07shuffle\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x18\n\tnode_step\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x18\n\tpartition\x18\x03 \x01(\x08:\x05\x66\x61lse\"R\n\x0fResizeParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x0e\n\x02\x66x\x18\x02 \x01(\x02:\x02-1\x12\x0e\n\x02\x66y\x18\x03 \x01(\x02:\x02-1\"\'\n\x13\x45xpandDimsParameter\x12\x10\n\x04\x61xis\x18\x01 \x01(\x05:\x02-1\"\xc8\x01\n\x11ProposalParameter\x12\x17\n\x0b\x66\x65\x61t_stride\x18\x01 \x01(\r:\x02\x31\x36\x12\x15\n\tbase_size\x18\x02 \x01(\r:\x02\x31\x36\x12\x14\n\x08min_size\x18\x03 \x01(\r:\x02\x31\x36\x12\r\n\x05ratio\x18\x04 \x03(\x02\x12\r\n\x05scale\x18\x05 \x03(\x02\x12\x1a\n\x0cpre_nms_topn\x18\x06 \x01(\r:\x04\x36\x30\x30\x30\x12\x1a\n\rpost_nms_topn\x18\x07 \x01(\r:\x03\x33\x30\x30\x12\x17\n\nnms_thresh\x18\x08 \x01(\x02:\x03\x30.7\"\xa6\x01\n\x14\x42\x61tchRenormParameter\x12\x18\n\x10use_global_stats\x18\x01 \x01(\x08\x12$\n\x17moving_average_fraction\x18\x02 \x01(\x02:\x03\x30.9\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x30.001\x12\x10\n\x05r_max\x18\x04 \x01(\x02:\x01\x33\x12\x10\n\x05\x64_max\x18\x05 \x01(\x02:\x01\x35\x12\x16\n\x07t_delta\x18\x06 \x01(\x02:\x05\x30.001\"?\n\x14\x44\x65nseConcatParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x16\n\x0bgrowth_rate\x18\x02 \x01(\x05:\x01\x30\"c\n\x12\x46ocalLossParameter\x12\x12\n\x05\x61lpha\x18\x01 \x01(\x02:\x03\x30.5\x12\x10\n\x05gamma\x18\x02 \x01(\x02:\x01\x30\x12\x13\n\x03\x65ps\x18\x03 \x01(\x02:\x06\x31\x65-010\x12\x12\n\x06neg_id\x18\x04 \x01(\x05:\x02-1*\x1c\n\x05Phase\x12\t\n\x05TRAIN\x10\x00\x12\x08\n\x04TEST\x10\x01')
) )
_sym_db.RegisterFileDescriptor(DESCRIPTOR) _sym_db.RegisterFileDescriptor(DESCRIPTOR)
...@@ -40,8 +40,8 @@ _PHASE = _descriptor.EnumDescriptor( ...@@ -40,8 +40,8 @@ _PHASE = _descriptor.EnumDescriptor(
], ],
containing_type=None, containing_type=None,
options=None, options=None,
serialized_start=17294, serialized_start=17298,
serialized_end=17322, serialized_end=17326,
) )
_sym_db.RegisterEnumDescriptor(_PHASE) _sym_db.RegisterEnumDescriptor(_PHASE)
...@@ -5729,7 +5729,7 @@ _BATCHRENORMPARAMETER = _descriptor.Descriptor( ...@@ -5729,7 +5729,7 @@ _BATCHRENORMPARAMETER = _descriptor.Descriptor(
_descriptor.FieldDescriptor( _descriptor.FieldDescriptor(
name='t_delta', full_name='caffe.BatchRenormParameter.t_delta', index=5, name='t_delta', full_name='caffe.BatchRenormParameter.t_delta', index=5,
number=6, type=2, cpp_type=6, label=1, number=6, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=1, has_default_value=True, default_value=0.001,
message_type=None, enum_type=None, containing_type=None, message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None, is_extension=False, extension_scope=None,
options=None), options=None),
...@@ -5745,7 +5745,7 @@ _BATCHRENORMPARAMETER = _descriptor.Descriptor( ...@@ -5745,7 +5745,7 @@ _BATCHRENORMPARAMETER = _descriptor.Descriptor(
oneofs=[ oneofs=[
], ],
serialized_start=16964, serialized_start=16964,
serialized_end=17126, serialized_end=17130,
) )
...@@ -5781,8 +5781,8 @@ _DENSECONCATPARAMETER = _descriptor.Descriptor( ...@@ -5781,8 +5781,8 @@ _DENSECONCATPARAMETER = _descriptor.Descriptor(
extension_ranges=[], extension_ranges=[],
oneofs=[ oneofs=[
], ],
serialized_start=17128, serialized_start=17132,
serialized_end=17191, serialized_end=17195,
) )
...@@ -5803,7 +5803,7 @@ _FOCALLOSSPARAMETER = _descriptor.Descriptor( ...@@ -5803,7 +5803,7 @@ _FOCALLOSSPARAMETER = _descriptor.Descriptor(
_descriptor.FieldDescriptor( _descriptor.FieldDescriptor(
name='gamma', full_name='caffe.FocalLossParameter.gamma', index=1, name='gamma', full_name='caffe.FocalLossParameter.gamma', index=1,
number=2, type=2, cpp_type=6, label=1, number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=2, has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None, message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None, is_extension=False, extension_scope=None,
options=None), options=None),
...@@ -5832,8 +5832,8 @@ _FOCALLOSSPARAMETER = _descriptor.Descriptor( ...@@ -5832,8 +5832,8 @@ _FOCALLOSSPARAMETER = _descriptor.Descriptor(
extension_ranges=[], extension_ranges=[],
oneofs=[ oneofs=[
], ],
serialized_start=17193, serialized_start=17197,
serialized_end=17292, serialized_end=17296,
) )
_BLOBPROTO.fields_by_name['shape'].message_type = _BLOBSHAPE _BLOBPROTO.fields_by_name['shape'].message_type = _BLOBSHAPE
......
...@@ -362,13 +362,13 @@ void Graph::RecomputingAware(const GraphDef& optimized_graph, Workspace* ws) { ...@@ -362,13 +362,13 @@ void Graph::RecomputingAware(const GraphDef& optimized_graph, Workspace* ws) {
// prepare resources // prepare resources
for (auto& ops : ops_) ops->set_recompute_map(recompute_map); for (auto& ops : ops_) ops->set_recompute_map(recompute_map);
Tensor* head = ws->CreateTensor("_t_mirror_stage_head"); Tensor* head = ws->CreateTensor("/opt/mirror_stage/head");
head->Reshape(vector<TIndex>(1, WORKSPACE_MAX_CORRUPTED_SIZE)); head->Reshape(vector<TIndex>(1, WORKSPACE_MAX_CORRUPTED_SIZE));
Tensor* recompute_flag = ws->CreateTensor("_t_global_recompute_flag"); Tensor* recompute_flag = ws->CreateTensor("/opt/mirror_stage/recompute_flag");
recompute_flag->Reshape(vector<TIndex>(1, 1)); recompute_flag->Reshape(vector<TIndex>(1, 1));
recompute_flag->mutable_data<bool, CPUContext>()[0] = false; recompute_flag->mutable_data<bool, CPUContext>()[0] = false;
for (int i = 0; i < WORKSPACE_MAX_CORRUPTED_SIZE; i++) { for (int i = 0; i < WORKSPACE_MAX_CORRUPTED_SIZE; i++) {
string name = "_t_mirror_stage_buffer_" + dragon_cast<string, int>(i); string name = "/opt/mirror_stage/buffer_" + dragon_cast<string, int>(i);
Tensor* buffer = ws->CreateTensor(name); Tensor* buffer = ws->CreateTensor(name);
head->mutable_data<string, CPUContext>()[i] = ""; head->mutable_data<string, CPUContext>()[i] = "";
} }
......
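Note: the rename above is part of a commit-wide move away from the old "_t_" tensor-name prefix. Judging by the renames in this diff, graph-level bookkeeping now lives under "/opt/mirror_stage/...", per-operator scratch (seen further down) under "/mnt/" + anchor() + "/...", and scratch shared across operators under "/share/...". A minimal sketch of helpers composing such names, assuming only the strings that literally appear in this diff:

#include <string>

// Graph bookkeeping, e.g. "head", "recompute_flag", "buffer_<i>".
std::string OptName(const std::string& what) {
    return "/opt/mirror_stage/" + what;
}

// Per-operator scratch, e.g. "<anchor>/dropout_mask", "<anchor>/clip_mask".
std::string MntName(const std::string& anchor, const std::string& what) {
    return "/mnt/" + anchor + "/" + what;
}

// Scratch shared across operators, e.g. "softmax_scale".
std::string ShareName(const std::string& what) {
    return "/share/" + what;
}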
...@@ -88,7 +88,7 @@ void MixedMemory::async_cuda_data(const cudaStream_t& stream) { ...@@ -88,7 +88,7 @@ void MixedMemory::async_cuda_data(const cudaStream_t& stream) {
MixedMemory::~MixedMemory() { MixedMemory::~MixedMemory() {
bool use_cudahost_mem = false; bool use_cudahost_mem = false;
#ifdef WITH_CUDA_HOST_MEN #ifdef WITH_CUDA_HOST_MEM
use_cudahost_mem = true; use_cudahost_mem = true;
#endif #endif
if (cpu_ptr_ && !use_cudahost_mem) { if (cpu_ptr_ && !use_cudahost_mem) {
......
...@@ -20,6 +20,19 @@ OperatorBase::OperatorBase(const OperatorDef& op_def, Workspace* ws) ...@@ -20,6 +20,19 @@ OperatorBase::OperatorBase(const OperatorDef& op_def, Workspace* ws)
outputs_.push_back(tensor); outputs_.push_back(tensor);
} }
} }
inline Tensor& OperatorBase::input(int idx) {
CHECK_LT(idx, (int)inputs_.size());
CHECK_GE(idx, -(int)inputs_.size());
if (idx >= 0) return *(ws()->SearchAvatar(inputs_[idx]));
else return *(ws()->SearchAvatar(inputs_[idx + inputs_.size()]));
}
inline Tensor* OperatorBase::output(int idx) {
CHECK_LT(idx, (int)outputs_.size());
CHECK_GE(idx, -(int)outputs_.size());
if (idx >= 0) return ws()->SearchAvatar(outputs_[idx]);
else return ws()->SearchAvatar(outputs_[idx + outputs_.size()]);
}
OperatorBase* TryCreateOperator(const string& key, const OperatorDef& op_def, Workspace* ws) { OperatorBase* TryCreateOperator(const string& key, const OperatorDef& op_def, Workspace* ws) {
switch (op_def.device_option().device_type()) { switch (op_def.device_option().device_type()) {
...@@ -49,11 +62,11 @@ Gradient MakeGradientForOp(const OperatorDef& def, const vector<string>& g_outpu ...@@ -49,11 +62,11 @@ Gradient MakeGradientForOp(const OperatorDef& def, const vector<string>& g_outpu
if (maker.get() == nullptr) if (maker.get() == nullptr)
LOG(FATAL) << "Gradient maker for operator " << def.type() << " not implemented."; LOG(FATAL) << "Gradient maker for operator " << def.type() << " not implemented.";
Gradient grad = maker->Make(); Gradient grad = maker->Make();
// copy device option, engine, and arguments if needed. // copy device option, engine, and arguments if needed
if (maker->CopyDeviceOption() && def.has_device_option()) if (maker->CopyDeviceOption() && def.has_device_option())
for (auto& grad_def : grad.ops) for (auto& grad_def : grad.ops)
grad_def.mutable_device_option()->CopyFrom(def.device_option()); grad_def.mutable_device_option()->CopyFrom(def.device_option());
// copy arguments if needed. // copy arguments if needed
if (maker->CopyArguments() && def.arg_size()) if (maker->CopyArguments() && def.arg_size())
for (auto& grad_def : grad.ops) grad_def.mutable_arg()->MergeFrom(def.arg()); for (auto& grad_def : grad.ops) grad_def.mutable_arg()->MergeFrom(def.arg());
return grad; return grad;
...@@ -63,7 +76,7 @@ template <class Context> ...@@ -63,7 +76,7 @@ template <class Context>
void Operator<Context>::ElimateCorruption() { void Operator<Context>::ElimateCorruption() {
Set<string> all_heads; Set<string> all_heads;
queue<int> safe_heads; queue<int> safe_heads;
Tensor* head = ws()->GetTensor("_t_mirror_stage_head"); Tensor* head = ws()->GetTensor("/opt/mirror_stage/head");
string* head_data = head->mutable_data<string, CPUContext>(); string* head_data = head->mutable_data<string, CPUContext>();
for (int i = 0; i < head->count(); i++) all_heads.insert(head_data[i]); for (int i = 0; i < head->count(); i++) all_heads.insert(head_data[i]);
// sub-graph run // sub-graph run
...@@ -71,7 +84,7 @@ void Operator<Context>::ElimateCorruption() { ...@@ -71,7 +84,7 @@ void Operator<Context>::ElimateCorruption() {
if (input(i).is_corrupted()) { if (input(i).is_corrupted()) {
if (all_heads.count(input(i).name())) continue; if (all_heads.count(input(i).name())) continue;
LOG(DEBUG) << "Tensor(" << input(i).name() << ") is corrupted, recompute... "; LOG(DEBUG) << "Tensor(" << input(i).name() << ") is corrupted, recompute... ";
Tensor* recompute_flag = ws()->GetTensor("_t_global_recompute_flag"); Tensor* recompute_flag = ws()->GetTensor("/opt/mirror_stage/recompute_flag");
vector<OperatorBase*>& list = recompute_map()[input(i).name()]; vector<OperatorBase*>& list = recompute_map()[input(i).name()];
recompute_flag->mutable_data<bool, CPUContext>()[0] = true; recompute_flag->mutable_data<bool, CPUContext>()[0] = true;
for (int j = 0; j < list.size(); j++) list[j]->Run(); for (int j = 0; j < list.size(); j++) list[j]->Run();
...@@ -101,7 +114,7 @@ void Operator<Context>::ElimateCorruption() { ...@@ -101,7 +114,7 @@ void Operator<Context>::ElimateCorruption() {
<< "\nadd WORKSPACE_MAX_CORRUPTED_SIZE for more powerful mirror stage ?"; << "\nadd WORKSPACE_MAX_CORRUPTED_SIZE for more powerful mirror stage ?";
int idx = safe_heads.front(); int idx = safe_heads.front();
safe_heads.pop(); safe_heads.pop();
Tensor* buffer = ws()->GetTensor("_t_mirror_stage_buffer_" + dragon_cast<string, int>(idx)); Tensor* buffer = ws()->GetTensor("/opt/mirror_stage/buffer_" + dragon_cast<string, int>(idx));
output(i)->Move(buffer->memory()); output(i)->Move(buffer->memory());
head_data[idx] = output(i)->name(); head_data[idx] = output(i)->name();
} }
...@@ -113,7 +126,7 @@ void Operator<Context>::ShareGradient() { ...@@ -113,7 +126,7 @@ void Operator<Context>::ShareGradient() {
// TODO(PhyscalX): we preset input(-1)->output(0) to share // TODO(PhyscalX): we preset input(-1)->output(0) to share
if (output(0)->name() != "ignore") { if (output(0)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad"); Tensor* dX = ws()->GetBuffer("Grad");
output(0)->Replace(*dX); ws()->CreateAvatar(output(0), dX);
} }
} }
...@@ -127,12 +140,12 @@ template <class Context> ...@@ -127,12 +140,12 @@ template <class Context>
void Operator<Context>::CleanResource() { void Operator<Context>::CleanResource() {
// post-process for mirror stage // post-process for mirror stage
Map<string, int> head_to_idx; Map<string, int> head_to_idx;
Tensor* head = ws()->GetTensor("_t_mirror_stage_head"); Tensor* head = ws()->GetTensor("/opt/mirror_stage/head");
string* head_data = head->mutable_data<string, CPUContext>(); string* head_data = head->mutable_data<string, CPUContext>();
for (int i = 0; i < head->count(); i++) head_to_idx[head_data[i]] = i; for (int i = 0; i < head->count(); i++) head_to_idx[head_data[i]] = i;
for (int i = 0; i < OutputSize(); i++) { for (int i = 0; i < OutputSize(); i++) {
if (output(i)->is_corrupted() && head_to_idx.count(output(i)->name())) { if (output(i)->is_corrupted() && head_to_idx.count(output(i)->name())) {
string used = "_t_mirror_stage_buffer_" + dragon_cast<string, int>(head_to_idx[output(i)->name()]); string used = "/opt/mirror_stage/buffer_" + dragon_cast<string, int>(head_to_idx[output(i)->name()]);
Tensor* buffer = ws()->GetTensor(used); Tensor* buffer = ws()->GetTensor(used);
if (output(i)->memory() != buffer->memory()) buffer->Move(output(i)->memory()); if (output(i)->memory() != buffer->memory()) buffer->Move(output(i)->memory());
} }
......
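The Replace-based gradient sharing is gone: ShareGradient now registers an alias with ws()->CreateAvatar(output(0), dX), and the input()/output() accessors (moved into this file above) resolve tensors through ws()->SearchAvatar(...) before handing them to the operator. A self-contained sketch of that indirection, with MiniWorkspace/MiniTensor as stand-ins rather than the framework's actual classes (which also manage buffers and lifetime):

#include <unordered_map>

struct MiniTensor {};

class MiniWorkspace {
 public:
    // Record that accesses to `original` should be served by `avatar`.
    void CreateAvatar(MiniTensor* original, MiniTensor* avatar) {
        avatars_[original] = avatar;
    }
    // Follow the record (identity if none), as input()/output() now do.
    MiniTensor* SearchAvatar(MiniTensor* t) {
        auto it = avatars_.find(t);
        return it == avatars_.end() ? t : it->second;
    }
 private:
    std::unordered_map<MiniTensor*, MiniTensor*> avatars_;
};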
...@@ -16,7 +16,7 @@ GraphBase* Workspace::CreateGraph(const GraphDef& meta_graph) { ...@@ -16,7 +16,7 @@ GraphBase* Workspace::CreateGraph(const GraphDef& meta_graph) {
Workspace::~Workspace() { Workspace::~Workspace() {
for (int i = 0; i < WORKSPACE_MAX_CORRUPTED_SIZE; i++) { for (int i = 0; i < WORKSPACE_MAX_CORRUPTED_SIZE; i++) {
string name = "_t_mirror_stage_buffer_" + dragon_cast<string, int>(i); string name = "/opt/mirror_stage/buffer_" + dragon_cast<string, int>(i);
if (tensor_map_.count(name) > 0) { if (tensor_map_.count(name) > 0) {
MixedMemory* mem = tensor_map_[name]->memory(); MixedMemory* mem = tensor_map_[name]->memory();
if (mem != nullptr) delete mem; if (mem != nullptr) delete mem;
......
...@@ -27,7 +27,7 @@ void DropoutOp<Context>::RunWithType() { ...@@ -27,7 +27,7 @@ void DropoutOp<Context>::RunWithType() {
template <class Context> template <class Context>
void DropoutOp<Context>::RunOnDevice() { void DropoutOp<Context>::RunOnDevice() {
output(0)->ReshapeLike(input(0)); output(0)->ReshapeLike(input(0));
mask = ws()->CreateTensor("_t_" + anchor() + "_dropout_mask"); mask = ws()->CreateTensor("/mnt/" + anchor() + "/dropout_mask");
mask->ReshapeLike(input(0)); mask->ReshapeLike(input(0));
if (input(0).template IsType<float>()) RunWithType<float>(); if (input(0).template IsType<float>()) RunWithType<float>();
...@@ -42,8 +42,7 @@ OPERATOR_SCHEMA(Dropout).NumInputs(1).NumOutputs(1).Inplace({ { 0, 0 } }); ...@@ -42,8 +42,7 @@ OPERATOR_SCHEMA(Dropout).NumInputs(1).NumOutputs(1).Inplace({ { 0, 0 } });
template <class Context> template <typename T> template <class Context> template <typename T>
void DropoutGradientOp<Context>::RunWithType() { void DropoutGradientOp<Context>::RunWithType() {
mask = ws()->GetTensor("_t_" + anchor() + "_dropout_mask"); mask = ws()->GetTensor("/mnt/" + anchor() + "/dropout_mask");
auto* dYdata = input(-1).template data<T, Context>(); auto* dYdata = input(-1).template data<T, Context>();
auto* dXdata = output(0)->template mutable_data<T, Context>(); auto* dXdata = output(0)->template mutable_data<T, Context>();
auto* Mdata = mask->template data<uint32_t, Context>(); auto* Mdata = mask->template data<uint32_t, Context>();
...@@ -56,9 +55,8 @@ void DropoutGradientOp<Context>::RunWithType() { ...@@ -56,9 +55,8 @@ void DropoutGradientOp<Context>::RunWithType() {
Mdata, Mdata,
dXdata); dXdata);
} else if (this->phase() == "TEST") { } else if (this->phase() == "TEST") { NOT_IMPLEMENTED; }
NOT_IMPLEMENTED; mask->Reset();
}
} }
template <class Context> template <class Context>
...@@ -69,12 +67,6 @@ void DropoutGradientOp<Context>::RunOnDevice() { ...@@ -69,12 +67,6 @@ void DropoutGradientOp<Context>::RunOnDevice() {
else LOG(FATAL) << "Unsupported input types."; else LOG(FATAL) << "Unsupported input types.";
} }
template <class Context>
void DropoutGradientOp<Context>::CleanResource() {
Operator<Context>::CleanResource();
ws()->ReleaseBuffer(mask, "Common", true);
}
DEPLOY_CPU(DropoutGradient); DEPLOY_CPU(DropoutGradient);
#ifdef WITH_CUDA #ifdef WITH_CUDA
DEPLOY_CUDA(DropoutGradient); DEPLOY_CUDA(DropoutGradient);
......
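With the rename, the dropout mask is per-operator scratch at "/mnt/" + anchor() + "/dropout_mask": the forward pass creates it by name, the backward pass fetches the same name and now calls mask->Reset() when it is done, which is why the CleanResource override that used to release it was removed. A self-contained sketch of that lifecycle (MiniTensor/MiniWorkspace are illustrative stand-ins, not the framework API):

#include <cstddef>
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

struct MiniTensor {
    std::vector<uint32_t> data;
    void Reset() { data.clear(); }
};

struct MiniWorkspace {
    MiniTensor* CreateTensor(const std::string& name) { return &tensors_[name]; }
    MiniTensor* GetTensor(const std::string& name) { return &tensors_.at(name); }
    std::unordered_map<std::string, MiniTensor> tensors_;
};

void DropoutForward(MiniWorkspace* ws, const std::string& anchor, std::size_t count) {
    MiniTensor* mask = ws->CreateTensor("/mnt/" + anchor + "/dropout_mask");
    mask->data.assign(count, 1);   // stand-in for the sampled keep/drop pattern
}

void DropoutBackward(MiniWorkspace* ws, const std::string& anchor) {
    MiniTensor* mask = ws->GetTensor("/mnt/" + anchor + "/dropout_mask");
    // ... scale the incoming gradient by the saved mask here ...
    mask->Reset();                 // scratch cleared in place; no CleanResource override needed
}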
...@@ -26,7 +26,7 @@ void SoftmaxOp<Context>::RunWithType() { ...@@ -26,7 +26,7 @@ void SoftmaxOp<Context>::RunWithType() {
template <class Context> template <class Context>
void SoftmaxOp<Context>::RunOnDevice() { void SoftmaxOp<Context>::RunOnDevice() {
if (axis == -1) axis = (int)input(0).ndim() - 1; if (axis == -1) axis = (int)input(0).ndim() - 1;
scale = ws()->CreateTensor("_t_softmax_scale"); scale = ws()->CreateTensor("/share/softmax_scale");
scale->ReshapeLike(input(0)); scale->ReshapeLike(input(0));
outer_dim = input(0).count(0, axis); outer_dim = input(0).count(0, axis);
inner_dim = input(0).count(axis + 1); inner_dim = input(0).count(axis + 1);
...@@ -64,7 +64,7 @@ void SoftmaxGradientOp<Context>::RunWithType() { ...@@ -64,7 +64,7 @@ void SoftmaxGradientOp<Context>::RunWithType() {
template <class Context> template <class Context>
void SoftmaxGradientOp<Context>::RunOnDevice() { void SoftmaxGradientOp<Context>::RunOnDevice() {
if (axis == -1) axis = (int)input(0).ndim() - 1; if (axis == -1) axis = (int)input(0).ndim() - 1;
scale = ws()->CreateTensor("_t_softmax_scale"); scale = ws()->CreateTensor("/share/softmax_scale");
scale->ReshapeLike(input(0)); scale->ReshapeLike(input(0));
outer_dim = input(0).count(0, axis); outer_dim = input(0).count(0, axis);
inner_dim = input(0).count(axis + 1); inner_dim = input(0).count(axis + 1);
......
...@@ -176,7 +176,7 @@ void AddGradientOp<Context>::ShareGradient() { ...@@ -176,7 +176,7 @@ void AddGradientOp<Context>::ShareGradient() {
for (int i = 0; i < OutputSize(); i++) { for (int i = 0; i < OutputSize(); i++) {
if (output(i)->name() != "ignore") { if (output(i)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad"); Tensor* dX = ws()->GetBuffer("Grad");
output(i)->Replace(*dX); ws()->CreateAvatar(output(i), dX);
break; break;
} }
} }
......
...@@ -30,8 +30,7 @@ void BiasAddOp<Context>::RunOnDevice() { ...@@ -30,8 +30,7 @@ void BiasAddOp<Context>::RunOnDevice() {
dim = input(0).dim(-1); dim = input(0).dim(-1);
inner_dim = input(0).count(1) / dim; inner_dim = input(0).count(1) / dim;
} else LOG(FATAL) << "Unknown data format: " << data_format; } else LOG(FATAL) << "Unknown data format: " << data_format;
output(0)->ReshapeLike(input(0)); ws()->CreateAvatar(output(0), &input(0));
output(0)->Share(input(0));
if (input(0).template IsType<float>()) RunWithType<float>(); if (input(0).template IsType<float>()) RunWithType<float>();
else LOG(FATAL) << "Unsupported input types."; else LOG(FATAL) << "Unsupported input types.";
...@@ -70,8 +69,7 @@ void BiasAddGradientOp<Context>::RunWithType() { ...@@ -70,8 +69,7 @@ void BiasAddGradientOp<Context>::RunWithType() {
} }
if (output(0)->name() != "ignore") { if (output(0)->name() != "ignore") {
output(0)->ReshapeLike(input(-1)); ws()->CreateAvatar(output(0), &input(-1));
output(0)->Share(input(-1));
} }
} }
......
...@@ -16,8 +16,7 @@ void ClipOp<Context>::RunWithType() { ...@@ -16,8 +16,7 @@ void ClipOp<Context>::RunWithType() {
template <class Context> template <class Context>
void ClipOp<Context>::RunOnDevice() { void ClipOp<Context>::RunOnDevice() {
output(0)->ReshapeLike(input(0)); output(0)->ReshapeLike(input(0));
output(0)->Share(input(0)); mask = ws()->CreateTensor("/mnt/" + anchor() + "/clip_mask");
mask = ws()->CreateTensor("_t_" + anchor() + "_clip_mask");
mask->ReshapeLike(input(0)); mask->ReshapeLike(input(0));
if (input(0).template IsType<float>()) return RunWithType<float>(); if (input(0).template IsType<float>()) return RunWithType<float>();
else LOG(FATAL) << "Unsupported input types."; else LOG(FATAL) << "Unsupported input types.";
...@@ -27,7 +26,7 @@ DEPLOY_CPU(Clip); ...@@ -27,7 +26,7 @@ DEPLOY_CPU(Clip);
#ifdef WITH_CUDA #ifdef WITH_CUDA
DEPLOY_CUDA(Clip); DEPLOY_CUDA(Clip);
#endif #endif
OPERATOR_SCHEMA(Clip).NumInputs(1).NumOutputs(1); OPERATOR_SCHEMA(Clip).NumInputs(1).NumOutputs(1).Inplace({ { 0, 0 } });
template <class Context> template <typename T> template <class Context> template <typename T>
void ClipGradientOp<Context>::RunWithType() { void ClipGradientOp<Context>::RunWithType() {
...@@ -39,8 +38,7 @@ void ClipGradientOp<Context>::RunWithType() { ...@@ -39,8 +38,7 @@ void ClipGradientOp<Context>::RunWithType() {
template <class Context> template <class Context>
void ClipGradientOp<Context>::RunOnDevice() { void ClipGradientOp<Context>::RunOnDevice() {
output(0)->ReshapeLike(input(0)); output(0)->ReshapeLike(input(0));
output(0)->Share(input(-1)); mask = ws()->GetTensor("/mnt/" + anchor() + "/clip_mask");
mask = ws()->GetTensor("_t_" + anchor() + "_clip_mask");
if (input(0).template IsType<float>()) return RunWithType<float>(); if (input(0).template IsType<float>()) return RunWithType<float>();
else LOG(FATAL) << "Unsupported input types."; else LOG(FATAL) << "Unsupported input types.";
} }
...@@ -49,14 +47,14 @@ DEPLOY_CPU(ClipGradient); ...@@ -49,14 +47,14 @@ DEPLOY_CPU(ClipGradient);
#ifdef WITH_CUDA #ifdef WITH_CUDA
DEPLOY_CUDA(ClipGradient); DEPLOY_CUDA(ClipGradient);
#endif #endif
OPERATOR_SCHEMA(ClipGradient).NumInputs(2).NumOutputs(1); OPERATOR_SCHEMA(ClipGradient).NumInputs(2).NumOutputs(1).Inplace({ { 1, 0 } });
class GetClipGradient final : public GradientMakerBase { class GetClipGradient final : public GradientMakerBase {
public: public:
GRADIENT_MAKER_CTOR(GetClipGradient); GRADIENT_MAKER_CTOR(GetClipGradient);
vector<OperatorDef> MakeDefs() override { vector<OperatorDef> MakeDefs() override {
return SingleDef(def.type() + "Gradient", "", return SingleDef(def.type() + "Gradient", "",
vector<string> {I(0), GO(0)}, vector<string> {O(0), GO(0)},
vector<string> {GI(0)}); vector<string> {GI(0)});
} }
}; };
......
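One consequence of the clip changes above: the forward op is now declared Inplace({{0, 0}}) and the gradient maker feeds the forward output O(0) instead of the original input I(0), which is consistent with the input possibly having been overwritten in place; the mask saved under "/mnt/" + anchor() + "/clip_mask" carries the clipping decisions to the backward pass. A sketch of what the updated maker emits, with MiniOpDef standing in for the framework's OperatorDef:

#include <string>
#include <vector>

struct MiniOpDef {
    std::string type;
    std::vector<std::string> inputs, outputs;
};

MiniOpDef MakeClipGradient(const std::string& y,     // forward output, O(0)
                           const std::string& dy,    // incoming gradient, GO(0)
                           const std::string& dx) {  // gradient to produce, GI(0)
    return {"ClipGradient", {y, dy}, {dx}};
}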
...@@ -122,7 +122,7 @@ void DivGradientOp<Context>::BroadcastRunWithType(int type) { ...@@ -122,7 +122,7 @@ void DivGradientOp<Context>::BroadcastRunWithType(int type) {
} }
if (output(1)->name() != "ignore") { if (output(1)->name() != "ignore") {
Tensor* buffer = ws()->CreateTensor("_t_buffer_0"); Tensor* buffer = ws()->GetBuffer();
buffer->ReshapeLike(input(1)); buffer->ReshapeLike(input(1));
auto* X1data = input(0).template data<T, Context>(); auto* X1data = input(0).template data<T, Context>();
auto* X2data = input(1).template data<T, Context>(); auto* X2data = input(1).template data<T, Context>();
...@@ -147,6 +147,7 @@ void DivGradientOp<Context>::BroadcastRunWithType(int type) { ...@@ -147,6 +147,7 @@ void DivGradientOp<Context>::BroadcastRunWithType(int type) {
dX1data, BMul_data, 0.0, Bdata); dX1data, BMul_data, 0.0, Bdata);
} }
math::Mul<T, Context>(input(1).count(), Bdata, dX2data, dX2data); math::Mul<T, Context>(input(1).count(), Bdata, dX2data, dX2data);
ws()->ReleaseBuffer(buffer);
} }
if (output(0)->name() != "ignore") { if (output(0)->name() != "ignore") {
...@@ -207,7 +208,7 @@ void DivGradientOp<Context>::ShareGradient() { ...@@ -207,7 +208,7 @@ void DivGradientOp<Context>::ShareGradient() {
for (int i = 0; i < OutputSize(); i++) { for (int i = 0; i < OutputSize(); i++) {
if (output(i)->name() != "ignore") { if (output(i)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad"); Tensor* dX = ws()->GetBuffer("Grad");
output(i)->Replace(*dX); ws()->CreateAvatar(output(i), dX);
break; break;
} }
} }
......
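The temporary used for the broadcast term above now comes from the workspace buffer pool (ws()->GetBuffer()) and is returned with ws()->ReleaseBuffer(buffer) once consumed, rather than persisting under the fixed name "_t_buffer_0". A self-contained sketch of such a get/release pool; the real workspace also keys buffers by category (e.g. "Grad"), which is omitted here:

#include <memory>
#include <vector>

struct MiniTensor { std::vector<float> data; };

class BufferPool {
 public:
    MiniTensor* Get() {
        if (free_.empty()) {
            owned_.push_back(std::make_unique<MiniTensor>());
            return owned_.back().get();
        }
        MiniTensor* t = free_.back();
        free_.pop_back();
        return t;
    }
    // Hand the scratch back for reuse; memory is recycled, not freed.
    void Release(MiniTensor* t) { free_.push_back(t); }
 private:
    std::vector<std::unique_ptr<MiniTensor>> owned_;
    std::vector<MiniTensor*> free_;
};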
...@@ -183,7 +183,7 @@ void DotGradientOp<Context>::ShareGradient() { ...@@ -183,7 +183,7 @@ void DotGradientOp<Context>::ShareGradient() {
for (int i = 0; i < OutputSize(); i++) { for (int i = 0; i < OutputSize(); i++) {
if (output(i)->name() != "ignore") { if (output(i)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad"); Tensor* dX = ws()->GetBuffer("Grad");
output(i)->Replace(*dX); ws()->CreateAvatar(output(i), dX);
break; break;
} }
} }
......
...@@ -130,7 +130,7 @@ void EltwiseGradientOp<Context>::ShareGradient() { ...@@ -130,7 +130,7 @@ void EltwiseGradientOp<Context>::ShareGradient() {
for (int i = 0; i < OutputSize(); i++) { for (int i = 0; i < OutputSize(); i++) {
if (output(i)->name() != "ignore") { if (output(i)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad"); Tensor* dX = ws()->GetBuffer("Grad");
output(i)->Replace(*dX); ws()->CreateAvatar(output(i), dX);
break; break;
} }
} }
......
...@@ -118,7 +118,7 @@ void MatmulGradientOp<Context>::ShareGradient() { ...@@ -118,7 +118,7 @@ void MatmulGradientOp<Context>::ShareGradient() {
for (int i = 0; i < OutputSize(); i++) { for (int i = 0; i < OutputSize(); i++) {
if (output(i)->name() != "ignore") { if (output(i)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad"); Tensor* dX = ws()->GetBuffer("Grad");
output(i)->Replace(*dX); ws()->CreateAvatar(output(i), dX);
break; break;
} }
} }
......
...@@ -195,7 +195,7 @@ void MulGradientOp<Context>::ShareGradient() { ...@@ -195,7 +195,7 @@ void MulGradientOp<Context>::ShareGradient() {
for (int i = 0; i < OutputSize(); i++) { for (int i = 0; i < OutputSize(); i++) {
if (output(i)->name() != "ignore") { if (output(i)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad"); Tensor* dX = ws()->GetBuffer("Grad");
output(i)->Replace(*dX); ws()->CreateAvatar(output(i), dX);
break; break;
} }
} }
......
...@@ -176,7 +176,7 @@ void RAddGradientOp<Context>::ShareGradient() { ...@@ -176,7 +176,7 @@ void RAddGradientOp<Context>::ShareGradient() {
for (int i = (int)OutputSize() - 1; i >= 0; i--) { for (int i = (int)OutputSize() - 1; i >= 0; i--) {
if (output(i)->name() != "ignore") { if (output(i)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad"); Tensor* dX = ws()->GetBuffer("Grad");
output(i)->Replace(*dX); ws()->CreateAvatar(output(i), dX);
break; break;
} }
} }
......
...@@ -201,7 +201,7 @@ void RDivGradientOp<Context>::ShareGradient() { ...@@ -201,7 +201,7 @@ void RDivGradientOp<Context>::ShareGradient() {
for (int i = (int)OutputSize() - 1; i >= 0; i--) { for (int i = (int)OutputSize() - 1; i >= 0; i--) {
if (output(i)->name() != "ignore") { if (output(i)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad"); Tensor* dX = ws()->GetBuffer("Grad");
output(i)->Replace(*dX); ws()->CreateAvatar(output(i), dX);
break; break;
} }
} }
......
...@@ -193,7 +193,7 @@ void RMulGradientOp<Context>::ShareGradient() { ...@@ -193,7 +193,7 @@ void RMulGradientOp<Context>::ShareGradient() {
for (int i = (int)OutputSize() - 1; i >= 0; i--) { for (int i = (int)OutputSize() - 1; i >= 0; i--) {
if (output(i)->name() != "ignore") { if (output(i)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad"); Tensor* dX = ws()->GetBuffer("Grad");
output(i)->Replace(*dX); ws()->CreateAvatar(output(i), dX);
break; break;
} }
} }
......
...@@ -177,7 +177,7 @@ void RSubGradientOp<Context>::ShareGradient() { ...@@ -177,7 +177,7 @@ void RSubGradientOp<Context>::ShareGradient() {
for (int i = (int)OutputSize() - 1; i >= 0; i--) { for (int i = (int)OutputSize() - 1; i >= 0; i--) {
if (output(i)->name() != "ignore") { if (output(i)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad"); Tensor* dX = ws()->GetBuffer("Grad");
output(i)->Replace(*dX); ws()->CreateAvatar(output(i), dX);
break; break;
} }
} }
......
...@@ -7,26 +7,31 @@ namespace dragon { ...@@ -7,26 +7,31 @@ namespace dragon {
template <class Context> template <typename T> template <class Context> template <typename T>
void ScaleOp<Context>::RunWithType() { void ScaleOp<Context>::RunWithType() {
CHECK_LT(axis, (int)input(0).ndim()); start_axis = axis;
const vector<TIndex>::const_iterator& dim_start = if (start_axis < 0) start_axis += (int)input(0).ndim();
input(0).dims().begin() + axis; if (num_axes == -1) num_axes = (int)input(0).ndim() - start_axis;
if (num_axes == -1) num_axes = (int)input(0).ndim() - axis; else if (num_axes == 0) num_axes = 1;
CHECK_LE(axis + num_axes, (int)input(0).ndim());
CHECK_LT(start_axis, (int)input(0).ndim());
CHECK_LE(start_axis + num_axes, (int)input(0).ndim());
const vector<TIndex>::const_iterator& dim_start = input(0).dims().begin() + start_axis;
const vector<TIndex>::const_iterator& dim_end = dim_start + num_axes; const vector<TIndex>::const_iterator& dim_end = dim_start + num_axes;
vector<TIndex> param_dims(dim_start, dim_end); vector<TIndex> param_dims(dim_start, dim_end);
TENSOR_FILL(input(1), param_dims); TENSOR_FILL(input(1), param_dims);
if (InputSize() > 2) { if (InputSize() > 2) {
TENSOR_FILL(input(2), param_dims); TENSOR_FILL(input(2), param_dims);
inner_dim = input(0).count(axis + num_axes); inner_dim = input(0).count(start_axis + num_axes);
INIT_MULTIPLIER(bias_multiplier, inner_dim); INIT_MULTIPLIER(bias_multiplier, inner_dim);
} }
if (InputSize() > 2) { if (InputSize() > 2) {
kernel::Scale<T, Context>(axis, &input(0), &input(1), kernel::Scale<T, Context>(start_axis, &input(0), &input(1),
&input(2), bias_multiplier, &input(2), bias_multiplier,
output(0)); output(0));
} else { } else {
kernel::Scale<T, Context>(axis, &input(0), &input(1), kernel::Scale<T, Context>(start_axis, &input(0), &input(1),
nullptr, nullptr, nullptr, nullptr,
output(0)); output(0));
} }
...@@ -118,14 +123,21 @@ void ScaleGradientOp<Context>::ScaleRunWithType() { ...@@ -118,14 +123,21 @@ void ScaleGradientOp<Context>::ScaleRunWithType() {
template <class Context> template <typename T> template <class Context> template <typename T>
void ScaleGradientOp<Context>::RunWithType() { void ScaleGradientOp<Context>::RunWithType() {
output(0)->ReshapeLike(input(0)); output(0)->ReshapeLike(input(0));
kernel::ScaleGrad<float, Context>(axis, &input(-1), &input(1), output(0)); kernel::ScaleGrad<float, Context>(start_axis, &input(-1), &input(1), output(0));
} }
template <class Context> template <class Context>
void ScaleGradientOp<Context>::RunOnDevice() { void ScaleGradientOp<Context>::RunOnDevice() {
if (num_axes == -1) num_axes = (int)input(0).ndim() - axis; start_axis = axis;
outer_dim = input(0).count(0, axis); if (start_axis < 0) start_axis += (int)input(0).ndim();
inner_dim = input(0).count(axis + num_axes); if (num_axes == -1) num_axes = (int)input(0).ndim() - start_axis;
else if (num_axes == 0) num_axes = 1;
CHECK_LT(start_axis, (int)input(0).ndim());
CHECK_LE(start_axis + num_axes, (int)input(0).ndim());
outer_dim = input(0).count(0, start_axis);
inner_dim = input(0).count(start_axis + num_axes);
scale_dim = input(1).count(); scale_dim = input(1).count();
sum_dim = std::max(outer_dim, inner_dim); sum_dim = std::max(outer_dim, inner_dim);
dim = scale_dim * inner_dim; dim = scale_dim * inner_dim;
......
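The axis handling above deserves a worked reading: a negative axis now counts from the end, num_axes == -1 means "through the last axis", and num_axes == 0 is promoted to 1, after which both bounds are checked. For a 4-D input (N, C, H, W), axis = -3 with num_axes = 1 gives start_axis = 1 (scale over C), while axis = 1 with num_axes = -1 scales over C, H and W. A small sketch of the same normalization as a free function:

#include <cassert>
#include <utility>

// Returns {start_axis, effective num_axes} following the rules added to ScaleOp.
std::pair<int, int> NormalizeScaleAxes(int axis, int num_axes, int ndim) {
    int start_axis = axis;
    if (start_axis < 0) start_axis += ndim;
    if (num_axes == -1) num_axes = ndim - start_axis;
    else if (num_axes == 0) num_axes = 1;
    assert(start_axis < ndim);
    assert(start_axis + num_axes <= ndim);
    return {start_axis, num_axes};
}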
...@@ -176,7 +176,7 @@ void SubGradientOp<Context>::ShareGradient() { ...@@ -176,7 +176,7 @@ void SubGradientOp<Context>::ShareGradient() {
for (int i = 0; i < OutputSize(); i++) { for (int i = 0; i < OutputSize(); i++) {
if (output(i)->name() != "ignore") { if (output(i)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad"); Tensor* dX = ws()->GetBuffer("Grad");
output(i)->Replace(*dX); ws()->CreateAvatar(output(i), dX);
break; break;
} }
} }
......
...@@ -142,7 +142,7 @@ void ScanOp<Context>::UnrollTemplate() { ...@@ -142,7 +142,7 @@ void ScanOp<Context>::UnrollTemplate() {
new_def.add_target(output(i)->name()); new_def.add_target(output(i)->name());
} }
// upload // upload
Tensor* string_tensor = ws()->CreateTensor("_t_" + anchor() + "_raw_ops"); Tensor* string_tensor = ws()->CreateTensor("/mnt/" + anchor() + "/raw_ops");
string_tensor->Reshape(vector<TIndex>(1, 1)); string_tensor->Reshape(vector<TIndex>(1, 1));
string* data = string_tensor->mutable_data <string, CPUContext>(); string* data = string_tensor->mutable_data <string, CPUContext>();
data[0] = new_def.SerializeAsString(); data[0] = new_def.SerializeAsString();
...@@ -171,7 +171,7 @@ void ScanGradientOp<Context>::MakeGradientOps() { ...@@ -171,7 +171,7 @@ void ScanGradientOp<Context>::MakeGradientOps() {
else if (step_type == "Default") nsteps = input(0).dim(axis); else if (step_type == "Default") nsteps = input(0).dim(axis);
if (graphs.count(nsteps)) return; if (graphs.count(nsteps)) return;
Tensor* ops = ws()->GetTensor("_t_" + anchor() + "_raw_ops"); Tensor* ops = ws()->GetTensor("/mnt/" + anchor() + "/raw_ops");
forward_def.ParseFromString(ops->data<string, CPUContext>()[0]); forward_def.ParseFromString(ops->data<string, CPUContext>()[0]);
vector<string> targets; vector<string> targets;
for (auto& target : forward_def.target()) targets.push_back(target); for (auto& target : forward_def.target()) targets.push_back(target);
......
...@@ -31,7 +31,7 @@ template <class Context> ...@@ -31,7 +31,7 @@ template <class Context>
void L1LossOp<Context>::RunOnDevice() { void L1LossOp<Context>::RunOnDevice() {
CHECK_EQ(input(0).count(), input(1).count()); CHECK_EQ(input(0).count(), input(1).count());
output(0)->Reshape(vector<TIndex>(1, 1)); output(0)->Reshape(vector<TIndex>(1, 1));
diff = ws()->CreateTensor("_t_" + anchor() + "_l1_loss_diff"); diff = ws()->CreateTensor("/mnt/" + anchor() + "/l1_loss_diff");
diff->ReshapeLike(input(0)); diff->ReshapeLike(input(0));
if (input(0).template IsType<float>()) RunWithType<float>(); if (input(0).template IsType<float>()) RunWithType<float>();
...@@ -67,7 +67,7 @@ void L1LossGradientOp<Context>::RunWithType() { ...@@ -67,7 +67,7 @@ void L1LossGradientOp<Context>::RunWithType() {
template <class Context> template <class Context>
void L1LossGradientOp<Context>::RunOnDevice() { void L1LossGradientOp<Context>::RunOnDevice() {
diff = ws()->GetTensor("_t_" + anchor() + "_l1_loss_diff"); diff = ws()->GetTensor("/mnt/" + anchor() + "/l1_loss_diff");
if (input(0).template IsType<float>()) RunWithType<float>(); if (input(0).template IsType<float>()) RunWithType<float>();
else LOG(FATAL) << "Unsupported input types."; else LOG(FATAL) << "Unsupported input types.";
...@@ -78,7 +78,7 @@ void L1LossGradientOp<Context>::ShareGradient() { ...@@ -78,7 +78,7 @@ void L1LossGradientOp<Context>::ShareGradient() {
for (int i = 0; i < OutputSize(); i++) { for (int i = 0; i < OutputSize(); i++) {
if (output(i)->name() != "ignore") { if (output(i)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad"); Tensor* dX = ws()->GetBuffer("Grad");
output(i)->Replace(*dX); ws()->CreateAvatar(output(i), dX);
break; break;
} }
} }
......
...@@ -29,7 +29,7 @@ template <class Context> ...@@ -29,7 +29,7 @@ template <class Context>
void L2LossOp<Context>::RunOnDevice() { void L2LossOp<Context>::RunOnDevice() {
CHECK_EQ(input(0).count(), input(1).count()); CHECK_EQ(input(0).count(), input(1).count());
output(0)->Reshape(vector<TIndex>(1, 1)); output(0)->Reshape(vector<TIndex>(1, 1));
diff = ws()->CreateTensor("_t_" + anchor() + "_l2_loss_diff"); diff = ws()->CreateTensor("/mnt/" + anchor() + "/l2_loss_diff");
diff->ReshapeLike(input(0)); diff->ReshapeLike(input(0));
if (input(0).template IsType<float>()) RunWithType<float>(); if (input(0).template IsType<float>()) RunWithType<float>();
...@@ -64,7 +64,7 @@ void L2LossGradientOp<Context>::RunWithType() { ...@@ -64,7 +64,7 @@ void L2LossGradientOp<Context>::RunWithType() {
template <class Context> template <class Context>
void L2LossGradientOp<Context>::RunOnDevice() { void L2LossGradientOp<Context>::RunOnDevice() {
diff = ws()->GetTensor("_t_" + anchor() + "_l2_loss_diff"); diff = ws()->GetTensor("/mnt/" + anchor() + "/l2_loss_diff");
if (input(0).template IsType<float>()) RunWithType<float>(); if (input(0).template IsType<float>()) RunWithType<float>();
else LOG(FATAL) << "Unsupported input types."; else LOG(FATAL) << "Unsupported input types.";
...@@ -75,7 +75,7 @@ void L2LossGradientOp<Context>::ShareGradient() { ...@@ -75,7 +75,7 @@ void L2LossGradientOp<Context>::ShareGradient() {
for (int i = 0; i < OutputSize(); i++) { for (int i = 0; i < OutputSize(); i++) {
if (output(i)->name() != "ignore") { if (output(i)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad"); Tensor* dX = ws()->GetBuffer("Grad");
output(i)->Replace(*dX); ws()->CreateAvatar(output(i), dX);
break; break;
} }
} }
......
...@@ -35,7 +35,7 @@ template <class Context> ...@@ -35,7 +35,7 @@ template <class Context>
void SigmoidCrossEntropyOp<Context>::RunOnDevice() { void SigmoidCrossEntropyOp<Context>::RunOnDevice() {
CHECK_EQ(input(0).count(), input(1).count()) CHECK_EQ(input(0).count(), input(1).count())
<< "\nNumber of predictions must match the number of labels."; << "\nNumber of predictions must match the number of labels.";
prob = ws()->CreateTensor("_t_" + anchor() + "_sigmoid_prob"); prob = ws()->CreateTensor("/mnt/" + anchor() + "/sigmoid_prob");
prob->ReshapeLike(input(0)); prob->ReshapeLike(input(0));
losses.ReshapeLike(input(0)); losses.ReshapeLike(input(0));
...@@ -73,7 +73,7 @@ void SigmoidCrossEntropyGradientOp<Context>::RunWithType() { ...@@ -73,7 +73,7 @@ void SigmoidCrossEntropyGradientOp<Context>::RunWithType() {
template <class Context> template <class Context>
void SigmoidCrossEntropyGradientOp<Context>::RunOnDevice() { void SigmoidCrossEntropyGradientOp<Context>::RunOnDevice() {
prob = ws()->GetTensor("_t_" + anchor() + "_sigmoid_prob"); prob = ws()->GetTensor("/mnt/" + anchor() + "/sigmoid_prob");
output(0)->ReshapeLike(input(0)); output(0)->ReshapeLike(input(0));
if (input(0).template IsType<float>()) RunWithType<float>(); if (input(0).template IsType<float>()) RunWithType<float>();
......
...@@ -23,9 +23,13 @@ void SmoothL1LossOp<Context>::RunWithType() { ...@@ -23,9 +23,13 @@ void SmoothL1LossOp<Context>::RunWithType() {
auto* outside_w_data = input(3).template data<T, Context>(); auto* outside_w_data = input(3).template data<T, Context>();
math::Mul<T, Context>(diff->count(), outside_w_data, error_data, error_data); math::Mul<T, Context>(diff->count(), outside_w_data, error_data, error_data);
} }
Ydata[0] = math::ASum<T, Context>(error->count(), error_data);
T loss = math::ASum<T, Context>(error->count(), error_data); T normalizer;
Ydata[0] = loss / input(0).dim(0); if (normalization == "BATCH_SIZE") normalizer = input(0).dim(0);
else if (normalization == "FULL") normalizer = input(0).count();
else if (normalization == "NONE") normalizer = 1;
Ydata[0] = Ydata[0] / normalizer;
} }
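Note: the scalar loss is no longer hard-wired to divide by the batch size; it is the absolute sum of the per-element errors divided by a normalizer chosen by the normalization argument (BATCH_SIZE -> first dimension, FULL -> total element count, NONE -> 1). A self-contained sketch of that reduction, with plain vectors standing in for tensors:

    #include <cmath>
    #include <string>
    #include <vector>

    // Sketch: reduce the per-element smooth-L1 errors to a scalar loss.
    float ReduceSmoothL1(const std::vector<float>& error, int batch_size,
                         const std::string& normalization) {
        float sum = 0.0f;
        for (float e : error) sum += std::fabs(e);       // math::ASum
        float normalizer = 1.0f;                         // "NONE"
        if (normalization == "BATCH_SIZE") normalizer = float(batch_size);
        else if (normalization == "FULL") normalizer = float(error.size());
        return sum / normalizer;
    }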
template <class Context> template <class Context>
...@@ -35,8 +39,8 @@ void SmoothL1LossOp<Context>::RunOnDevice() { ...@@ -35,8 +39,8 @@ void SmoothL1LossOp<Context>::RunOnDevice() {
if (InputSize() > 3) CHECK(input(0).dims() == input(3).dims()); if (InputSize() > 3) CHECK(input(0).dims() == input(3).dims());
output(0)->Reshape(vector<TIndex>(1, 1)); output(0)->Reshape(vector<TIndex>(1, 1));
diff = ws()->CreateTensor("_t_" + anchor() + "_smoothl1_loss_diff"); diff = ws()->CreateTensor("/mnt/" + anchor() + "/smoothl1_loss_diff");
error = ws()->CreateTensor("_t_smoothl1_loss_error"); error = ws()->CreateTensor("/share/smoothl1_loss_error");
diff->ReshapeLike(input(0)); diff->ReshapeLike(input(0));
error->ReshapeLike(input(0)); error->ReshapeLike(input(0));
...@@ -54,16 +58,21 @@ template <class Context> template <typename T> ...@@ -54,16 +58,21 @@ template <class Context> template <typename T>
void SmoothL1LossGradientOp<Context>::RunWithType() { void SmoothL1LossGradientOp<Context>::RunWithType() {
auto* diff_data = diff->template mutable_data<T, Context>(); auto* diff_data = diff->template mutable_data<T, Context>();
auto* dYdata = input(-1).template data<T, CPUContext>(); auto* dYdata = input(-1).template data<T, CPUContext>();
kernel::SmoothL1Grad<T, Context>(diff->count(), sigma2, diff_data, diff_data); kernel::SmoothL1Grad<T, Context>(diff->count(), sigma2, diff_data, diff_data);
T alpha = dYdata[0], normalizer;
if (normalization == "BATCH_SIZE") normalizer = input(0).dim(0);
else if (normalization == "FULL") normalizer = input(0).count();
else if (normalization == "NONE") normalizer = 1;
alpha = alpha / normalizer;
for (int i = 0; i < 2; i++) { for (int i = 0; i < 2; i++) {
if (output(i)->name() == "ignore") continue; if (output(i)->name() == "ignore") continue;
output(i)->ReshapeLike(input(i)); output(i)->ReshapeLike(input(i));
auto* dXdata = output(i)->template mutable_data<T, Context>(); auto* dXdata = output(i)->template mutable_data<T, Context>();
const T sign = (i == 0) ? 1 : -1; const T sign = (i == 0) ? 1 : -1;
const T coeff = sign / input(i).dim(0) * dYdata[0]; alpha *= sign;
math::Axpby<T, Context>(output(i)->count(), coeff, diff_data, 0, dXdata); math::Axpby<T, Context>(output(i)->count(), alpha, diff_data, 0, dXdata);
if (InputSize() > 3) { if (InputSize() > 3) {
auto* inside_w_data = input(2).template data<T, Context>(); auto* inside_w_data = input(2).template data<T, Context>();
math::Mul<T, Context>(output(i)->count(), inside_w_data, dXdata, dXdata); math::Mul<T, Context>(output(i)->count(), inside_w_data, dXdata, dXdata);
...@@ -77,7 +86,7 @@ void SmoothL1LossGradientOp<Context>::RunWithType() { ...@@ -77,7 +86,7 @@ void SmoothL1LossGradientOp<Context>::RunWithType() {
template <class Context> template <class Context>
void SmoothL1LossGradientOp<Context>::RunOnDevice() { void SmoothL1LossGradientOp<Context>::RunOnDevice() {
diff = ws()->GetTensor("_t_" + anchor() + "_smoothl1_loss_diff"); diff = ws()->GetTensor("/mnt/" + anchor() + "/smoothl1_loss_diff");
if (input(0).template IsType<float>()) RunWithType<float>(); if (input(0).template IsType<float>()) RunWithType<float>();
else LOG(FATAL) << "Unsupported input types."; else LOG(FATAL) << "Unsupported input types.";
......
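Note: the backward pass mirrors the same choice. The incoming scalar gradient is divided by the same normalizer before being broadcast, and the two data inputs receive it with opposite signs; the optional inside/outside weights, when present, are multiplied in afterwards. As worked equations:

    dX0 =  (dY / normalizer) * SmoothL1'(diff)
    dX1 = -(dY / normalizer) * SmoothL1'(diff)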
...@@ -43,7 +43,7 @@ void SoftmaxCrossEntropyOp<Context>::RunOnDevice() { ...@@ -43,7 +43,7 @@ void SoftmaxCrossEntropyOp<Context>::RunOnDevice() {
<< "\nNumber of predictions must match the number of labels."; << "\nNumber of predictions must match the number of labels.";
losses.ReshapeLike(input(0)); losses.ReshapeLike(input(0));
softmax_op->Run(); softmax_op->Run();
prob = ws()->GetTensor("_t_" + anchor() + "_softmax_prob"); prob = ws()->GetTensor("/mnt/" + anchor() + "/softmax_prob");
if (input(0).template IsType<float>()) RunWithType<float>(); if (input(0).template IsType<float>()) RunWithType<float>();
else LOG(FATAL) << "Unsupported input types."; else LOG(FATAL) << "Unsupported input types.";
...@@ -85,7 +85,7 @@ void SoftmaxCrossEntropyGradientOp<Context>::RunWithType() { ...@@ -85,7 +85,7 @@ void SoftmaxCrossEntropyGradientOp<Context>::RunWithType() {
template <class Context> template <class Context>
void SoftmaxCrossEntropyGradientOp<Context>::RunOnDevice() { void SoftmaxCrossEntropyGradientOp<Context>::RunOnDevice() {
prob = ws()->GetTensor("_t_" + anchor() + "_softmax_prob"); prob = ws()->GetTensor("/mnt/" + anchor() + "/softmax_prob");
outer_dim = prob->count(0, axis); outer_dim = prob->count(0, axis);
inner_dim = prob->count(axis + 1); inner_dim = prob->count(axis + 1);
output(0)->ReshapeLike(input(0)); output(0)->ReshapeLike(input(0));
......
...@@ -51,7 +51,7 @@ void SparseSoftmaxCrossEntropyOp<Context>::RunOnDevice() { ...@@ -51,7 +51,7 @@ void SparseSoftmaxCrossEntropyOp<Context>::RunOnDevice() {
valid.Reshape(vector<TIndex>(1, outer_dim * inner_dim)); valid.Reshape(vector<TIndex>(1, outer_dim * inner_dim));
losses.Reshape(vector<TIndex>(1, outer_dim * inner_dim)); losses.Reshape(vector<TIndex>(1, outer_dim * inner_dim));
softmax_op->Run(); softmax_op->Run();
prob = ws()->GetTensor("_t_" + anchor() + "_softmax_prob"); prob = ws()->GetTensor("/mnt/" + anchor() + "/softmax_prob");
if (input(0).template IsType<float>()) RunWithType<float>(); if (input(0).template IsType<float>()) RunWithType<float>();
else LOG(FATAL) << "Unsupported input types."; else LOG(FATAL) << "Unsupported input types.";
...@@ -100,7 +100,7 @@ void SparseSoftmaxCrossEntropyGradientOp<Context>::RunWithType() { ...@@ -100,7 +100,7 @@ void SparseSoftmaxCrossEntropyGradientOp<Context>::RunWithType() {
template <class Context> template <class Context>
void SparseSoftmaxCrossEntropyGradientOp<Context>::RunOnDevice() { void SparseSoftmaxCrossEntropyGradientOp<Context>::RunOnDevice() {
prob = ws()->GetTensor("_t_" + anchor() + "_softmax_prob"); prob = ws()->GetTensor("/mnt/" + anchor() + "/softmax_prob");
outer_dim = prob->count(0, axis); outer_dim = prob->count(0, axis);
inner_dim = prob->count(axis + 1); inner_dim = prob->count(axis + 1);
output(0)->ReshapeLike(input(0)); output(0)->ReshapeLike(input(0));
......
...@@ -57,8 +57,8 @@ void SparseSoftmaxFocalLossOp<Context>::RunOnDevice() { ...@@ -57,8 +57,8 @@ void SparseSoftmaxFocalLossOp<Context>::RunOnDevice() {
this->valid.Reshape(vector<TIndex>(1, outer_dim * inner_dim)); this->valid.Reshape(vector<TIndex>(1, outer_dim * inner_dim));
this->losses.Reshape(vector<TIndex>(1, outer_dim * inner_dim)); this->losses.Reshape(vector<TIndex>(1, outer_dim * inner_dim));
this->softmax_op->Run(); this->softmax_op->Run();
this->prob = ws()->GetTensor("_t_" + anchor() + "_softmax_prob"); this->prob = ws()->GetTensor("/mnt/" + anchor() + "/softmax_prob");
scale = ws()->CreateTensor("_t_" + anchor() + "_focal_scale"); scale = ws()->CreateTensor("/mnt/" + anchor() + "/focal_scale");
scale->ReshapeLike(*this->prob); scale->ReshapeLike(*this->prob);
if (input(0).template IsType<float>()) RunWithType<float>(); if (input(0).template IsType<float>()) RunWithType<float>();
...@@ -116,8 +116,8 @@ void SparseSoftmaxFocalLossGradientOp<Context>::RunWithType() { ...@@ -116,8 +116,8 @@ void SparseSoftmaxFocalLossGradientOp<Context>::RunWithType() {
template <class Context> template <class Context>
void SparseSoftmaxFocalLossGradientOp<Context>::RunOnDevice() { void SparseSoftmaxFocalLossGradientOp<Context>::RunOnDevice() {
this->prob = ws()->GetTensor("_t_" + anchor() + "_softmax_prob"); this->prob = ws()->GetTensor("/mnt/" + anchor() + "/softmax_prob");
scale = ws()->GetTensor("_t_" + anchor() + "_focal_scale"); scale = ws()->GetTensor("/mnt/" + anchor() + "/focal_scale");
outer_dim = this->prob->count(0, axis); outer_dim = this->prob->count(0, axis);
inner_dim = this->prob->count(axis + 1); inner_dim = this->prob->count(axis + 1);
output(0)->ReshapeLike(input(0)); output(0)->ReshapeLike(input(0));
......
...@@ -37,9 +37,7 @@ void GradientGatherOp<Context>::RunWithType() { ...@@ -37,9 +37,7 @@ void GradientGatherOp<Context>::RunWithType() {
TIndex count = output(0)->count(); TIndex count = output(0)->count();
for (int i = 1; i < indices.size(); i++) { for (int i = 1; i < indices.size(); i++) {
CHECK(output(0)->dims() == input(indices[i]).dims()); CHECK(output(0)->dims() == input(indices[i]).dims());
math::Add<T, Context>(count, dXdata, math::Add<T, Context>(count, dXdata, input(indices[i]).template data<T, Context>(), dXdata);
input(indices[i]).template data<T, Context>(), dXdata);
// trick: force to release memory
input(indices[i]).Reset(); input(indices[i]).Reset();
} }
} }
...@@ -47,8 +45,7 @@ void GradientGatherOp<Context>::RunWithType() { ...@@ -47,8 +45,7 @@ void GradientGatherOp<Context>::RunWithType() {
template <class Context> template <class Context>
void GradientGatherOp<Context>::RunOnDevice() { void GradientGatherOp<Context>::RunOnDevice() {
if (indices.size() == 0) return; if (indices.size() == 0) return;
output(0)->ReshapeLike(input(indices[0])); ws()->CreateAvatar(output(0), &input(indices[0]));
output(0)->Share(input(indices[0]));
if (input(indices[0]).template IsType<float>()) RunWithType<float>(); if (input(indices[0]).template IsType<float>()) RunWithType<float>();
else LOG(FATAL) << "Unsupported input types."; else LOG(FATAL) << "Unsupported input types.";
...@@ -63,8 +60,7 @@ NO_GRADIENT(GradientGather); ...@@ -63,8 +60,7 @@ NO_GRADIENT(GradientGather);
template <class Context> template <class Context>
void StopGradientOp<Context>::RunOnDevice() { void StopGradientOp<Context>::RunOnDevice() {
output(0)->ReshapeLike(input(0)); ws()->CreateAvatar(output(0), &input(0));
output(0)->Share(input(0));
} }
DEPLOY_CPU(StopGradient); DEPLOY_CPU(StopGradient);
......
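Note: GradientGather no longer reshapes and shares; output(0) is made an avatar of the first incoming gradient and the remaining gradients are accumulated into it, each input being Reset() right after it is consumed so its memory is returned immediately. StopGradient is reduced to the same single aliasing call. A small sketch of the accumulate-and-release pattern (plain vectors in place of tensors):

    #include <cstddef>
    #include <vector>

    // Sketch: sum extra gradients into the aliased accumulator and free each
    // source as soon as it has been added (analogous to input(i).Reset()).
    void GatherInto(std::vector<float>& acc,
                    std::vector<std::vector<float>>& grads) {
        for (auto& g : grads) {
            for (std::size_t i = 0; i < acc.size(); ++i) acc[i] += g[i];
            g.clear();
            g.shrink_to_fit();
        }
    }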
...@@ -109,7 +109,7 @@ void ConcatGradientOp<Context>::ShareGradient() { ...@@ -109,7 +109,7 @@ void ConcatGradientOp<Context>::ShareGradient() {
for (int i = 0; i < OutputSize(); i++) { for (int i = 0; i < OutputSize(); i++) {
if (output(i)->name() != "ignore") { if (output(i)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad"); Tensor* dX = ws()->GetBuffer("Grad");
output(i)->Replace(*dX); ws()->CreateAvatar(output(i), dX);
break; break;
} }
} }
......
...@@ -38,19 +38,9 @@ void CropOp<Context>::Setup() { ...@@ -38,19 +38,9 @@ void CropOp<Context>::Setup() {
} }
// make ends // make ends
if (ends.size() > 0) { if (shape.size() + shape_like.size() != 0) {
// static crop
CHECK_EQ(ends.size(), input(0).ndim())
<< "\nThe cropping is performed on " << ends.size() << " dimensions, "
<< "but the num of dimensions of input is " << input(0).ndim() << "."; \
// fix end if necessary
for (int i = 0; i < ends.size(); i++)
if (ends[i] == 0) ends[i] = input(0).dim(i);
} else {
CHECK(shape.size() * shape_like.size() == 0) CHECK(shape.size() * shape_like.size() == 0)
<< "\nCan not set shape and shape_like both."; << "\nCan not set shape and shape_like both.";
CHECK(shape.size() + shape_like.size() != 0)
<< "\nMust set shape and shape_like either.";
ends.resize(input(0).ndim(), 0); ends.resize(input(0).ndim(), 0);
for (int i = 0; i < ends.size(); i++) { for (int i = 0; i < ends.size(); i++) {
// dynamic crop 1: keep unchanged // dynamic crop 1: keep unchanged
...@@ -73,6 +63,14 @@ void CropOp<Context>::Setup() { ...@@ -73,6 +63,14 @@ void CropOp<Context>::Setup() {
ends[i] = starts[i] + like->dim(i); ends[i] = starts[i] + like->dim(i);
} }
} }
} else {
// static crop
CHECK_EQ(ends.size(), input(0).ndim())
<< "\nThe cropping is performed on " << ends.size() << " dimensions, "
<< "but the num of dimensions of input is " << input(0).ndim() << ".";
// fix end if necessary
for (int i = 0; i < ends.size(); i++)
if (ends[i] == 0) ends[i] = input(0).dim(i);
} }
// check starts and ends // check starts and ends
...@@ -157,19 +155,9 @@ void CropGradientOp<Context>::Setup() { ...@@ -157,19 +155,9 @@ void CropGradientOp<Context>::Setup() {
} }
// make ends // make ends
if (ends.size() > 0) { if (shape.size() + shape_like.size() != 0) {
// static crop
CHECK_EQ(ends.size(), input(0).ndim())
<< "\nThe cropping is performed on " << ends.size() << " dimensions, "
<< "but the num of dimensions of input is " << input(0).ndim() << "."; \
// fix end if necessary
for (int i = 0; i < ends.size(); i++)
if (ends[i] == 0) ends[i] = input(0).dim(i);
} else {
CHECK(shape.size() * shape_like.size() == 0) CHECK(shape.size() * shape_like.size() == 0)
<< "\nCan not set shape and shape_like both."; << "\nCan not set shape and shape_like both.";
CHECK(shape.size() + shape_like.size() != 0)
<< "\nMust set shape and shape_like either.";
ends.resize(input(0).ndim(), 0); ends.resize(input(0).ndim(), 0);
for (int i = 0; i < ends.size(); i++) { for (int i = 0; i < ends.size(); i++) {
// dynamic crop 1: keep unchanged // dynamic crop 1: keep unchanged
...@@ -192,6 +180,14 @@ void CropGradientOp<Context>::Setup() { ...@@ -192,6 +180,14 @@ void CropGradientOp<Context>::Setup() {
ends[i] = starts[i] + like->dim(i); ends[i] = starts[i] + like->dim(i);
} }
} }
} else {
// static crop
CHECK_EQ(ends.size(), input(0).ndim())
<< "\nThe cropping is performed on " << ends.size() << " dimensions, "
<< "but the num of dimensions of input is " << input(0).ndim() << "."; \
// fix end if necessary
for (int i = 0; i < ends.size(); i++)
if (ends[i] == 0) ends[i] = input(0).dim(i);
} }
// check starts and ends // check starts and ends
......
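Note: both Setup() routines now test for a dynamic description first: if shape or shape_like is given (and only one of them may be), ends starts out as zeros and is filled per dimension further down; only when neither is given does the op fall back to the explicitly supplied ends, padding any zero entry with the input's dimension. A compact sketch of that selection (standalone, simplified types):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Sketch: choose between the dynamic and the static crop description.
    std::vector<int> MakeEnds(std::vector<int> ends,
                              const std::vector<int>& input_dims,
                              bool has_shape, bool has_shape_like) {
        if (has_shape || has_shape_like) {           // dynamic crop
            assert(!(has_shape && has_shape_like));  // can not set both
            ends.assign(input_dims.size(), 0);       // filled per dimension later
        } else {                                     // static crop
            assert(ends.size() == input_dims.size());
            for (std::size_t i = 0; i < ends.size(); ++i)
                if (ends[i] == 0) ends[i] = input_dims[i];
        }
        return ends;
    }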
...@@ -33,7 +33,7 @@ void RandomPickOp<Context>::RunOnDevice() { ...@@ -33,7 +33,7 @@ void RandomPickOp<Context>::RunOnDevice() {
inner_dim = input(0).count(axis + 1); inner_dim = input(0).count(axis + 1);
output(0)->Reshape(output_dims); output(0)->Reshape(output_dims);
pick_indices = ws()->CreateTensor("_t_" + anchor() + "_pick_indices"); pick_indices = ws()->CreateTensor("/mnt/" + anchor() + "/pick_indices");
pick_indices->Reshape(vector<TIndex>(1, max_samples)); pick_indices->Reshape(vector<TIndex>(1, max_samples));
if (input(0).template IsType<float>()) RunWithType<float>(); if (input(0).template IsType<float>()) RunWithType<float>();
...@@ -68,7 +68,7 @@ void RandomPickGradientOp<Context>::RunWithType() { ...@@ -68,7 +68,7 @@ void RandomPickGradientOp<Context>::RunWithType() {
template <class Context> template <class Context>
void RandomPickGradientOp<Context>::RunOnDevice() { void RandomPickGradientOp<Context>::RunOnDevice() {
pick_indices = ws()->GetTensor("_t_" + anchor() + "_pick_indices"); pick_indices = ws()->GetTensor("/mnt/" + anchor() + "/pick_indices");
x_slice_dim = input(0).dim(axis); x_slice_dim = input(0).dim(axis);
y_slice_dim = pick_indices->count(); y_slice_dim = pick_indices->count();
......
...@@ -108,7 +108,7 @@ void StackGradientOp<Context>::ShareGradient() { ...@@ -108,7 +108,7 @@ void StackGradientOp<Context>::ShareGradient() {
for (int i = 0; i < OutputSize(); i++) { for (int i = 0; i < OutputSize(); i++) {
if (output(i)->name() != "ignore") { if (output(i)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad"); Tensor* dX = ws()->GetBuffer("Grad");
output(i)->Replace(*dX); ws()->CreateAvatar(output(i), dX);
break; break;
} }
} }
......
...@@ -27,9 +27,9 @@ void TransposeOp<Context>::RunOnDevice() { ...@@ -27,9 +27,9 @@ void TransposeOp<Context>::RunOnDevice() {
<< "\nbut Tensor(" << input(0).name() << ")'s dims are " << "\nbut Tensor(" << input(0).name() << ")'s dims are "
<< input(0).dim_string(); << input(0).dim_string();
vector<TIndex> output_dims; vector<TIndex> output_dims;
order = ws()->CreateTensor("_t_" + anchor() + "_order"); order = ws()->CreateTensor("/mnt/" + anchor() + "/transpose_order");
old_steps = ws()->CreateTensor("_t_" + anchor() + "_old_steps"); old_steps = ws()->CreateTensor("/mnt/" + anchor() + "/transpose_old_steps");
new_steps = ws()->CreateTensor("_t_" + anchor() + "_new_steps"); new_steps = ws()->CreateTensor("/mnt/" + anchor() + "/transpose_new_steps");
order->Reshape(vector<TIndex>(1, perms.size())); order->Reshape(vector<TIndex>(1, perms.size()));
old_steps->Reshape(vector<TIndex>(1, perms.size())); old_steps->Reshape(vector<TIndex>(1, perms.size()));
new_steps->Reshape(vector<TIndex>(1, perms.size())); new_steps->Reshape(vector<TIndex>(1, perms.size()));
...@@ -76,9 +76,9 @@ void TransposeGradientOp<Context>::RunWithType() { ...@@ -76,9 +76,9 @@ void TransposeGradientOp<Context>::RunWithType() {
template <class Context> template <class Context>
void TransposeGradientOp<Context>::RunOnDevice() { void TransposeGradientOp<Context>::RunOnDevice() {
output(0)->ReshapeLike(input(0)); output(0)->ReshapeLike(input(0));
order = ws()->GetTensor("_t_" + anchor() + "_order"); order = ws()->GetTensor("/mnt/" + anchor() + "/transpose_order");
old_steps = ws()->GetTensor("_t_" + anchor() + "_old_steps"); old_steps = ws()->GetTensor("/mnt/" + anchor() + "/transpose_old_steps");
new_steps = ws()->GetTensor("_t_" + anchor() + "_new_steps"); new_steps = ws()->GetTensor("/mnt/" + anchor() + "/transpose_new_steps");
if (input(0).template IsType<float>()) RunWithType<float>(); if (input(0).template IsType<float>()) RunWithType<float>();
#ifdef WITH_CUDA_FP16 #ifdef WITH_CUDA_FP16
......
...@@ -6,129 +6,228 @@ ...@@ -6,129 +6,228 @@
namespace dragon { namespace dragon {
template <class Context> template <typename T> template <class Context> template <typename T>
void BatchNormOp<Context>::RunWithType() { void BatchNormOp<Context>::TrainingRunWithType() {
INIT_MULTIPLIER(num_multiplier, num); INIT_MULTIPLIER(multiplier, NS);
INIT_MULTIPLIER(spatial_multiplier, spatial_dim); INIT_MULTIPLIER(num_multiplier, N);
TENSOR_FILL(input(1), vector<TIndex>(1, channels)); // history_mean INIT_MULTIPLIER(spatial_multiplier, S);
TENSOR_FILL(input(2), vector<TIndex>(1, channels)); // history_var TENSOR_FILL(input(1), vector<TIndex>(1, C)); // history_mean
TENSOR_FILL(input(3), vector<TIndex>(1, 1)); // history_factor TENSOR_FILL(input(2), vector<TIndex>(1, C)); // history_var
// get buffer
stddev = ws()->GetBuffer();
stddev->ReshapeLike(input(0));
auto* hMean_data = input(1).template mutable_data<T, Context>(); auto* hMean_data = input(1).template mutable_data<T, Context>();
auto* hVar_data = input(2).template mutable_data<T, Context>(); auto* hVar_data = input(2).template mutable_data<T, Context>();
auto* hFact_data = input(3).template mutable_data<T, CPUContext>();
auto* tMean_data = mean.template mutable_data<T, Context>(); auto* tMean_data = mean.template mutable_data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>(); auto* tVar_data = var->template mutable_data<T, Context>();
auto* Xdata = input(0).template data<T, Context>(); auto* Xdata = input(0).template data<T, Context>();
auto* Ydata = output(0)->template mutable_data<T, Context>(); auto* Ydata = output(0)->template mutable_data<T, Context>();
auto* NSMul_data = multiplier->template data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>(); auto* SMul_data = spatial_multiplier->template data<T, Context>();
auto* NMul_data = num_multiplier->template data<T, Context>(); auto* NMul_data = num_multiplier->template data<T, Context>();
auto* NByC_data = num_by_chans.template mutable_data<T, Context>(); auto* NC_data = num_by_chans.template mutable_data<T, Context>();
auto* Std_data = stddev->template mutable_data<T, Context>(); auto* Std_data = stddev->template mutable_data<T, Context>();
ctx().template Copy<T, Context, Context>(output(0)->count(), Ydata, Xdata);
if (use_global_stats) {
const float factor = dragon_cast<float, T>(hFact_data[0]); // compute mean
const float scale = factor == 0 ? 0 : 1.0 / factor; if (data_format == "NCHW") {
math::Scale<T, Context>(mean.count(), scale, hMean_data, tMean_data); math::Gemv<T, Context>(CblasNoTrans, NC, S,
math::Scale<T, Context>(mean.count(), scale, hVar_data, tVar_data); 1.0 / NS, Xdata, SMul_data,
} else { 0, NC_data);
math::Gemv<T, Context>(CblasNoTrans, nbychans, spatial_dim, math::Gemv<T, Context>(CblasTrans, N, C,
1.0 / (num * spatial_dim), 1.0, NC_data, NMul_data,
Xdata, SMul_data, 0, tMean_data);
0, } else if (data_format == "NHWC") {
NByC_data); math::Gemv<T, Context>(CblasTrans, NS, C,
math::Gemv<T, Context>(CblasTrans, num, channels, 1.0 / NS, Xdata, NSMul_data,
1.0, 0, tMean_data);
NByC_data, NMul_data,
0,
tMean_data);
} }
if (!inplace) { // subtract mean
ctx().template Copy<T, Context, Context>(input(0).count(), Ydata, Xdata); if (data_format == "NCHW") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
1.0, NMul_data, tMean_data,
0.0, NC_data);
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
-1.0, NC_data, SMul_data,
1.0, Ydata);
} else if (data_format == "NHWC") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
-1.0, NSMul_data, tMean_data,
1.0, Ydata);
} }
// subtract mean // compute variance
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, num, channels, 1, // note that we use VAR(X) = E((X - EX) ^ 2)
1.0,
NMul_data, tMean_data,
0.0,
NByC_data);
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, nbychans, spatial_dim, 1,
-1.0,
NByC_data, SMul_data,
1.0,
Ydata);
if (!use_global_stats && !is_recomputing) {
// Var(X) = E((X - EX) ^ 2)
math::Square<T, Context>(output(0)->count(), Ydata, Std_data); math::Square<T, Context>(output(0)->count(), Ydata, Std_data);
math::Gemv<T, Context>(CblasNoTrans, nbychans, spatial_dim, if (data_format == "NCHW") {
1.0 / (num * spatial_dim), math::Gemv<T, Context>(CblasNoTrans, NC, S,
Std_data, SMul_data, 1.0 / NS, Std_data, SMul_data,
0.0, NByC_data); 0.0, NC_data);
math::Gemv<T, Context>(CblasTrans, num, channels, math::Gemv<T, Context>(CblasTrans, N, C,
1.0, 1.0, NC_data, NMul_data,
NByC_data, NMul_data, 0.0, tVar_data);
0.0, } else if (data_format == "NHWC") {
tVar_data); math::Gemv<T, Context>(CblasTrans, NS, C,
// handle moving average 1.0 / NS, Std_data, NSMul_data,
0.0, tVar_data);
}
// compute moving average
if (!is_recomputing) {
if (mode == "CAFFE") {
CHECK_EQ(InputSize(), 4)
<< "\nThe number of inputs should be 4 if use CAFFE mode.";
TENSOR_FILL(input(3), vector<TIndex>(1, 1));
auto* hFact_data = input(3).template mutable_data<T, CPUContext>();
float factor = dragon_cast<float, T>(hFact_data[0]); float factor = dragon_cast<float, T>(hFact_data[0]);
factor *= momentum; factor += 1; factor *= momentum; factor += 1;
hFact_data[0] = dragon_cast<T, float>(factor); hFact_data[0] = dragon_cast<T, float>(factor);
int m = input(0).count() / channels; int m = input(0).count() / C;
float coeff = m > 1 ? float(m) / (m - 1) : 1; float coeff = m > 1 ? float(m) / (m - 1) : 1;
// History(X) = Cur(X) + momentum * History(X) // History(X) = Cur(X) + momentum * History(X)
math::Axpby<T, Context>(mean.count(), 1.0, tMean_data, momentum, hMean_data); math::Axpby<T, Context>(mean.count(), 1.0, tMean_data, momentum, hMean_data);
math::Axpby<T, Context>(mean.count(), coeff, tVar_data, momentum, hVar_data); math::Axpby<T, Context>(var->count(), coeff, tVar_data, momentum, hVar_data);
} else {
// History(X) = (1 - momentum) * Cur(X) + momentum * History(X)
math::Axpby<T, Context>(mean.count(), 1.0 - momentum, tMean_data, momentum, hMean_data);
math::Axpby<T, Context>(var->count(), 1.0 - momentum, tVar_data, momentum, hVar_data);
}
} }
// normalize var // compute stddev
math::AddScalar<T, Context>(mean.count(), eps, tVar_data); math::AddScalar<T, Context>(var->count(), eps, tVar_data);
math::Sqrt<T, Context>(mean.count(), tVar_data, tVar_data); math::Sqrt<T, Context>(var->count(), tVar_data, tVar_data);
// divide by stddev // divide by stddev
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, num, channels, 1, if (data_format == "NCHW") {
1.0, math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
NMul_data, tVar_data, 1.0, NMul_data, tVar_data,
0.0, 0.0, NC_data);
NByC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, nbychans, spatial_dim, 1, 1.0, NC_data, SMul_data,
1.0, 0.0, Std_data);
NByC_data, SMul_data, } else if (data_format == "NHWC") {
0.0, math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
Std_data); 1.0, NSMul_data, tVar_data,
0.0, Std_data);
}
math::Div<T, Context>(output(0)->count(), Ydata, Std_data, Ydata); math::Div<T, Context>(output(0)->count(), Ydata, Std_data, Ydata);
// release buffer
ws()->ReleaseBuffer(stddev); ws()->ReleaseBuffer(stddev);
} }
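Note: TrainingRunWithType builds the per-channel statistics out of GEMV/GEMM reductions: for NCHW the (N*C, S) view is first reduced over the spatial axis and then over the batch axis, for NHWC a single (N*S, C) reduction is enough, and the variance is taken as VAR(X) = E((X - EX)^2) on the already centred copy. The plain-loop version of what those reductions compute (NCHW layout, illustrative helper only):

    #include <vector>

    // Sketch: per-channel mean and variance of an NCHW tensor, equivalent to
    // the Gemv / Square / Gemv sequence used in the op.
    void ChannelStatsNCHW(const std::vector<float>& x, int N, int C, int S,
                          std::vector<float>& mean, std::vector<float>& var) {
        mean.assign(C, 0.0f); var.assign(C, 0.0f);
        for (int n = 0; n < N; ++n)
            for (int c = 0; c < C; ++c)
                for (int s = 0; s < S; ++s)
                    mean[c] += x[(n * C + c) * S + s];
        for (int c = 0; c < C; ++c) mean[c] /= float(N) * S;
        for (int n = 0; n < N; ++n)
            for (int c = 0; c < C; ++c)
                for (int s = 0; s < S; ++s) {
                    float d = x[(n * C + c) * S + s] - mean[c];
                    var[c] += d * d;
                }
        for (int c = 0; c < C; ++c) var[c] /= float(N) * S;
    }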
template <class Context> template <class Context> template <typename T>
void BatchNormOp<Context>::RunOnDevice() { void BatchNormOp<Context>::InferenceRunWithType() {
num = input(0).dim(0); channels = input(0).dim(1); INIT_MULTIPLIER(multiplier, NS);
spatial_dim = input(0).count(2); nbychans = num * channels; INIT_MULTIPLIER(num_multiplier, N);
vector<TIndex> dims(1, channels); INIT_MULTIPLIER(spatial_multiplier, S);
var = ws()->CreateTensor("_t_" + anchor() + "_bn_var"); TENSOR_FILL(input(1), vector<TIndex>(1, C)); // history_mean
mean.Reshape(dims); var->Reshape(dims); TENSOR_FILL(input(2), vector<TIndex>(1, C)); // history_var
num_by_chans.Reshape(vector<TIndex>(1, nbychans));
output(0)->ReshapeLike(input(0)); auto* hMean_data = input(1).template mutable_data<T, Context>();
auto* hVar_data = input(2).template mutable_data<T, Context>();
auto* tMean_data = mean.template mutable_data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>();
auto* Xdata = input(0).template data<T, Context>();
auto* Ydata = output(0)->template mutable_data<T, Context>();
auto* NSMul_data = multiplier->template data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>();
auto* NMul_data = num_multiplier->template data<T, Context>();
auto* NC_data = num_by_chans.template mutable_data<T, Context>();
auto* Std_data = stddev->template mutable_data<T, Context>();
ctx().template Copy<T, Context, Context>(input(0).count(), Ydata, Xdata);
// scale the mean and variance if necessary
if (mode == "CAFFE") {
CHECK_EQ(InputSize(), 4)
<< "\nThe number of inputs should be 4 if use CAFFE mode.";
TENSOR_FILL(input(3), vector<TIndex>(1, 1));
auto* hFact_data = input(3).template mutable_data<T, CPUContext>();
const float factor = dragon_cast<float, T>(hFact_data[0]);
const float scale = factor == 0 ? 0 : 1.0 / factor;
math::Scale<T, Context>(mean.count(), scale, hMean_data, tMean_data);
math::Scale<T, Context>(var->count(), scale, hVar_data, tVar_data);
} else {
ctx().template Copy<T, Context, Context>(mean.count(), tMean_data, hMean_data);
ctx().template Copy<T, Context, Context>(var->count(), tVar_data, hVar_data);
}
// subtract mean
if (data_format == "NCHW") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
1.0, NMul_data, tMean_data,
0.0, NC_data);
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
-1.0, NC_data, SMul_data,
1.0, Ydata);
} else if (data_format == "NHWC") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
-1.0, NSMul_data, tMean_data,
1.0, Ydata);
}
// compute stddev
math::AddScalar<T, Context>(var->count(), eps, tVar_data);
math::Sqrt<T, Context>(var->count(), tVar_data, tVar_data);
// divide by stddev
if (data_format == "NCHW") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
1.0, NMul_data, tVar_data,
0.0, NC_data);
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
1.0, NC_data, SMul_data,
0.0, Std_data);
} else if (data_format == "NHWC") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
1.0, NSMul_data, tVar_data,
0.0, Std_data);
}
math::Div<T, Context>(output(0)->count(), Ydata, Std_data, Ydata);
ws()->ReleaseBuffer(stddev);
}
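Note: in inference the history buffers are used directly unless mode == "CAFFE", in which case they hold Caffe-style running sums and are first rescaled by the accumulated factor stored in input(3):

    mean = history_mean / factor
    var  = history_var  / factor    (the scale falls back to 0 when factor == 0)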
template <class Context>
void BatchNormOp<Context>::Setup() {
// determine the mode
if (use_stats == -1) use_global_stats = phase() == "TEST" ? true : false; if (use_stats == -1) use_global_stats = phase() == "TEST" ? true : false;
else use_global_stats = use_stats == 1 ? true : false; else use_global_stats = use_stats == 1 ? true : false;
is_recomputing = ws()->GetTensor("_t_global_recompute_flag") is_recomputing = ws()->GetTensor("/opt/mirror_stage/recompute_flag")
->template data<bool, CPUContext>()[0]; ->template data<bool, CPUContext>()[0];
// if true, Act/Exp/Pow/Norm Ops can not exist before when train
if (inplace) output(0)->Share(input(0));
// determine the data format
TIndex channel_axis = axis;
data_format = "NCHW";
if (channel_axis == -1) channel_axis += (int)input(0).ndim();
if (channel_axis + 1 == (int)input(0).ndim()) data_format = "NHWC";
N = input(0).dim(0);
C = input(0).dim(channel_axis);
NC = N * C;
S = input(0).count() / NC;
NS = N * S;
// make resource
var = ws()->CreateTensor("/mnt/" + anchor() + "/bn_var");
stddev = ws()->GetBuffer();
stddev->ReshapeLike(input(0));
if (input(0).template IsType<float>()) RunWithType<float>(); // reshape
mean.Reshape(vector<TIndex>(1, C));
var->Reshape(vector<TIndex>(1, C));
num_by_chans.Reshape(vector<TIndex>(1, NC));
output(0)->ReshapeLike(input(0));
}
template <class Context>
void BatchNormOp<Context>::RunOnDevice() {
Setup();
if (input(0).template IsType<float>()) {
if (use_global_stats) InferenceRunWithType<float>();
else TrainingRunWithType<float>();
}
#ifdef WITH_CUDA_FP16 #ifdef WITH_CUDA_FP16
else if (input(0).template IsType<float16>()) RunWithType<float16>(); else if (input(0).template IsType<float16>()) {
if (use_global_stats) InferenceRunWithType<float16>();
else TrainingRunWithType<float16>();
}
#endif #endif
else LOG(FATAL) << "Unsupported input types."; else LOG(FATAL) << "Unsupported input types.";
} }
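Note: the refactor keeps two moving-average conventions, picked by the mode argument and applied only when the pass is not a recompute (see the is_recomputing guard in TrainingRunWithType above). In CAFFE mode the histories are unnormalized running sums paired with a running factor; in the default mode they are ordinary exponential moving averages:

    // CAFFE mode (the factor itself follows f <- 1 + momentum * f):
    //   h_mean <- batch_mean        + momentum * h_mean
    //   h_var  <- coeff * batch_var + momentum * h_var,   coeff = m / (m - 1)
    // default mode:
    //   h_mean <- (1 - momentum) * batch_mean + momentum * h_mean
    //   h_var  <- (1 - momentum) * batch_var  + momentum * h_var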
...@@ -137,120 +236,169 @@ DEPLOY_CPU(BatchNorm); ...@@ -137,120 +236,169 @@ DEPLOY_CPU(BatchNorm);
#ifdef WITH_CUDA #ifdef WITH_CUDA
DEPLOY_CUDA(BatchNorm); DEPLOY_CUDA(BatchNorm);
#endif #endif
OPERATOR_SCHEMA(BatchNorm).NumInputs(4).NumOutputs(1); OPERATOR_SCHEMA(BatchNorm).NumInputs(3, 4).NumOutputs(1);
template <class Context> template <typename T> template <class Context> template <typename T>
void BatchNormGradientOp<Context>::RunWithType() { void BatchNormGradientOp<Context>::TrainingRunWithType() {
INIT_MULTIPLIER(num_multiplier, num); INIT_MULTIPLIER(multiplier, NS);
INIT_MULTIPLIER(spatial_multiplier, spatial_dim); INIT_MULTIPLIER(num_multiplier, N);
INIT_MULTIPLIER(spatial_multiplier, S);
// get buffer
stddev = ws()->GetBuffer();
stddev->ReshapeLike(input(0));
auto* dYdata = input(-1).template data<T, Context>(); auto* dYdata = input(-1).template data<T, Context>();
auto* dXdata = output(0)->template mutable_data<T, Context>(); auto* dXdata = output(0)->template mutable_data<T, Context>();
auto* Std_data = stddev->template mutable_data<T, Context>(); auto* Std_data = stddev->template mutable_data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>(); auto* tVar_data = var->template mutable_data<T, Context>();
auto* NSMul_data = multiplier->template data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>(); auto* SMul_data = spatial_multiplier->template data<T, Context>();
auto* NMul_data = num_multiplier->template data<T, Context>(); auto* NMul_data = num_multiplier->template data<T, Context>();
auto* NByC_data = num_by_chans.template mutable_data<T, Context>(); auto* NC_data = num_by_chans.template mutable_data<T, Context>();
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, num, channels, 1, if (data_format == "NCHW") {
1.0, math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
NMul_data, tVar_data, 1.0, NMul_data, tVar_data,
0.0, 0.0, NC_data);
NByC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, nbychans, spatial_dim, 1, 1.0, NC_data, SMul_data,
1.0, 0.0, Std_data);
NByC_data, SMul_data, } else if (data_format == "NHWC") {
0.0, math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
Std_data); 1.0, NSMul_data, tVar_data,
0.0, Std_data);
if (use_global_stats) {
math::Div<T, Context>(output(0)->count(), dYdata, Std_data, dXdata);
ws()->ReleaseBuffer(stddev);
return;
} }
auto* Ydata = input(-2).template data<T, Context>(); auto* Ydata = input(1).template data<T, Context>();
math::Mul<T, Context>(output(0)->count(), Ydata, dYdata, dXdata); math::Mul<T, Context>(output(0)->count(), Ydata, dYdata, dXdata);
// sum(dE/dY \cdot Y) // sum(dE/dY \cdot Y)
math::Gemv<T, Context>(CblasNoTrans, nbychans, spatial_dim, if (data_format == "NCHW") {
1.0, math::Gemv<T, Context>(CblasNoTrans, NC, S,
dXdata, SMul_data, 1.0, dXdata, SMul_data,
0.0, 0.0, NC_data);
NByC_data); math::Gemv<T, Context>(CblasTrans, N, C,
math::Gemv<T, Context>(CblasTrans, num, channels, 1.0, NC_data, NMul_data,
1.0, 0.0, tVar_data);
NByC_data, NMul_data, math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
0.0, 1.0, NMul_data, tVar_data,
tVar_data); 0.0, NC_data);
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, num, channels, 1, math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
1.0, 1.0, NC_data, SMul_data,
NMul_data, tVar_data, 0.0, dXdata);
0.0, } else if (data_format == "NHWC") {
NByC_data); math::Gemv<T, Context>(CblasTrans, NS, C,
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, nbychans, spatial_dim, 1, 1.0, dXdata, NSMul_data,
1.0, 0.0, tVar_data);
NByC_data, SMul_data, math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
0.0, 1.0, NSMul_data, tVar_data,
dXdata); 0.0, dXdata);
}
// sum(dE/dY \cdot Y) \cdot Y // sum(dE/dY \cdot Y) \cdot Y
math::Mul<T, Context>(output(0)->count(), Ydata, dXdata, dXdata); math::Mul<T, Context>(output(0)->count(), Ydata, dXdata, dXdata);
// sum(dE/dY) + sum(dE/dY \cdot Y) \cdot Y // sum(dE/dY) + sum(dE/dY \cdot Y) \cdot Y
math::Gemv<T, Context>(CblasNoTrans, nbychans, spatial_dim, if (data_format == "NCHW") {
1.0, math::Gemv<T, Context>(CblasNoTrans, NC, S,
dYdata, SMul_data, 1.0, dYdata, SMul_data,
0.0, 0.0, NC_data);
NByC_data); math::Gemv<T, Context>(CblasTrans, N, C,
math::Gemv<T, Context>(CblasTrans, num, channels, 1.0, NC_data, NMul_data,
1.0, 0.0, tVar_data);
NByC_data, NMul_data, math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
0.0, 1.0, NMul_data, tVar_data,
tVar_data); 0.0, NC_data);
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, num, channels, 1, math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
1.0, 1.0, NC_data, SMul_data,
NMul_data, tVar_data, 1.0, dXdata);
0.0, } else if (data_format == "NHWC") {
NByC_data); math::Gemv<T, Context>(CblasTrans, NS, C,
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, nbychans, spatial_dim, 1, 1.0, dYdata, NSMul_data,
1.0, 0.0, tVar_data);
NByC_data, SMul_data, math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
1.0, 1.0, NSMul_data, tVar_data,
dXdata); 1.0, dXdata);
}
// dE/dY - mean(dE/dY)- mean(dE/dY \cdot Y) \cdot Y // dE/dY - mean(dE/dY)- mean(dE/dY \cdot Y) \cdot Y
// = dE/dY - mean(sum(dE/dY) + sum(dE/dY \cdot Y) \cdot Y) // = dE/dY - mean(sum(dE/dY) + sum(dE/dY \cdot Y) \cdot Y)
math::Axpby<T, Context>(output(0)->count(), 1.0, dYdata, math::Axpby<T, Context>(output(0)->count(), 1.0, dYdata, -1.0 / NS, dXdata);
-1.0 / (num * spatial_dim),
dXdata);
// divide by stddev // divide by stddev
math::Div<T, Context>(output(0)->count(), dXdata, Std_data, dXdata); math::Div<T, Context>(output(0)->count(), dXdata, Std_data, dXdata);
ws()->ReleaseBuffer(stddev);
}
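Note: the training gradient is the usual batch-norm backward without scale/bias. With Y the normalized output and the per-channel means taken over the N*S elements, the Gemv/Gemm pairs above accumulate exactly

    dX = ( dY - mean(dY) - Y * mean(dY * Y) ) / stddev

which is what the final Axpby (coefficient -1/NS) and the element-wise Div produce.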
// release buffer template <class Context> template <typename T>
void BatchNormGradientOp<Context>::InferenceRunWithType() {
INIT_MULTIPLIER(multiplier, NS);
INIT_MULTIPLIER(num_multiplier, N);
INIT_MULTIPLIER(spatial_multiplier, S);
auto* dYdata = input(-1).template data<T, Context>();
auto* dXdata = output(0)->template mutable_data<T, Context>();
auto* Std_data = stddev->template mutable_data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>();
auto* NSMul_data = multiplier->template data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>();
auto* NMul_data = num_multiplier->template data<T, Context>();
auto* NC_data = num_by_chans.template mutable_data<T, Context>();
if (data_format == "NCHW") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
1.0, NMul_data, tVar_data,
0.0, NC_data);
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
1.0, NC_data, SMul_data,
0.0, Std_data);
} else if (data_format == "NHWC") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
1.0, NSMul_data, tVar_data,
0.0, Std_data);
}
math::Div<T, Context>(output(0)->count(), dYdata, Std_data, dXdata);
ws()->ReleaseBuffer(stddev); ws()->ReleaseBuffer(stddev);
} }
template <class Context> template <class Context>
void BatchNormGradientOp<Context>::RunOnDevice() { void BatchNormGradientOp<Context>::Setup() {
num = input(0).dim(0); channels = input(0).dim(1); // determine the mode
spatial_dim = input(0).count(2); nbychans = num * channels; if (use_stats == -1) use_global_stats = phase() == "TEST" ? true : false;
var = ws()->GetTensor("_t_" + anchor() + "_bn_var"); else use_global_stats = use_stats == 1 ? true : false;
num_by_chans.Reshape(vector<TIndex>(1, nbychans));
// determine the data format
TIndex channel_axis = axis;
data_format = "NCHW";
if (channel_axis == -1) channel_axis += (int)input(0).ndim();
if (channel_axis + 1 == (int)input(0).ndim()) data_format = "NHWC";
N = input(0).dim(0);
C = input(0).dim(channel_axis);
NC = N * C;
S = input(0).count() / NC;
NS = N * S;
// make resource
var = ws()->GetTensor("/mnt/" + anchor() + "/bn_var");
stddev = ws()->GetBuffer();
stddev->ReshapeLike(input(0));
// reshape
num_by_chans.Reshape(vector<TIndex>(1, NC));
output(0)->ReshapeLike(input(0)); output(0)->ReshapeLike(input(0));
}
if (use_stats == -1) use_global_stats = phase() == "TEST" ? true : false; template <class Context>
else use_global_stats = use_stats == 1 ? true : false; void BatchNormGradientOp<Context>::RunOnDevice() {
Setup();
if (input(0).template IsType<float>()) RunWithType<float>(); if (input(0).template IsType<float>()) {
if (use_global_stats) InferenceRunWithType<float>();
else TrainingRunWithType<float>();
}
#ifdef WITH_CUDA_FP16 #ifdef WITH_CUDA_FP16
else if (input(0).template IsType<float16>()) RunWithType<float16>(); else if (input(0).template IsType<float16>()) {
if (use_global_stats) InferenceRunWithType<float16>();
else TrainingRunWithType<float16>();
}
#endif #endif
else LOG(FATAL) << "Unsupported input types."; else LOG(FATAL) << "Unsupported input types.";
} }
...@@ -272,15 +420,4 @@ class GetBatchNormGradient final : public GradientMakerBase { ...@@ -272,15 +420,4 @@ class GetBatchNormGradient final : public GradientMakerBase {
}; };
REGISTER_GRADIENT(BatchNorm, GetBatchNormGradient); REGISTER_GRADIENT(BatchNorm, GetBatchNormGradient);
class GetBNGradient final : public GradientMakerBase {
public:
GRADIENT_MAKER_CTOR(GetBNGradient);
vector<OperatorDef> MakeDefs() override {
return SingleDef(def.type() + "Gradient", "",
vector<string> {I(0), I(1), I(2), I(3), GO(0)},
vector<string> {GI(0), GI(3), GI(4)});
}
};
REGISTER_GRADIENT(BN, GetBNGradient);
} // namespace dragon } // namespace dragon
\ No newline at end of file
...@@ -6,105 +6,140 @@ ...@@ -6,105 +6,140 @@
namespace dragon { namespace dragon {
template <class Context> template <typename T> template <class Context> template <typename T>
void BatchRenormOp<Context>::RunWithType() { void BatchRenormOp<Context>::TrainingRunWithType() {
INIT_MULTIPLIER(num_multiplier, num); INIT_MULTIPLIER(multiplier, NS);
INIT_MULTIPLIER(spatial_multiplier, spatial_dim); INIT_MULTIPLIER(num_multiplier, N);
TENSOR_FILL(input(1), vector<TIndex>(1, channels)); // history_mean INIT_MULTIPLIER(spatial_multiplier, S);
TENSOR_FILL(input(2), vector<TIndex>(1, channels)); // history_var TENSOR_FILL(input(1), vector<TIndex>(1, C)); // history_mean
TENSOR_FILL(input(3), vector<TIndex>(1, 1)); // history_factor TENSOR_FILL(input(2), vector<TIndex>(1, C)); // history_var
// get buffer
stddev = ws()->GetBuffer();
stddev->ReshapeLike(input(0));
auto* hMean_data = input(1).template mutable_data<T, Context>(); auto* hMean_data = input(1).template mutable_data<T, Context>();
auto* hVar_data = input(2).template mutable_data<T, Context>(); auto* hVar_data = input(2).template mutable_data<T, Context>();
auto* hFact_data = input(3).template mutable_data<T, CPUContext>();
auto* tMean_data = mean.template mutable_data<T, Context>(); auto* tMean_data = mean.template mutable_data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>(); auto* tVar_data = var->template mutable_data<T, Context>();
auto* tDdata = d.template mutable_data<T, Context>();
auto* tRdata = r->template mutable_data<T, Context>();
auto* Xdata = input(0).template data<T, Context>(); auto* Xdata = input(0).template data<T, Context>();
auto* Ydata = output(0)->template mutable_data<T, Context>(); auto* Ydata = output(0)->template mutable_data<T, Context>();
auto* NSMul_data = multiplier->template data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>(); auto* SMul_data = spatial_multiplier->template data<T, Context>();
auto* NMul_data = num_multiplier->template data<T, Context>(); auto* NMul_data = num_multiplier->template data<T, Context>();
auto* NByC_data = num_by_chans.template mutable_data<T, Context>(); auto* NC_data = num_by_chans.template mutable_data<T, Context>();
auto* Std_data = stddev->template mutable_data<T, Context>(); auto* Std_data = stddev->template mutable_data<T, Context>();
T* thMean_data = nullptr; ctx().template Copy<T, Context, Context>(input(0).count(), Ydata, Xdata);
T* thVar_data = nullptr;
T* XNorm_data = nullptr;
const T scale = hFact_data[0] == 0 ? 0 : 1.0 / hFact_data[0];
if (use_global_stats) { auto* tDdata = d.template mutable_data<T, Context>();
math::Scale<T, Context>(mean.count(), scale, hMean_data, tMean_data); auto* tRdata = r->template mutable_data<T, Context>();
math::Scale<T, Context>(mean.count(), scale, hVar_data, tVar_data); auto* thMean_data = t_h_mean.template mutable_data<T, Context>();
} else { auto* thVar_data = t_h_var.template mutable_data<T, Context>();
thMean_data = t_h_mean.template mutable_data<T, Context>();
thVar_data = t_h_var.template mutable_data<T, Context>(); // scale the mean and variance if necessary
if (mode == "CAFFE") {
CHECK_EQ(InputSize(), 4)
<< "\nThe number of inputs should be 4 if use CAFFE mode.";
TENSOR_FILL(input(3), vector<TIndex>(1, 1));
auto* hFact_data = input(3).template mutable_data<T, CPUContext>();
const float factor = dragon_cast<float, T>(hFact_data[0]);
const float scale = factor == 0 ? 0 : 1.0 / factor;
math::Scale<T, Context>(mean.count(), scale, hMean_data, thMean_data); math::Scale<T, Context>(mean.count(), scale, hMean_data, thMean_data);
math::Scale<T, Context>(mean.count(), scale, hVar_data, thVar_data); math::Scale<T, Context>(mean.count(), scale, hVar_data, thVar_data);
math::Gemv<T, Context>(CblasNoTrans, nbychans, spatial_dim, } else {
1.0 / (num * spatial_dim), ctx().template Copy<T, Context, Context>(mean.count(), thMean_data, hMean_data);
Xdata, SMul_data, ctx().template Copy<T, Context, Context>(var->count(), thVar_data, hVar_data);
0,
NByC_data);
math::Gemv<T, Context>(CblasTrans, num, channels,
1.0,
NByC_data, NMul_data,
0,
tMean_data);
} }
if (!inplace) { // compute mean
ctx().template Copy<T, Context, Context>(input(0).count(), Ydata, Xdata); if (data_format == "NCHW") {
math::Gemv<T, Context>(CblasNoTrans, NC, S,
1.0 / NS, Xdata, SMul_data,
0, NC_data);
math::Gemv<T, Context>(CblasTrans, N, C,
1.0, NC_data, NMul_data,
0, tMean_data);
} else if (data_format == "NHWC") {
math::Gemv<T, Context>(CblasTrans, NS, C,
1.0 / NS, Xdata, NSMul_data,
0, tMean_data);
} }
// subtract mean // subtract mean
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, num, channels, 1, if (data_format == "NCHW") {
1.0, math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
NMul_data, tMean_data, 1.0, NMul_data, tMean_data,
0.0, 0.0, NC_data);
NByC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, nbychans, spatial_dim, 1, -1.0, NC_data, SMul_data,
-1.0, 1.0, Ydata);
NByC_data, SMul_data, } else if (data_format == "NHWC") {
1.0, math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
Ydata); -1.0, NSMul_data, tMean_data,
1.0, Ydata);
if (!use_global_stats && !is_recomputing) { }
// Var(X) = E((X - EX) ^ 2)
math::Pow<T, Context>(stddev->count(), 2, Ydata, Std_data); // compute variance
math::Gemv<T, Context>(CblasNoTrans, nbychans, spatial_dim, // note that we use VAR(X) = E((X - EX) ^ 2)
1.0 / (num * spatial_dim), math::Square<T, Context>(output(0)->count(), Ydata, Std_data);
Std_data, SMul_data, if (data_format == "NCHW") {
0.0, math::Gemv<T, Context>(CblasNoTrans, NC, S,
NByC_data); 1.0 / NS, Std_data, SMul_data,
math::Gemv<T, Context>(CblasTrans, num, channels, 0.0, NC_data);
1.0, math::Gemv<T, Context>(CblasTrans, N, C,
NByC_data, NMul_data, 1.0, NC_data, NMul_data,
0.0, 0.0, tVar_data);
tVar_data); } else if (data_format == "NHWC") {
// update moving average math::Gemv<T, Context>(CblasTrans, NS, C,
hFact_data[0] *= momentum; hFact_data[0] += 1; 1.0 / NS, Std_data, NSMul_data,
int m = input(0).count() / channels; 0.0, tVar_data);
T factor = m > 1 ? T(m) / (m - 1) : 1; }
// compute moving average
if (!is_recomputing) {
if (mode == "CAFFE") {
CHECK_EQ(InputSize(), 4)
<< "\nThe number of inputs should be 4 if use CAFFE mode.";
TENSOR_FILL(input(3), vector<TIndex>(1, 1));
auto* hFact_data = input(3).template mutable_data<T, CPUContext>();
float factor = dragon_cast<float, T>(hFact_data[0]);
factor *= momentum; factor += 1;
hFact_data[0] = dragon_cast<T, float>(factor);
int m = input(0).count() / C;
float coeff = m > 1 ? float(m) / (m - 1) : 1;
// History(X) = Cur(X) + momentum * History(X)
math::Axpby<T, Context>(mean.count(), 1.0, tMean_data, momentum, hMean_data); math::Axpby<T, Context>(mean.count(), 1.0, tMean_data, momentum, hMean_data);
math::Axpby<T, Context>(mean.count(), factor, tVar_data, momentum, hVar_data); math::Axpby<T, Context>(var->count(), coeff, tVar_data, momentum, hVar_data);
} else {
// History(X) = (1 - momentum) * Cur(X) + momentum * History(X)
math::Axpby<T, Context>(mean.count(), 1.0 - momentum, tMean_data, momentum, hMean_data);
math::Axpby<T, Context>(var->count(), 1.0 - momentum, tVar_data, momentum, hVar_data);
}
} }
// normalize var // compute stddev
math::AddScalar<T, Context>(mean.count(), eps, tVar_data); math::AddScalar<T, Context>(var->count(), eps, tVar_data);
math::Pow<T, Context>(mean.count(), 0.5, tVar_data, tVar_data); math::Sqrt<T, Context>(var->count(), tVar_data, tVar_data);
// divide by stddev
if (data_format == "NCHW") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
1.0, NMul_data, tVar_data,
0.0, NC_data);
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
1.0, NC_data, SMul_data,
0.0, Std_data);
} else if (data_format == "NHWC") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
1.0, NSMul_data, tVar_data,
0.0, Std_data);
}
math::Div<T, Context>(output(0)->count(), Ydata, Std_data, Ydata);
if (!use_global_stats && !is_recomputing) { // compute renorm
// normalize history var if (!is_recomputing) {
math::AddScalar<T, Context>(mean.count(), eps, thVar_data); // compute history stddev
math::Pow<T, Context>(mean.count(), 0.5, thVar_data, thVar_data); math::AddScalar<T, Context>(var->count(), eps, thVar_data);
math::Sqrt<T, Context>(var->count(), thVar_data, thVar_data);
// compute r // compute r
math::Div<T, Context>(mean.count(), tVar_data, thVar_data, tRdata); math::Div<T, Context>(var->count(), tVar_data, thVar_data, tRdata);
math::Clip<T, Context>(mean.count(), 1.0 / t_r_max, t_r_max, tRdata); math::Clip<T, Context>(var->count(), 1.0 / t_r_max, t_r_max, tRdata);
// compute d // compute d
math::Sub<T, Context>(mean.count(), tMean_data, thMean_data, tDdata); math::Sub<T, Context>(mean.count(), tMean_data, thMean_data, tDdata);
...@@ -117,78 +152,159 @@ void BatchRenormOp<Context>::RunWithType() { ...@@ -117,78 +152,159 @@ void BatchRenormOp<Context>::RunWithType() {
t_val += t_delta; t_val += t_delta;
} }
// divide by var // apply renorm
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, num, channels, 1,
1.0,
NMul_data, tVar_data,
0.0,
NByC_data);
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, nbychans, spatial_dim, 1,
1.0,
NByC_data, SMul_data,
0.0,
Std_data);
math::Div<T, Context>(stddev->count(), Ydata, Std_data, Ydata);
if (!use_global_stats) {
// store x_norm for backward // store x_norm for backward
XNorm_data = x_norm->template mutable_data<T, Context>(); auto* XNorm_data = x_norm->template mutable_data<T, Context>();
ctx().template Copy<T, Context, Context>(output(0)->count(), XNorm_data, Ydata); ctx().template Copy<T, Context, Context>(output(0)->count(), XNorm_data, Ydata);
// correction: mul by r // correction: mul by r
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, num, channels, 1, if (data_format == "NCHW") {
1.0, math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
NMul_data, tRdata, 1.0, NMul_data, tRdata,
0.0, 0.0, NC_data);
NByC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, nbychans, spatial_dim, 1, 1.0, NC_data, SMul_data,
1.0, 0.0, Std_data);
NByC_data, SMul_data, } else if (data_format == "NHWC") {
0.0, math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
Std_data); 1.0, NSMul_data, tRdata,
0.0, Std_data);
}
math::Mul<T, Context>(output(0)->count(), Ydata, Std_data, Ydata); math::Mul<T, Context>(output(0)->count(), Ydata, Std_data, Ydata);
// correction: add by d // correction: add by d
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, num, channels, 1, if (data_format == "NCHW") {
1.0, math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
NMul_data, tDdata, 1.0, NMul_data, tDdata,
0.0, 0.0, NC_data);
NByC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, nbychans, spatial_dim, 1, 1.0, NC_data, SMul_data,
1.0, 1.0, Ydata);
NByC_data, SMul_data, } else if (data_format == "NHWC") {
1.0, math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
Ydata); 1.0, NSMul_data, tDdata,
1.0, Ydata);
} }
// release buffer
ws()->ReleaseBuffer(stddev); ws()->ReleaseBuffer(stddev);
} }
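Note: the renorm correction follows the Batch Renormalization scheme (Ioffe, 2017): the batch-normalized activation x_hat (stashed in x_norm for the backward pass) is rescaled and shifted by per-channel constants built from the running statistics,

    r = clip(sigma_batch / sigma_running, 1 / r_max, r_max)
    d = clip((mu_batch - mu_running) / sigma_running, -d_max, d_max)
    y = x_hat * r + d

The visible lines compute r and the numerator of d; the division, the clipping of d, and the annealing of the effective bounds (the t_val += t_delta update) live in the elided hunk.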
template <class Context> template <class Context> template <typename T>
void BatchRenormOp<Context>::RunOnDevice() { void BatchRenormOp<Context>::InferenceRunWithType() {
num = input(0).dim(0); channels = input(0).dim(1); INIT_MULTIPLIER(multiplier, NS);
spatial_dim = input(0).count(2); nbychans = num * channels; INIT_MULTIPLIER(num_multiplier, N);
vector<TIndex> dims(1, channels); INIT_MULTIPLIER(spatial_multiplier, S);
var = ws()->CreateTensor("_t_" + anchor() + "_bn_var"); TENSOR_FILL(input(1), vector<TIndex>(1, C)); // history_mean
r = ws()->CreateTensor("_t_" + anchor() + "_bn_r"); TENSOR_FILL(input(2), vector<TIndex>(1, C)); // history_var
mean.Reshape(dims); var->Reshape(dims);
d.Reshape(dims); r->Reshape(dims);
t_h_mean.Reshape(dims); t_h_var.Reshape(dims);
num_by_chans.Reshape(vector<TIndex>(1, nbychans));
x_norm = ws()->CreateTensor("_t_" + anchor() + "_bn_x_norm");
x_norm->ReshapeLike(input(0));
output(0)->ReshapeLike(input(0)); auto* hMean_data = input(1).template mutable_data<T, Context>();
auto* hVar_data = input(2).template mutable_data<T, Context>();
auto* tMean_data = mean.template mutable_data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>();
auto* Xdata = input(0).template data<T, Context>();
auto* Ydata = output(0)->template mutable_data<T, Context>();
auto* NSMul_data = multiplier->template data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>();
auto* NMul_data = num_multiplier->template data<T, Context>();
auto* NC_data = num_by_chans.template mutable_data<T, Context>();
auto* Std_data = stddev->template mutable_data<T, Context>();
ctx().template Copy<T, Context, Context>(input(0).count(), Ydata, Xdata);
// scale the mean and variance if necessary
if (mode == "CAFFE") {
CHECK_EQ(InputSize(), 4)
<< "\nThe number of inputs should be 4 if use CAFFE mode.";
TENSOR_FILL(input(3), vector<TIndex>(1, 1));
auto* hFact_data = input(3).template mutable_data<T, CPUContext>();
const float factor = dragon_cast<float, T>(hFact_data[0]);
const float scale = factor == 0 ? 0 : 1.0 / factor;
math::Scale<T, Context>(mean.count(), scale, hMean_data, tMean_data);
math::Scale<T, Context>(var->count(), scale, hVar_data, tVar_data);
} else {
ctx().template Copy<T, Context, Context>(mean.count(), tMean_data, hMean_data);
ctx().template Copy<T, Context, Context>(var->count(), tVar_data, hVar_data);
}
// subtract mean
if (data_format == "NCHW") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
1.0, NMul_data, tMean_data,
0.0, NC_data);
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
-1.0, NC_data, SMul_data,
1.0, Ydata);
} else if (data_format == "NHWC") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
-1.0, NSMul_data, tMean_data,
1.0, Ydata);
}
// compute stddev
math::AddScalar<T, Context>(var->count(), eps, tVar_data);
math::Sqrt<T, Context>(var->count(), tVar_data, tVar_data);
// divide by stddev
if (data_format == "NCHW") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
1.0, NMul_data, tVar_data,
0.0, NC_data);
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
1.0, NC_data, SMul_data,
0.0, Std_data);
} else if (data_format == "NHWC") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
1.0, NSMul_data, tVar_data,
0.0, Std_data);
}
math::Div<T, Context>(output(0)->count(), Ydata, Std_data, Ydata);
ws()->ReleaseBuffer(stddev);
}
template <class Context>
void BatchRenormOp<Context>::Setup() {
// determine the mode
if (use_stats == -1) use_global_stats = phase() == "TEST" ? true : false; if (use_stats == -1) use_global_stats = phase() == "TEST" ? true : false;
else use_global_stats = use_stats == 1 ? true : false; else use_global_stats = use_stats == 1 ? true : false;
is_recomputing = ws()->GetTensor("_t_global_recompute_flag") is_recomputing = ws()->GetTensor("/opt/mirror_stage/recompute_flag")
->template data<bool, CPUContext>()[0]; ->template data<bool, CPUContext>()[0];
// if true, Act/Exp/Pow/Norm Ops can not exist before when train
if (inplace) output(0)->Share(input(0));
if (input(0).template IsType<float>()) RunWithType<float>(); // determine the data format
TIndex channel_axis = axis;
data_format = "NCHW";
if (channel_axis == -1) channel_axis += (int)input(0).ndim();
if (channel_axis + 1 == (int)input(0).ndim()) data_format = "NHWC";
N = input(0).dim(0);
C = input(0).dim(channel_axis);
NC = N * C;
S = input(0).count() / NC;
NS = N * S;
// make resource
var = ws()->CreateTensor("/mnt/" + anchor() + "/bn_var");
r = ws()->CreateTensor("/mnt/" + anchor() + "/bn_r");
x_norm = ws()->CreateTensor("/mnt/" + anchor() + "/bn_x_norm");
stddev = ws()->GetBuffer();
stddev->ReshapeLike(input(0));
// reshape
mean.Reshape(vector<TIndex>(1, C));
var->Reshape(vector<TIndex>(1, C));
d.Reshape(vector<TIndex>(1, C));
r->Reshape(vector<TIndex>(1, C));
t_h_mean.Reshape(vector<TIndex>(1, C));
t_h_var.Reshape(vector<TIndex>(1, C));
num_by_chans.Reshape(vector<TIndex>(1, NC));
x_norm->ReshapeLike(input(0));
output(0)->ReshapeLike(input(0));
}
template <class Context>
void BatchRenormOp<Context>::RunOnDevice() {
Setup();
if (input(0).template IsType<float>()) {
if (use_global_stats) InferenceRunWithType<float>();
else TrainingRunWithType<float>();
}
else LOG(FATAL) << "Unsupported input types."; else LOG(FATAL) << "Unsupported input types.";
} }
...@@ -196,145 +312,185 @@ DEPLOY_CPU(BatchRenorm); ...@@ -196,145 +312,185 @@ DEPLOY_CPU(BatchRenorm);
#ifdef WITH_CUDA #ifdef WITH_CUDA
DEPLOY_CUDA(BatchRenorm); DEPLOY_CUDA(BatchRenorm);
#endif #endif
OPERATOR_SCHEMA(BatchRenorm).NumInputs(4).NumOutputs(1); OPERATOR_SCHEMA(BatchRenorm).NumInputs(3, 4).NumOutputs(1);
template <class Context> template <typename T>
void BatchRenormGradientOp<Context>::TrainingRunWithType() {
INIT_MULTIPLIER(multiplier, NS);
INIT_MULTIPLIER(num_multiplier, N);
INIT_MULTIPLIER(spatial_multiplier, S);
auto* dYdata = input(-1).template data<T, Context>();
auto* dXdata = output(0)->template mutable_data<T, Context>();
auto* Std_data = stddev->template mutable_data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>();
auto* NSMul_data = multiplier->template data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>();
auto* NMul_data = num_multiplier->template data<T, Context>();
auto* NC_data = num_by_chans.template mutable_data<T, Context>();
auto* tRdata = r->template data<T, Context>();
auto* XNorm_data = x_norm->template data<T, Context>();
auto* tMean_data = mean.template mutable_data<T, Context>();
// buffer <- dE/dY \cdot r
if (data_format == "NCHW") {
    math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
        1.0, NMul_data, tRdata,
        0.0, NC_data);
    math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
        1.0, NC_data, SMul_data,
        0.0, Std_data);
} else if (data_format == "NHWC") {
    math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
        1.0, NSMul_data, tRdata,
        0.0, Std_data);
}
math::Mul<T, Context>(output(0)->count(), dYdata, Std_data, Std_data);
// sum(dE/dY \cdot Y)
math::Mul<T, Context>(output(0)->count(), XNorm_data, Std_data, dXdata);
if (data_format == "NCHW") {
    math::Gemv<T, Context>(CblasNoTrans, NC, S,
        1.0, dXdata, SMul_data,
        0.0, NC_data);
    math::Gemv<T, Context>(CblasTrans, N, C,
        1.0, NC_data, NMul_data,
        0.0, tMean_data);
    math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
        1.0, NMul_data, tMean_data,
        0.0, NC_data);
    math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
        1.0, NC_data, SMul_data,
        0.0, dXdata);
} else if (data_format == "NHWC") {
    math::Gemv<T, Context>(CblasTrans, NS, C,
        1.0, dXdata, NSMul_data,
        0.0, tMean_data);
    math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
        1.0, NSMul_data, tMean_data,
        0.0, dXdata);
}
// sum(dE/dY \cdot Y) \cdot Y
math::Mul<T, Context>(output(0)->count(), XNorm_data, dXdata, dXdata);
// sum(dE/dY) + sum(dE/dY \cdot Y) \cdot Y
if (data_format == "NCHW") {
    math::Gemv<T, Context>(CblasNoTrans, NC, S,
        1.0, Std_data, SMul_data,
        0.0, NC_data);
    math::Gemv<T, Context>(CblasTrans, N, C,
        1.0, NC_data, NMul_data,
        0.0, tMean_data);
    math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
        1.0, NMul_data, tMean_data,
        0.0, NC_data);
    math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
        1.0, NC_data, SMul_data,
        1.0, dXdata);
} else if (data_format == "NHWC") {
    math::Gemv<T, Context>(CblasTrans, NS, C,
        1.0, Std_data, NSMul_data,
        0.0, tMean_data);
    math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
        1.0, NSMul_data, tMean_data,
        1.0, dXdata);
}
// dE/dY - mean(dE/dY) - mean(dE/dY \cdot Y) \cdot Y
// = dE/dY - mean(sum(dE/dY) + sum(dE/dY \cdot Y) \cdot Y)
math::Axpby<T, Context>(output(0)->count(), 1.0, Std_data, -1.0 / NS, dXdata);
// divide var
if (data_format == "NCHW") {
    math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
        1.0, NMul_data, tVar_data,
        0.0, NC_data);
    math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
        1.0, NC_data, SMul_data,
        0.0, Std_data);
} else if (data_format == "NHWC") {
    math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
        1.0, NSMul_data, tVar_data,
        0.0, Std_data);
}
math::Div<T, Context>(output(0)->count(), dXdata, Std_data, dXdata);
ws()->ReleaseBuffer(stddev);
x_norm->Reset();
}
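Reading the sequence of Gemv/Gemm reductions above as math (a sketch of one interpretation, with per-channel means taken over the N x S positions, \hat{x} the stored x_norm, r the renorm factor and \sigma_B the batch stddev):

\[ \frac{\partial E}{\partial x} = \frac{1}{\sigma_B}\left( r\,\frac{\partial E}{\partial y} - \operatorname{mean}\!\left(r\,\frac{\partial E}{\partial y}\right) - \hat{x}\,\operatorname{mean}\!\left(r\,\frac{\partial E}{\partial y}\,\hat{x}\right) \right) \]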
template <class Context> template <typename T>
void BatchRenormGradientOp<Context>::InferenceRunWithType() {
INIT_MULTIPLIER(multiplier, NS);
INIT_MULTIPLIER(num_multiplier, N);
INIT_MULTIPLIER(spatial_multiplier, S);
auto* dYdata = input(-1).template data<T, Context>();
auto* dXdata = output(0)->template mutable_data<T, Context>();
auto* Std_data = stddev->template mutable_data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>();
auto* NSMul_data = multiplier->template data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>();
auto* NMul_data = num_multiplier->template data<T, Context>();
auto* NC_data = num_by_chans.template mutable_data<T, Context>();
if (data_format == "NCHW") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
1.0, NMul_data, tVar_data,
0.0, NC_data);
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
1.0, NC_data, SMul_data,
0.0, Std_data);
} else if (data_format == "NHWC") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
1.0, NSMul_data, tVar_data,
0.0, Std_data);
}
math::Div<T, Context>(output(0)->count(), dYdata, Std_data, dXdata);
ws()->ReleaseBuffer(stddev);
}
template <class Context>
void BatchRenormGradientOp<Context>::Setup() {
// determine the mode
if (use_stats == -1) use_global_stats = phase() == "TEST" ? true : false;
else use_global_stats = use_stats == 1 ? true : false;
// determine the data format
TIndex channel_axis = axis;
data_format = "NCHW";
if (channel_axis == -1) channel_axis += (int)input(0).ndim();
if (channel_axis + 1 == (int)input(0).ndim()) data_format = "NHWC";
N = input(0).dim(0);
C = input(0).dim(channel_axis);
NC = N * C;
S = input(0).count() / NC;
NS = N * S;
// make resource
var = ws()->GetTensor("/mnt/" + anchor() + "/bn_var");
r = ws()->GetTensor("/mnt/" + anchor() + "/bn_r");
x_norm = ws()->GetTensor("/mnt/" + anchor() + "/bn_x_norm");
stddev = ws()->GetBuffer();
stddev->ReshapeLike(input(0));
// reshape
mean.ReshapeLike(*var);
num_by_chans.Reshape(vector<TIndex>(1, NC));
output(0)->ReshapeLike(input(0));
}
template <class Context>
void BatchRenormGradientOp<Context>::RunOnDevice() {
Setup();
if (input(0).template IsType<float>()) {
if (use_global_stats) InferenceRunWithType<float>();
else TrainingRunWithType<float>();
}
else LOG(FATAL) << "Unsupported input types.";
}
......
#include "operators/norm/batch_norm_op.h"
#include "core/workspace.h"
#include "utils/filler.h"
#ifdef WITH_CUDNN
#if CUDNN_VERSION_MIN(5, 0, 0)
namespace dragon {
template <class Context> template <typename T>
void CuDNNBatchNormOp<Context>::RunWithType() {
// determine the bn desc
if (input(0).ndim() == 2) {
bn_mode = CUDNN_BATCHNORM_PER_ACTIVATION;
Tensor x_reshape;
x_reshape.Reshape(vector<TIndex>({ N, C, 1, 1 }));
cudnnSetTensorDesc<T>(&input_desc, &x_reshape);
cudnnSetTensorDesc<T>(&output_desc, &x_reshape);
} else {
CHECK_GE((int)input(0).ndim(), 3)
<< "The number of dimensions should be at least 3.";
bn_mode = CUDNN_BATCHNORM_SPATIAL;
#if CUDNN_VERSION_MIN(7, 0, 0)
if (!this->use_global_stats)
bn_mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
#endif
if (data_format == "NCHW") {
cudnnSetTensorDesc<T>(&input_desc, &input(0));
cudnnSetTensorDesc<T>(&output_desc, output(0));
}
else if (data_format == "NHWC") {
switch (input(0).ndim()) {
case 3:
    cudnnSetTensor3dDesc<T>(&input_desc, data_format, &input(0));
    cudnnSetTensor3dDesc<T>(&output_desc, data_format, output(0));
    break;
case 4:
    cudnnSetTensor4dDesc<T>(&input_desc, data_format, &input(0));
    cudnnSetTensor4dDesc<T>(&output_desc, data_format, output(0));
    break;
case 5:
    cudnnSetTensor5dDesc<T>(&input_desc, data_format, &input(0));
    cudnnSetTensor5dDesc<T>(&output_desc, data_format, output(0));
    break;
default:
    LOG(FATAL) << "Only 3d/4d/5d input is supported in NHWC mode.";
}
}
}
// derive the bn desc
CUDNN_CHECK(cudnnDeriveBNTensorDescriptor(bn_desc, input_desc, bn_mode));
TENSOR_FILL(input(1), vector<TIndex>(1, C)); // history_mean
TENSOR_FILL(input(2), vector<TIndex>(1, C)); // history_var
TENSOR_FILL(input(3), vector<TIndex>(1, C)); // scale
TENSOR_FILL(input(4), vector<TIndex>(1, C)); // bias
auto* Xdata = input(0).template data<T, Context>();
auto* Ydata = output(0)->template mutable_data<T, Context>();
auto* hMean_data = input(1).template mutable_data<T, Context>();
auto* hVar_data = input(2).template mutable_data<T, Context>();
auto* Sdata = input(3).template data<T, Context>();
auto* Bdata = input(4).template data<T, Context>();
if (this->use_global_stats) {
CUDNN_CHECK(cudnnBatchNormalizationForwardInference(cudnn_handle(),
bn_mode,
CUDNNType<T>::one,
CUDNNType<T>::zero,
input_desc, Xdata,
output_desc, Ydata,
bn_desc,
Sdata,
Bdata,
hMean_data,
hVar_data,
this->eps));
} else {
auto* tMean_data = mean->template mutable_data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>();
CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(cudnn_handle(),
bn_mode,
CUDNNType<T>::one,
CUDNNType<T>::zero,
input_desc, Xdata,
output_desc, Ydata,
bn_desc,
Sdata,
Bdata,
this->is_recomputing ? 0.0 : 1.0 - this->momentum,
hMean_data,
hVar_data,
this->eps,
tMean_data,
tVar_data));
}
}
template <class Context>
void CuDNNBatchNormOp<Context>::Setup() {
// determine the mode
if (this->use_stats == -1) {
this->use_global_stats = phase() == "TEST" ? true : false;
} else {
this->use_global_stats = this->use_stats == 1 ? true : false;
}
this->is_recomputing = ws()->GetTensor("/opt/mirror_stage/recompute_flag")
->template data<bool, CPUContext>()[0];
// determine the data format
TIndex channel_axis = this->axis;
data_format = "NCHW";
if (channel_axis == -1) channel_axis += (int)input(0).ndim();
if (channel_axis + 1 == (int)input(0).ndim()) data_format = "NHWC";
N = input(0).dim(0);
C = input(0).dim(channel_axis);
// make resource
mean = ws()->CreateTensor("/mnt/" + anchor() + "/bn_mean");
var = ws()->CreateTensor("/mnt/" + anchor() + "/bn_var");
// reshape
mean->Reshape(vector<TIndex>(1, C));
var->Reshape(vector<TIndex>(1, C));
output(0)->ReshapeLike(input(0));
}
template <class Context>
void CuDNNBatchNormOp<Context>::RunOnDevice() {
Setup();
if (input(0).template IsType<float>()) RunWithType<float>();
#ifdef WITH_CUDA_FP16
else if (input(0).template IsType<float16>()) RunWithType<float16>();
#endif
else LOG(FATAL) << "Unsupported input types.";
}
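If the cudnnBatchNormalizationForwardTraining call above is read with its documented semantics, the exponential average factor passed in, f = 0 when recomputing and f = 1 - momentum otherwise, updates the running statistics roughly as

\[ \mu_{run} \leftarrow (1 - f)\,\mu_{run} + f\,\mu_B, \qquad \sigma^2_{run} \leftarrow (1 - f)\,\sigma^2_{run} + f\,\sigma^2_B, \]

so a recompute pass leaves the histories untouched.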
REGISTER_CUDNN_OPERATOR(FusedBatchNorm, CuDNNBatchNormOp<CUDAContext>);
INSTANTIATE_CUDNN_OPERATOR(BatchNorm);
template <class Context>
void CuDNNBatchNormGradientOp<Context>::Setup() {
// determine the mode
if (this->use_stats == -1) {
this->use_global_stats = phase() == "TEST" ? true : false;
} else {
this->use_global_stats = this->use_stats == 1 ? true : false;
}
// determine the data format
TIndex channel_axis = this->axis;
data_format = "NCHW";
if (channel_axis == -1) channel_axis += (int)input(0).ndim();
if (channel_axis + 1 == (int)input(0).ndim()) data_format = "NHWC";
N = input(0).dim(0);
C = input(0).dim(channel_axis);
NC = N * C;
S = input(0).count() / NC;
NS = N * S;
// make resource
mean = ws()->GetTensor("/mnt/" + anchor() + "/bn_mean");
var = ws()->GetTensor("/mnt/" + anchor() + "/bn_var");
// reshape
mean->Reshape(vector<TIndex>(1, C));
var->Reshape(vector<TIndex>(1, C));
num_by_chans.Reshape(vector<TIndex>(1, NC));
output(0)->ReshapeLike(input(0)); // dX
output(1)->ReshapeLike(input(3)); // dScale
output(2)->ReshapeLike(input(3)); // dBias
}
template <class Context> template <typename T>
void CuDNNBatchNormGradientOp<Context>::InferenceRunWithType() {
if (output(0)->name() != "ignore") {
INIT_MULTIPLIER(multiplier, NS);
INIT_MULTIPLIER(num_multiplier, N);
INIT_MULTIPLIER(spatial_multiplier, S);
stddev = ws()->GetBuffer();
stddev->ReshapeLike(input(0));
auto* dYdata = input(-1).template data<T, Context>();
auto* dXdata = output(0)->template mutable_data<T, Context>();
auto* Std_data = stddev->template mutable_data<T, Context>();
auto* Sdata = input(3).template data<T, Context>();
auto* hVar_data = input(2).template data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>();
auto* NSMul_data = multiplier->template data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>();
auto* NMul_data = num_multiplier->template data<T, Context>();
auto* NC_data = num_by_chans.template mutable_data<T, Context>();
// compute stddev
ctx().template Copy<T, Context, Context>(var->count(), tVar_data, hVar_data);
math::AddScalar<T, Context>(var->count(), this->eps, tVar_data);
math::Sqrt<T, Context>(var->count(), tVar_data, tVar_data);
// divide scale by stddev
math::Div<T, Context>(var->count(), Sdata, tVar_data, tVar_data);
// compute dE/dY \cdot (scale / std(X))
if (data_format == "NCHW") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1,
1.0, NMul_data, tVar_data,
0.0, NC_data);
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
1.0, NC_data, SMul_data,
0.0, Std_data);
} else if (data_format == "NHWC") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1,
1.0, NSMul_data, tVar_data,
0.0, Std_data);
}
math::Mul<T, Context>(output(0)->count(), dYdata, Std_data, dXdata);
ws()->ReleaseBuffer(stddev);
}
}
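The inference-mode gradient above reduces to an elementwise rescaling; as a sketch, with \gamma the scale from input(3) and \hat{\sigma}^2 the running variance:

\[ \frac{\partial E}{\partial x} = \frac{\gamma}{\sqrt{\hat{\sigma}^2 + \epsilon}}\,\frac{\partial E}{\partial y} \]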
template <class Context> template <typename T>
void CuDNNBatchNormGradientOp<Context>::TrainingRunWithType() {
// determine the bn desc
if (input(0).ndim() == 2) {
bn_mode = CUDNN_BATCHNORM_PER_ACTIVATION;
Tensor x_reshape;
x_reshape.Reshape(vector<TIndex>({ N, C, 1, 1 }));
cudnnSetTensorDesc<T>(&input_desc, &x_reshape);
cudnnSetTensorDesc<T>(&output_desc, &x_reshape);
} else {
CHECK_GE((int)input(0).ndim(), 3)
<< "The number of dimensions should be at least 3.";
bn_mode = CUDNN_BATCHNORM_SPATIAL;
#if CUDNN_VERSION_MIN(7, 0, 0)
if (!this->use_global_stats)
bn_mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
#endif
if (data_format == "NCHW") {
cudnnSetTensorDesc<T>(&input_desc, &input(-1));
cudnnSetTensorDesc<T>(&output_desc, output(0));
} else if (data_format == "NHWC") {
switch (input(0).ndim()) {
case 3:
    cudnnSetTensor3dDesc<T>(&input_desc, data_format, &input(-1));
    cudnnSetTensor3dDesc<T>(&output_desc, data_format, output(0));
    break;
case 4:
    cudnnSetTensor4dDesc<T>(&input_desc, data_format, &input(-1));
    cudnnSetTensor4dDesc<T>(&output_desc, data_format, output(0));
    break;
case 5:
    cudnnSetTensor5dDesc<T>(&input_desc, data_format, &input(-1));
    cudnnSetTensor5dDesc<T>(&output_desc, data_format, output(0));
    break;
default:
    LOG(FATAL) << "Only 3d/4d/5d input is supported in NHWC mode.";
}
}
}
// derive the bn desc
CUDNN_CHECK(cudnnDeriveBNTensorDescriptor(bn_desc, input_desc, bn_mode));
if (output(0)->name() != "ignore" ||
output(1)->name() != "ignore" ||
output(2)->name() != "ignore") {
auto* dYdata = input(-1).template data<T, Context>();
auto* dXdata = output(0)->template mutable_data<T, Context>();
auto* Xdata = input(0).template data<T, Context>();
auto* Sdata = input(3).template data<T, Context>();
auto* dSdata = output(1)->template mutable_data<T, Context>();
auto* dBdata = output(2)->template mutable_data<T, Context>();
auto* tMean_data = mean->template data<T, Context>();
auto* tVar_data = var->template data<T, Context>();
CUDNN_CHECK(cudnnBatchNormalizationBackward(cudnn_handle(),
bn_mode,
CUDNNType<T>::one,
CUDNNType<T>::zero,
CUDNNType<T>::one,
CUDNNType<T>::one,
output_desc, Xdata,
input_desc, dYdata,
output_desc, dXdata,
bn_desc,
Sdata,
dSdata,
dBdata,
this->eps,
tMean_data,
tVar_data));
}
}
template <class Context>
void CuDNNBatchNormGradientOp<Context>::RunOnDevice() {
Setup();
if (input(0).template IsType<float>()) {
if (this->use_global_stats) InferenceRunWithType<float>();
else TrainingRunWithType<float>();
}
#ifdef WITH_CUDA_FP16
else if (input(0).template IsType<float16>()) {
if (this->use_global_stats) InferenceRunWithType<float16>();
else TrainingRunWithType<float16>();
}
#endif
else LOG(FATAL) << "Unsupported input types.";
}
REGISTER_CUDNN_OPERATOR(FusedBatchNormGradient, CuDNNBatchNormGradientOp<CUDAContext>);
INSTANTIATE_CUDNN_OPERATOR(BatchNormGradient);
} // namespace dragon
#endif
#endif // WITH_CUDNN
\ No newline at end of file
#include "operators/norm/batch_norm_op.h"
#include "core/workspace.h"
#include "utils/filler.h"
#ifdef WITH_CUDNN
#if CUDNN_VERSION_MIN(5, 0, 0)
namespace dragon {
template <class Context> template <typename T>
void CuDNNBNOp<Context>::SpatialRunWithType() {
cudnnSetTensorDesc<T>(&input_desc, &input(0));
cudnnSetTensorDesc<T>(&output_desc, output(0));
CUDNN_CHECK(cudnnDeriveBNTensorDescriptor(bn_desc, input_desc, CUDNN_BATCHNORM_SPATIAL));
TENSOR_FILL(input(1), vector<TIndex>(1, channels)); // history_mean
TENSOR_FILL(input(2), vector<TIndex>(1, channels)); // history_var
TENSOR_FILL(input(3), vector<TIndex>(1, channels)); // scale
TENSOR_FILL(input(4), vector<TIndex>(1, channels)); // bias
auto* Xdata = input(0).template data<T, Context>();
auto* Ydata = output(0)->template mutable_data<T, Context>();
auto* hMean_data = input(1).template mutable_data<T, Context>();
auto* hVar_data = input(2).template mutable_data<T, Context>();
auto* Sdata = input(3).template data<T, Context>();
auto* Bdata = input(4).template data<T, Context>();
if (use_global_stats) {
CUDNN_CHECK(cudnnBatchNormalizationForwardInference(cudnn_handle(),
CUDNN_BATCHNORM_SPATIAL,
CUDNNType<T>::one,
CUDNNType<T>::zero,
input_desc, Xdata,
output_desc, Ydata,
bn_desc,
Sdata,
Bdata,
hMean_data,
hVar_data,
this->eps));
} else {
auto* tMean_data = mean->template mutable_data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>();
CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(cudnn_handle(),
CUDNN_BATCHNORM_SPATIAL,
CUDNNType<T>::one,
CUDNNType<T>::zero,
input_desc, Xdata,
output_desc, Ydata,
bn_desc,
Sdata,
Bdata,
is_recomputing ? 0.0 : 1.0 - this->momentum,
hMean_data,
hVar_data,
this->eps,
tMean_data,
tVar_data));
}
}
template <class Context> template <typename T>
void CuDNNBNOp<Context>::PerActivationRunWithType() {
Tensor x_reshape;
x_reshape.Reshape(vector<TIndex>({ num, channels, 1, 1 }));
cudnnSetTensorDesc<T>(&input_desc, &x_reshape);
cudnnSetTensorDesc<T>(&output_desc, &x_reshape);
CUDNN_CHECK(cudnnDeriveBNTensorDescriptor(bn_desc, input_desc, CUDNN_BATCHNORM_PER_ACTIVATION));
TENSOR_FILL(input(1), vector<TIndex>(1, channels)); // history_mean
TENSOR_FILL(input(2), vector<TIndex>(1, channels)); // history_var
TENSOR_FILL(input(3), vector<TIndex>(1, channels)); // scale
TENSOR_FILL(input(4), vector<TIndex>(1, channels)); // bias
auto* Xdata = input(0).template data<T, Context>();
auto* Ydata = output(0)->template mutable_data<T, Context>();
auto* hMean_data = input(1).template mutable_data<T, Context>();
auto* hVar_data = input(2).template mutable_data<T, Context>();
auto* Sdata = input(3).template data<T, Context>();
auto* Bdata = input(4).template data<T, Context>();
if (use_global_stats) {
CUDNN_CHECK(cudnnBatchNormalizationForwardInference(cudnn_handle(),
CUDNN_BATCHNORM_PER_ACTIVATION,
CUDNNType<T>::one,
CUDNNType<T>::zero,
input_desc, Xdata,
output_desc, Ydata,
bn_desc,
Sdata,
Bdata,
hMean_data,
hVar_data,
this->eps));
} else {
auto* tMean_data = mean->template mutable_data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>();
CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(cudnn_handle(),
CUDNN_BATCHNORM_PER_ACTIVATION,
CUDNNType<T>::one,
CUDNNType<T>::zero,
input_desc, Xdata,
output_desc, Ydata,
bn_desc,
Sdata,
Bdata,
is_recomputing ? 0.0 : 1.0 - this->momentum,
hMean_data,
hVar_data,
this->eps,
tMean_data,
tVar_data));
}
}
template <class Context>
void CuDNNBNOp<Context>::RunOnDevice() {
num = input(0).dim(0);
channels = input(0).dim(1);
spatial_dim = input(0).count(2);
mean = ws()->CreateTensor("_t_" + anchor() + "_bn_mean");
var = ws()->CreateTensor("_t_" + anchor() + "_bn_var");
mean->ReshapeLike(input(1)); var->ReshapeLike(input(2));
output(0)->ReshapeLike(input(0));
if (this->use_stats == -1) use_global_stats = phase() == "TEST" ? true : false;
else use_global_stats = this->use_stats == 1 ? true : false;
is_recomputing = ws()->GetTensor("_t_global_recompute_flag")
->template data<bool, CPUContext>()[0];
if (input(0).template IsType<float>()) {
if (input(0).ndim() == 4) SpatialRunWithType<float>();
else if (input(0).ndim() == 2) PerActivationRunWithType<float>();
else LOG(FATAL) << "The ndim of input tensor should be 2 or 4.";
} else { LOG(FATAL) << "Unsupported input types."; }
}
DEPLOY_CPU(BN);
#ifdef WITH_CUDA
DEPLOY_CUDA(BN);
#endif
OPERATOR_SCHEMA(BN).NumInputs(5).NumOutputs(1);
DEPLOY_CUDNN(BN);
template <class Context> template <typename T>
void CuDNNBNGradientOp<Context>::SpatialRunWithType() {
cudnnSetTensorDesc<T>(&input_desc, &input(-1));
cudnnSetTensorDesc<T>(&output_desc, output(0));
CUDNN_CHECK(cudnnDeriveBNTensorDescriptor(bn_desc, output_desc, CUDNN_BATCHNORM_SPATIAL));
if (use_global_stats) {
if (output(0)->name() != "ignore") {
INIT_MULTIPLIER(num_multiplier, num);
INIT_MULTIPLIER(spatial_multiplier, spatial_dim);
// get buffer
stddev = ws()->GetBuffer();
stddev->ReshapeLike(input(0));
auto* dYdata = input(-1).template data<T, Context>();
auto* dXdata = output(0)->template mutable_data<T, Context>();
auto* Std_data = stddev->template mutable_data<T, Context>();
auto* Sdata = input(3).template data<T, Context>();
auto* hVar_data = input(2).template data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>();
auto* NMul_data = num_multiplier->template data<T, Context>();
auto* NByC_data = num_by_chans.template mutable_data<T, Context>();
// use the moving average var
ctx().template Copy<T, Context, Context>(var->count(), tVar_data, hVar_data);
math::AddScalar<T, Context>(var->count(), this->eps, tVar_data);
math::Sqrt<T, Context>(var->count(), tVar_data, tVar_data);
// divide scale by std
math::Div<T, Context>(var->count(), Sdata, tVar_data, tVar_data);
// broadcast
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, num, channels, 1,
1.0,
NMul_data, tVar_data,
0.0,
NByC_data);
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, nbychans, spatial_dim, 1,
1.0,
NByC_data, SMul_data,
0.0,
Std_data);
// elementwise multiply top grad with(slope / std)
math::Mul<T, Context>(output(0)->count(), dYdata, Std_data, dXdata);
// release buffer
ws()->ReleaseBuffer(stddev);
}
return;
}
if (output(0)->name() != "ignore" ||
output(1)->name() != "ignore" ||
output(2)->name() != "ignore") {
auto* dYdata = input(-1).template data<T, Context>();
auto* dXdata = output(0)->template mutable_data<T, Context>();
auto* Xdata = input(0).template data<T, Context>();
auto* Sdata = input(3).template data<T, Context>();
auto* dSdata = output(1)->template mutable_data<T, Context>();
auto* dBdata = output(2)->template mutable_data<T, Context>();
auto* tMean_data = mean->template data<T, Context>();
auto* tVar_data = var->template data<T, Context>();
CUDNN_CHECK(cudnnBatchNormalizationBackward(cudnn_handle(),
CUDNN_BATCHNORM_SPATIAL,
CUDNNType<T>::one,
CUDNNType<T>::zero,
CUDNNType<T>::one,
CUDNNType<T>::one,
output_desc, Xdata,
input_desc, dYdata,
output_desc, dXdata,
bn_desc,
Sdata,
dSdata,
dBdata,
this->eps,
tMean_data,
tVar_data));
}
}
template <class Context> template <typename T>
void CuDNNBNGradientOp<Context>::PerActivationRunWithType() {
Tensor x_reshape;
x_reshape.Reshape(vector<TIndex>({ num, channels, 1, 1 }));
cudnnSetTensorDesc<T>(&input_desc, &x_reshape);
cudnnSetTensorDesc<T>(&output_desc, &x_reshape);
CUDNN_CHECK(cudnnDeriveBNTensorDescriptor(bn_desc, output_desc, CUDNN_BATCHNORM_PER_ACTIVATION));
if (use_global_stats) {
if (output(0)->name() != "ignore") {
INIT_MULTIPLIER(num_multiplier, num);
INIT_MULTIPLIER(spatial_multiplier, spatial_dim);
// get buffer
stddev = ws()->GetBuffer();
stddev->ReshapeLike(input(0));
auto* dYdata = input(-1).template data<T, Context>();
auto* dXdata = output(0)->template mutable_data<T, Context>();
auto* Std_data = stddev->template mutable_data<T, Context>();
auto* Sdata = input(3).template data<T, Context>();
auto* hVar_data = input(2).template data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>();
auto* NMul_data = num_multiplier->template data<T, Context>();
auto* NByC_data = num_by_chans.template mutable_data<T, Context>();
// use the moving average var
ctx().template Copy<T, Context, Context>(var->count(), tVar_data, hVar_data);
math::AddScalar<T, Context>(var->count(), this->eps, tVar_data);
math::Sqrt<T, Context>(var->count(), tVar_data, tVar_data);
// divide scale by std
math::Div<T, Context>(var->count(), Sdata, tVar_data, tVar_data);
// broadcast
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, num, channels, 1,
1.0,
NMul_data, tVar_data,
0.0,
NByC_data);
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, nbychans, spatial_dim, 1,
1.0,
NByC_data, SMul_data,
0.0,
Std_data);
// elementwise multiply top grad with(slope / std)
math::Mul<T, Context>(output(0)->count(), dYdata, Std_data, dXdata);
// release buffer
ws()->ReleaseBuffer(stddev);
}
return;
}
if (output(0)->name() != "ignore" ||
output(1)->name() != "ignore" ||
output(2)->name() != "ignore") {
auto* dYdata = input(-1).template data<T, Context>();
auto* dXdata = output(0)->template mutable_data<T, Context>();
auto* Xdata = input(0).template data<T, Context>();
auto* Sdata = input(3).template data<T, Context>();
auto* dSdata = output(1)->template mutable_data<T, Context>();
auto* dBdata = output(2)->template mutable_data<T, Context>();
auto* tMean_data = mean->template data<T, Context>();
auto* tVar_data = var->template data<T, Context>();
CUDNN_CHECK(cudnnBatchNormalizationBackward(cudnn_handle(),
CUDNN_BATCHNORM_PER_ACTIVATION,
CUDNNType<T>::one,
CUDNNType<T>::zero,
CUDNNType<T>::one,
CUDNNType<T>::one,
output_desc, Xdata,
input_desc, dYdata,
output_desc, dXdata,
bn_desc,
Sdata,
dSdata,
dBdata,
this->eps,
tMean_data,
tVar_data));
}
}
template <class Context>
void CuDNNBNGradientOp<Context>::RunOnDevice() {
num = input(0).dim(0); channels = input(0).dim(1);
spatial_dim = input(0).count(2); nbychans = num * channels;
mean = ws()->GetTensor("_t_" + anchor() + "_bn_mean");
var = ws()->GetTensor("_t_" + anchor() + "_bn_var");
num_by_chans.Reshape(vector<TIndex>(1, nbychans));
output(0)->ReshapeLike(input(0)); // dX
output(1)->ReshapeLike(input(3)); // dScale
output(2)->ReshapeLike(input(3)); // dBias
if (this->use_stats == -1) use_global_stats = phase() == "TEST" ? true : false;
else use_global_stats = this->use_stats == 1 ? true : false;
if (input(0).template IsType<float>()) {
if (input(0).ndim() == 4) SpatialRunWithType<float>();
else if (input(0).ndim() == 2) PerActivationRunWithType<float>();
else LOG(FATAL) << "The ndim of input tensor must be 2 or 4.";
} else { LOG(FATAL) << "Unsupported input types."; }
}
template <class Context>
void BNGradientOp<Context>::ShareGradient() {
if (use_global_stats) {
if (output(0)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad");
output(0)->Replace(*dX);
}
} else {
if (output(0)->name() != "ignore" ||
output(1)->name() != "ignore" ||
output(2)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad");
output(0)->Replace(*dX);
}
}
}
DEPLOY_CPU(BNGradient);
#ifdef WITH_CUDA
DEPLOY_CUDA(BNGradient);
#endif
OPERATOR_SCHEMA(BNGradient).NumInputs(5).NumOutputs(3);
DEPLOY_CUDNN(BNGradient);
} // namespace dragon
#endif
#endif // WITH_CUDNN
\ No newline at end of file
#include "operators/norm/batch_norm_op.h"
#include "core/workspace.h"
namespace dragon {
DEPLOY_CPU(FusedBatchNorm);
#ifdef WITH_CUDA
DEPLOY_CUDA(FusedBatchNorm);
#endif
OPERATOR_SCHEMA(FusedBatchNorm).NumInputs(5).NumOutputs(1);
template <class Context>
void FusedBatchNormGradientOp<Context>::ShareGradient() {
if (use_global_stats) {
if (output(0)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad");
ws()->CreateAvatar(output(0), dX);
}
} else {
if (output(0)->name() != "ignore" ||
output(1)->name() != "ignore" ||
output(2)->name() != "ignore") {
Tensor* dX = ws()->GetBuffer("Grad");
ws()->CreateAvatar(output(0), dX);
}
}
}
DEPLOY_CPU(FusedBatchNormGradient);
#ifdef WITH_CUDA
DEPLOY_CUDA(FusedBatchNormGradient);
#endif
OPERATOR_SCHEMA(FusedBatchNormGradient).NumInputs(5).NumOutputs(3);
class GetFusedBatchNormGradient final : public GradientMakerBase {
public:
GRADIENT_MAKER_CTOR(GetFusedBatchNormGradient);
vector<OperatorDef> MakeDefs() override {
return SingleDef(def.type() + "Gradient", "",
vector<string> {I(0), I(1), I(2), I(3), GO(0)},
vector<string> {GI(0), GI(3), GI(4)});
}
};
REGISTER_GRADIENT(FusedBatchNorm, GetFusedBatchNormGradient);
} // namespace dragon
\ No newline at end of file
...@@ -7,11 +7,7 @@ namespace dragon {
template <class Context> template <typename T>
void InstanceNormOp<Context>::RunWithType() {
INIT_MULTIPLIER(spatial_multiplier, S);
auto* tMean_data = mean.template mutable_data<T, Context>();
auto* tVar_data = var->template mutable_data<T, Context>();
...@@ -19,58 +15,114 @@ void InstanceNormOp<Context>::RunWithType() {
auto* Ydata = output(0)->template mutable_data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>();
auto* Std_data = stddev->template mutable_data<T, Context>();
ctx().template Copy<T, Context, Context>(output(0)->count(), Ydata, Xdata);
// compute mean
if (data_format == "NCHW") {
    math::Gemv<T, Context>(CblasNoTrans, NC, S,
        1.0 / S, Xdata, SMul_data,
        0, tMean_data);
} else if (data_format == "NHWC") {
    auto* x = Xdata;
    auto* tm = tMean_data;
    for (int i = 0; i < N; i++) {
        math::Gemv<T, Context>(CblasTrans, S, C,
            1.0 / S, x, SMul_data,
            0, tm);
        x += CS;
        tm += C;
    }
}
// subtract mean
if (data_format == "NCHW") {
    math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
        -1.0, tMean_data, SMul_data,
        1.0, Ydata);
} else if (data_format == "NHWC") {
    auto* y = Ydata;
    auto* tm = tMean_data;
    for (int i = 0; i < N; i++) {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, S, C, 1,
            -1.0, SMul_data, tm,
            1.0, y);
        y += CS;
        tm += C;
    }
}
// compute variance
// note that we use VAR(X) = E((X - EX) ^ 2)
math::Square<T, Context>(output(0)->count(), Ydata, Std_data);
if (data_format == "NCHW") {
    math::Gemv<T, Context>(CblasNoTrans, NC, S,
        1.0 / S, Std_data, SMul_data,
        0.0, tVar_data);
} else if (data_format == "NHWC") {
    auto* x2 = Std_data;
    auto* tv = tVar_data;
    for (int i = 0; i < N; i++) {
        math::Gemv<T, Context>(CblasTrans, S, C,
            1.0 / S, x2, SMul_data,
            0, tv);
        x2 += CS;
        tv += C;
    }
}
// compute stddev
math::AddScalar<T, Context>(var->count(), eps, tVar_data);
math::Sqrt<T, Context>(var->count(), tVar_data, tVar_data);
// divide by stddev
if (data_format == "NCHW") {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
1.0, tVar_data, SMul_data,
0.0, Std_data);
} else if (data_format == "NHWC") {
auto* std = Std_data;
auto* tv = tVar_data;
for (int i = 0; i < N; i++) {
math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, S, C, 1,
1.0, SMul_data, tv,
0.0, std);
std += CS;
tv += C;
}
}
math::Div<T, Context>(output(0)->count(), Ydata, Std_data, Ydata);
ws()->ReleaseBuffer(stddev);
}
template <class Context>
void InstanceNormOp<Context>::Setup() {
// determine the data format
TIndex channel_axis = axis;
data_format = "NCHW";
if (channel_axis == -1) channel_axis += (int)input(0).ndim();
if (channel_axis + 1 == (int)input(0).ndim()) data_format = "NHWC";
if (input(0).ndim() == 2) LOG(WARNING) << "A 2d input will produce an all-zero output.";
N = input(0).dim(0);
C = input(0).dim(channel_axis);
NC = N * C;
S = input(0).count() / NC;
CS = C * S;
// make resource
var = ws()->CreateTensor("/mnt/" + anchor() + "/ins_norm_var");
stddev = ws()->GetBuffer();
stddev->ReshapeLike(input(0));
// reshape
mean.Reshape(vector<TIndex>(1, NC));
var->Reshape(vector<TIndex>(1, NC));
output(0)->ReshapeLike(input(0));
}
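For clarity, the forward pass implemented in RunWithType() computes, per sample n and channel c (a sketch, with S spatial positions):

\[ \mu_{n,c} = \frac{1}{S}\sum_{s} x_{n,c,s}, \qquad \sigma^2_{n,c} = \frac{1}{S}\sum_{s}\left(x_{n,c,s} - \mu_{n,c}\right)^2, \qquad y_{n,c,s} = \frac{x_{n,c,s} - \mu_{n,c}}{\sqrt{\sigma^2_{n,c} + \epsilon}} \]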
template <class Context>
void InstanceNormOp<Context>::RunOnDevice() {
Setup();
if (input(0).template IsType<float>()) RunWithType<float>();
else LOG(FATAL) << "Unsupported input types.";
...@@ -84,11 +136,7 @@ OPERATOR_SCHEMA(InstanceNorm).NumInputs(1).NumOutputs(1);
template <class Context> template <typename T>
void InstanceNormGradientOp<Context>::RunWithType() {
INIT_MULTIPLIER(spatial_multiplier, S);
auto* dYdata = input(-1).template data<T, Context>();
auto* dXdata = output(0)->template mutable_data<T, Context>();
...@@ -96,60 +144,117 @@ void InstanceNormGradientOp<Context>::RunWithType() {
auto* tVar_data = var->template mutable_data<T, Context>();
auto* SMul_data = spatial_multiplier->template data<T, Context>();
if (data_format == "NCHW") {
    math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
        1.0, tVar_data, SMul_data,
        0.0, Std_data);
} else if (data_format == "NHWC") {
    auto* std = Std_data;
    auto* tv = tVar_data;
    for (int i = 0; i < N; i++) {
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, S, C, 1,
            1.0, SMul_data, tv,
            0.0, std);
        std += CS;
        tv += C;
    }
}
auto* Ydata = input(-2).template data<T, Context>();
math::Mul<T, Context>(output(0)->count(), Ydata, dYdata, dXdata);
// sum(dE/dY \cdot Y)
if (data_format == "NCHW") {
    math::Gemv<T, Context>(CblasNoTrans, NC, S,
        1.0, dXdata, SMul_data,
        0.0, tVar_data);
    math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
        1.0, tVar_data, SMul_data,
        0.0, dXdata);
} else if (data_format == "NHWC") {
    auto* dx = dXdata;
    auto* tv = tVar_data;
    for (int i = 0; i < N; i++) {
        math::Gemv<T, Context>(CblasTrans, S, C,
            1.0, dx, SMul_data,
            0, tv);
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, S, C, 1,
            1.0, SMul_data, tv,
            0.0, dx);
        dx += CS;
        tv += C;
    }
}
// sum(dE/dY \cdot Y) \cdot Y
math::Mul<T, Context>(output(0)->count(), Ydata, dXdata, dXdata);
// sum(dE/dY) + sum(dE/dY \cdot Y) \cdot Y
if (data_format == "NCHW") {
    math::Gemv<T, Context>(CblasNoTrans, NC, S,
        1.0, dYdata, SMul_data,
        0.0, tVar_data);
    math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1,
        1.0, tVar_data, SMul_data,
        1.0, dXdata);
} else if (data_format == "NHWC") {
    auto* dy = dYdata;
    auto* dx = dXdata;
    auto* tv = tVar_data;
    for (int i = 0; i < N; i++) {
        math::Gemv<T, Context>(CblasTrans, S, C,
            1.0, dy, SMul_data,
            0, tv);
        math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, S, C, 1,
            1.0, SMul_data, tv,
            1.0, dx);
        dy += CS;
        dx += CS;
        tv += C;
    }
}
// dE/dY - mean(dE/dY) - mean(dE/dY \cdot Y) \cdot Y
// = dE/dY - mean(sum(dE/dY) + sum(dE/dY \cdot Y) \cdot Y)
math::Axpby<T, Context>(output(0)->count(), 1.0, dYdata, -1.0 / S, dXdata);
// divide by stddev
math::Div<T, Context>(output(0)->count(), dXdata, Std_data, dXdata);
ws()->ReleaseBuffer(stddev);
}
template <class Context>
void InstanceNormGradientOp<Context>::Setup() {
// determine the data format
TIndex channel_axis = axis;
data_format = "NCHW";
if (channel_axis == -1) channel_axis += (int)input(0).ndim();
if (channel_axis + 1 == (int)input(0).ndim()) data_format = "NHWC";
if (input(0).ndim() == 2) LOG(WARNING) << "A 2d input will produce an all-zero output.";
N = input(0).dim(0);
C = input(0).dim(channel_axis);
NC = N * C;
S = input(0).count() / NC;
CS = C * S;
// make resource
var = ws()->GetTensor("/mnt/" + anchor() + "/ins_norm_var");
stddev = ws()->GetBuffer();
stddev->ReshapeLike(input(0));
// reshape
output(0)->ReshapeLike(input(0));
}
template <class Context>
void InstanceNormGradientOp<Context>::RunOnDevice() {
Setup();
if (input(0).template IsType<float>()) RunWithType<float>();
else LOG(FATAL) << "Unsupported input types.";
......
...@@ -15,7 +15,7 @@ void L2NormOp<Context>::RunWithType() {
buffer->Reshape(dims);
// normalize by inner_dim independently if not across it
norm = ws()->CreateTensor("/mnt/" + anchor() + "/l2norm_normalizer");
dims = input(0).dims();
for (int i = axis; i < end_axis; i++) dims[i] = 1;
norm->Reshape(dims);
...@@ -95,7 +95,7 @@ void L2NormGradientOp<Context>::RunWithType() {
INIT_MULTIPLIER(multiplier, dim);
// normalize by inner_dim independently if not across it
norm = ws()->GetTensor("/mnt/" + anchor() + "/l2norm_normalizer");
buffer = ws()->GetBuffer();
vector<TIndex> dims = input(0).dims();
for (int i = 0; i < axis; i++) dims[i] = 1;
......
...@@ -69,7 +69,7 @@ void LSTMUnitGradientOp<Context>::RunOnDevice() {
output(0)->ReshapeLike(input(0));
output(1)->ReshapeLike(input(1));
if (InputSize() != 5) {
    zeros = ws()->CreateTensor("/share/zeros");
    if (zeros->count() < input(0).count())
        zeros->ReshapeLike(input(0));
}
......
...@@ -9,7 +9,7 @@ void RMSPropUpdateOp<Context>::ComputeRunWithFloat() {
if (!history.get()) {
    string slot = OperatorBase::GetSingleArg<string>("slot", "");
    if (slot.empty()) history.reset(new Tensor());
    else history.reset(ws()->CreateTensor("/mnt/" + name() + "/history"));
    history->ReshapeLike(input(0));
}
lr = param("base_lr") * this->lr_mult;
......
...@@ -38,7 +38,7 @@ template <class Context>
void DenseConcatGradientOp<Context>::ElimateCorruption() {
Set<string> all_heads;
queue<int> safe_heads;
Tensor* head = ws()->GetTensor("/opt/mirror_stage/head");
string* head_data = head->mutable_data<string, CPUContext>();
for (int i = 0; i < head->count(); i++) all_heads.insert(head_data[i]);
...@@ -54,7 +54,7 @@ void DenseConcatGradientOp<Context>::ElimateCorruption() {
}
int idx = safe_heads.front();
safe_heads.pop();
Tensor* buffer = ws()->GetTensor("/opt/mirror_stage/buffer_" + dragon_cast<string, int>(idx));
input(0).Move(buffer->memory());
head_data[idx] = input(0).name();
if (input(-2).template IsType<float>()) RestoreX1<float>();
...@@ -91,7 +91,7 @@ void DenseConcatGradientOp<Context>::ElimateCorruption() {
<< "\nadd WORKSPACE_MAX_CORRUPTED_SIZE for more powerful mirror stage ?";
int idx = safe_heads.front();
safe_heads.pop();
Tensor* buffer = ws()->GetTensor("/opt/mirror_stage/buffer_" + dragon_cast<string, int>(idx));
output(i)->Move(buffer->memory());
head_data[idx] = output(i)->name();
}
......
...@@ -15,18 +15,18 @@ void LRNOp<Context>::AcrossRunWithType() {
template <class Context> template <typename T>
void LRNOp<Context>::SplitRunWithType() {
sqr_in = ws()->CreateTensor("/mnt/" + anchor() + "/sqr_in");
sqr_in->ReshapeLike(input(0));
sqr_in->Share(input(0));
prod_in = ws()->CreateTensor("/mnt/" + anchor() + "/prod_in");
prod_in->ReshapeLike(input(0));
prod_in->Share(input(0));
}
template <class Context> template <typename T>
void LRNOp<Context>::SquareRunWithType() {
sqr_out = ws()->CreateTensor("/mnt/" + anchor() + "/sqr_out");
if (!sqr_op) {
    Argument power;
    power.set_name("power"); power.set_f(2.0);
...@@ -43,7 +43,7 @@ void LRNOp<Context>::SquareRunWithType() {
template <class Context> template <typename T>
void LRNOp<Context>::PoolRunWithType() {
pool_out = ws()->CreateTensor("/mnt/" + anchor() + "/pool_out");
if (!pool_op) {
    Argument ks, s, p, mode;
    ks.set_name("kernel_size"); ks.add_ints(local_size);
...@@ -63,7 +63,7 @@ void LRNOp<Context>::PoolRunWithType() {
template <class Context> template <typename T>
void LRNOp<Context>::PowRunWithType() {
pow_out = ws()->CreateTensor("/mnt/" + anchor() + "/pow_out");
if (!pow_op) {
    Argument scale, shift, power;
    scale.set_name("scale"); scale.set_f(alpha);
...@@ -129,8 +129,8 @@ void LRNGradientOp<Context>::AcrossRunWithType() {
template <class Context> template <typename T>
void LRNGradientOp<Context>::ProdRunWithType() {
prod_in = ws()->GetTensor("/mnt/" + anchor() + "/prod_in");
pow_out = ws()->GetTensor("/mnt/" + anchor() + "/pow_out");
if (!prod_op) {
    Argument operation;
    operation.set_name("operation"); operation.set_s("PROD");
...@@ -150,7 +150,7 @@ void LRNGradientOp<Context>::ProdRunWithType() {
template <class Context> template <typename T>
void LRNGradientOp<Context>::PowRunWithType() {
pool_out = ws()->GetTensor("/mnt/" + anchor() + "/pool_out");
if (!pow_op) {
    Argument scale, shift, power;
    scale.set_name("scale"); scale.set_f(alpha);
...@@ -171,7 +171,7 @@ void LRNGradientOp<Context>::PowRunWithType() {
template <class Context> template <typename T>
void LRNGradientOp<Context>::PoolRunWithType() {
sqr_out = ws()->GetTensor("/mnt/" + anchor() + "/sqr_out");
if (!pool_op) {
    Argument ks, s, p, mode;
    ks.set_name("kernel_size"); ks.add_ints(local_size);
...@@ -193,7 +193,7 @@ void LRNGradientOp<Context>::PoolRunWithType() {
template <class Context> template <typename T>
void LRNGradientOp<Context>::SquareRunWithType() {
sqr_in = ws()->GetTensor("/mnt/" + anchor() + "/sqr_in");
if (!sqr_op) {
    Argument power;
    power.set_name("power"); power.set_f(2.0);
......
...@@ -7,7 +7,7 @@ namespace dragon {
template <class Context> template <typename T>
void Pooling2dOp<Context>::MAXRunWithType() {
mask = ws()->CreateTensor("/mnt/" + anchor() + "/max_pool_mask");
mask->ReshapeLike(*output(0));
auto* Xdata = input(0).template data<T, Context>();
...@@ -122,7 +122,7 @@ OPERATOR_SCHEMA(Pooling2d).NumInputs(1).NumOutputs(1);
template <class Context> template <typename T>
void Pooling2dGradientOp<Context>::MAXRunWithType() {
mask = ws()->GetTensor("/mnt/" + anchor() + "/max_pool_mask");
auto* dYdata = input(-1).template data<T, Context>();
auto* dXdata = output(0)->template mutable_data<T, Context>();
......
...@@ -17,10 +17,10 @@ void ROIAlignOp<Context>::RunWithType() {
template <class Context>
void ROIAlignOp<Context>::RunOnDevice() {
mask = ws()->CreateTensor("/mnt/" + anchor() + "/roi_align_mask");
vector<TIndex> dims({input(1).dim(0), input(0).dim(1), pool_h, pool_w});
output(0)->Reshape(dims);
mask->Reshape(dims);
if (input(0).template IsType<float>()) return RunWithType<float>();
...@@ -45,20 +45,14 @@ void ROIAlignGradientOp<Context>::RunWithType() {
template <class Context>
void ROIAlignGradientOp<Context>::RunOnDevice() {
mask = ws()->GetTensor("/mnt/" + anchor() + "/roi_align_mask");
output(0)->ReshapeLike(input(0));
if (input(0).template IsType<float>()) return RunWithType<float>();
else LOG(FATAL) << "Unsupported input types.";
}
template <class Context>
void ROIAlignGradientOp<Context>::CleanResource() {
Operator<Context>::CleanResource();
ws()->ReleaseBuffer(mask, "Common", true);
}
DEPLOY_CPU(ROIAlignGradient);
#ifdef WITH_CUDA
DEPLOY_CUDA(ROIAlignGradient);
......
...@@ -17,10 +17,10 @@ void ROIPoolingOp<Context>::RunWithType() {
template <class Context>
void ROIPoolingOp<Context>::RunOnDevice() {
mask = ws()->CreateTensor("/mnt/" + anchor() + "/roi_pool_mask");
vector<TIndex> dims({input(1).dim(0), input(0).dim(1), pool_h, pool_w});
output(0)->Reshape(dims);
mask->Reshape(dims);
if (input(0).template IsType<float>()) return RunWithType<float>();
...@@ -33,7 +33,6 @@ DEPLOY_CUDA(ROIPooling);
#endif
OPERATOR_SCHEMA(ROIPooling).NumInputs(2).NumOutputs(1);
template <class Context> template <typename T>
void ROIPoolingGradientOp<Context>::RunWithType() {
kernel::ROIPoolingGrad<T, Context>(spatial_scale,
...@@ -46,20 +45,14 @@ void ROIPoolingGradientOp<Context>::RunWithType() {
template <class Context>
void ROIPoolingGradientOp<Context>::RunOnDevice() {
mask = ws()->GetTensor("/mnt/" + anchor() + "/roi_pool_mask");
output(0)->ReshapeLike(input(0));
if (input(0).template IsType<float>()) return RunWithType<float>();
else LOG(FATAL) << "Unsupported input types.";
}
template <class Context>
void ROIPoolingGradientOp<Context>::CleanResource() {
Operator<Context>::CleanResource();
ws()->ReleaseBuffer(mask, "Common", true);
}
DEPLOY_CPU(ROIPoolingGradient);
#ifdef WITH_CUDA
DEPLOY_CUDA(ROIPoolingGradient);
......