Commit 44906e17 by Ting PAN

Fix bugs in the destructors of Operator & Graph

1 parent 6683676d
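The fix here is twofold: the base classes GraphBase and OperatorBase gain virtual destructors, and Graph now deletes the operators it owns, so the new CuDNN destructors added below actually run when a graph is torn down. A minimal sketch (hypothetical names, not Dragon's real classes) of why deleting a derived operator through a base pointer needs the virtual destructor:

```cpp
// Minimal sketch, hypothetical names: without "virtual" on ~OperatorBase(),
// deleting a CuDNN-style op through an OperatorBase* is undefined behavior
// and its cleanup (descriptors, streams, handles) would never run.
#include <vector>

struct OperatorBase {
    virtual ~OperatorBase() {}                     // the fix: make the base dtor virtual
};

struct CuDNNLikeOp : OperatorBase {
    ~CuDNNLikeOp() { /* release cuDNN descriptors here */ }
};

struct Graph {
    std::vector<OperatorBase*> ops_;
    ~Graph() { for (auto* op : ops_) delete op; }  // now invokes every derived dtor
};
```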
...@@ -82,4 +82,4 @@ static inline std::mt19937* rand_generator() {
} // namepsace dragon
#endif // DRAGON_CORE_CONTEXT_H_
\ No newline at end of file
...@@ -22,6 +22,7 @@ class GraphBase {
};
GraphBase(const GraphDef& meta_graph, Workspace* ws);
virtual ~GraphBase() {}
virtual bool Create(const GraphDef& optimized_graph, Workspace* ws) = 0;
virtual bool Run(const string& include, const string& exclude) = 0;
...@@ -37,6 +38,7 @@ class GraphBase {
class Graph final : public GraphBase {
public:
Graph(const GraphDef& meta_graph, Workspace* ws);
~Graph() { for (auto* op : ops_) delete op; }
bool Create(const GraphDef& optimized_graph, Workspace* ws) override;
bool Run(const string& include, const string& exclude) override;
......
...@@ -25,6 +25,7 @@ class Workspace;
class OperatorBase {
public:
OperatorBase(const OperatorDef& op_def, Workspace* ws);
virtual ~OperatorBase() {}
Tensor& input(int idx);
Tensor* output(int idx);
...@@ -141,7 +142,7 @@ DECLARE_REGISTRY(CUDNNOperatorRegistry, OperatorBase, const OperatorDef&, Worksp
#define TENSOR_FILL(tensor, shape) \
if (tensor.count() == 0) { \
CHECK(ws()->GetFiller(tensor.name())) \
- << "Tensor(" << tensor.name() << ") is empty. \n" \
+ << "\nTensor(" << tensor.name() << ") is empty. \n" \
<< "may be specify a filler for it ?"; \
tensor.Reshape(shape); \
unique_ptr< Filler<T, Context> > filler( \
......
...@@ -32,7 +32,7 @@ class Workspace {
~Workspace();
void Init() {
CreateTensor("ignore");
CreateBuffer("Common", WORKSPACE_COMMON_BUFFER_SIZE);
CreateBuffer("Grad", WORKSPACE_GRAD_BUFFER_SIZE);
}
...@@ -54,6 +54,8 @@ class Workspace {
// clear the buffers
ResetBuffer("Common", WORKSPACE_COMMON_BUFFER_SIZE);
ResetBuffer("Grad", WORKSPACE_GRAD_BUFFER_SIZE);
// clear tensors
for (auto& kv : tensor_map_) kv.second->Reset();
}
/******************** Tensor ********************/
...@@ -80,7 +82,7 @@ class Workspace {
string query = GetTensorName(name);
if (!HasTensor(query))
tensor_map_[query] = unique_ptr<Tensor>(new Tensor(query));
- return tensor_map_[query].get();
+ return GetTensor(query);
}
Tensor* GetTensor(const string& name, bool use_remote=true) {
...@@ -137,16 +139,35 @@ class Workspace {
/******************** Filler ********************/
bool HasFiller(const string& name, bool use_remote=true) {
// search local workspace
bool result = filler_map_.count(name) > 0;
if (!use_remote) return result;
// search remote workspace
for (auto& it : workspace_map_)
result |= it.second->HasFiller(name);
return result;
}
inline void CreateFiller(const TensorFiller filler) {
CHECK_GT(filler.tensor().size(), 0)
<< "Tensor without a valid name can not be filled.";
- if (filler_map_.count(filler.tensor())) return;
+ if (HasFiller(filler.tensor())) return;
filler_map_[filler.tensor()] = filler;
}
inline const TensorFiller* GetFiller(const string& name) {
- if (filler_map_.count(name) > 0) return &filler_map_[name];
- else return nullptr;
+ // search local workspace
+ if (filler_map_.count(name) > 0)
+ return &filler_map_[name];
+ // search remote workspace
+ for (auto& it : workspace_map_) {
+ if (it.second->HasFiller(name))
+ return it.second->GetFiller(name);
+ }
+ return nullptr;
}
/******************** Avatar ********************/
...@@ -159,8 +180,8 @@ class Workspace {
}
inline Tensor* SearchAvatar(Tensor* orig) {
if (avatar_map_.count(orig->name()) > 0)
- return tensor_map_[avatar_map_[orig->name()]].get();
+ return GetTensor(avatar_map_[orig->name()]);
return orig;
}
......
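The filler map now follows the same lookup discipline as tensors: check the local workspace first, then fall back to any workspace that has been moved in. A compact sketch of that local-then-remote pattern, with simplified types standing in for the real Workspace and TensorFiller:

```cpp
// Sketch only: "Store" and "int" stand in for Workspace and TensorFiller,
// which are richer in the real code.
#include <map>
#include <string>

struct Store {
    std::map<std::string, int> local_;          // like filler_map_
    std::map<std::string, Store*> remotes_;     // like workspace_map_

    bool Has(const std::string& key) const {
        if (local_.count(key) > 0) return true;
        for (const auto& kv : remotes_)
            if (kv.second->Has(key)) return true;
        return false;
    }

    const int* Get(const std::string& key) {
        if (local_.count(key) > 0) return &local_[key];
        for (auto& kv : remotes_)
            if (kv.second->Has(key)) return kv.second->Get(key);
        return nullptr;                         // not found anywhere
    }
};
```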
...@@ -56,6 +56,13 @@ public:
CUDNN_CHECK(cudnnSetActivationDescriptor(act_desc,
CUDNN_ACTIVATION_ELU, CUDNN_PROPAGATE_NAN, this->alpha));
}
~CuDNNEluOp() {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyActivationDescriptor(act_desc));
}
void RunOnDevice() override;
template <typename T> void RunWithType();
...@@ -75,6 +82,13 @@ class CuDNNEluGradientOp final : public EluGradientOp<Context> {
CUDNN_CHECK(cudnnSetActivationDescriptor(act_desc,
CUDNN_ACTIVATION_ELU, CUDNN_PROPAGATE_NAN, this->alpha));
}
~CuDNNEluGradientOp() {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyActivationDescriptor(act_desc));
}
void RunOnDevice() override;
template <typename T> void RunWithType();
......
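Each CuDNN op header in this commit gets the same treatment: every descriptor created in the constructor is destroyed in the destructor. A stripped-down sketch of that pairing (error checking via CUDNN_CHECK omitted; assumes the cuDNN headers and a cuDNN version with activation descriptors):

```cpp
// Sketch only: pair each cudnnCreate*Descriptor with a cudnnDestroy*Descriptor.
#include <cudnn.h>

class ActivationDescriptors {
 public:
    ActivationDescriptors() {
        cudnnCreateTensorDescriptor(&input_desc_);
        cudnnCreateTensorDescriptor(&output_desc_);
        cudnnCreateActivationDescriptor(&act_desc_);
    }
    ~ActivationDescriptors() {
        cudnnDestroyTensorDescriptor(input_desc_);
        cudnnDestroyTensorDescriptor(output_desc_);
        cudnnDestroyActivationDescriptor(act_desc_);
    }
 private:
    cudnnTensorDescriptor_t input_desc_, output_desc_;
    cudnnActivationDescriptor_t act_desc_;
};
```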
...@@ -54,6 +54,13 @@ public:
CUDNN_CHECK(cudnnSetActivationDescriptor(act_desc,
CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0));
}
~CuDNNReluOp() {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyActivationDescriptor(act_desc));
}
void RunOnDevice() override;
template <typename T> void RunWithType();
...@@ -73,6 +80,13 @@ class CuDNNReluGradientOp final : public ReluGradientOp<Context> {
CUDNN_CHECK(cudnnSetActivationDescriptor(act_desc,
CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0));
}
~CuDNNReluGradientOp() {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyActivationDescriptor(act_desc));
}
void RunOnDevice() override;
template <typename T> void RunWithType();
......
...@@ -45,6 +45,13 @@ public:
CUDNN_CHECK(cudnnSetActivationDescriptor(act_desc,
CUDNN_ACTIVATION_SIGMOID, CUDNN_PROPAGATE_NAN, 0));
}
~CuDNNSigmoidOp() {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyActivationDescriptor(act_desc));
}
void RunOnDevice() override;
template <typename T> void RunWithType();
...@@ -64,6 +71,13 @@ class CuDNNSigmoidGradientOp final : public SigmoidGradientOp<Context> {
CUDNN_CHECK(cudnnSetActivationDescriptor(act_desc,
CUDNN_ACTIVATION_SIGMOID, CUDNN_PROPAGATE_NAN, 0));
}
~CuDNNSigmoidGradientOp() {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyActivationDescriptor(act_desc));
}
void RunOnDevice() override;
template <typename T> void RunWithType();
......
...@@ -58,6 +58,12 @@ class CuDNNSoftmaxOp final : public Operator<Context> {
CUDNN_CHECK(cudnnCreateTensorDescriptor(&input_desc));
CUDNN_CHECK(cudnnCreateTensorDescriptor(&output_desc));
}
~CuDNNSoftmaxOp() {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
}
void RunOnDevice() override;
template <typename T> void RunWithType();
...@@ -76,6 +82,12 @@ class CuDNNSoftmaxGradientOp final : public Operator<Context> {
CUDNN_CHECK(cudnnCreateTensorDescriptor(&input_desc));
CUDNN_CHECK(cudnnCreateTensorDescriptor(&output_desc));
}
~CuDNNSoftmaxGradientOp() {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
}
void RunOnDevice() override;
template <typename T> void RunWithType();
......
...@@ -45,6 +45,13 @@ public:
CUDNN_CHECK(cudnnSetActivationDescriptor(act_desc,
CUDNN_ACTIVATION_TANH, CUDNN_PROPAGATE_NAN, 0));
}
~CuDNNTanhOp() {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyActivationDescriptor(act_desc));
}
void RunOnDevice() override;
template <typename T> void RunWithType();
...@@ -64,6 +71,13 @@ class CuDNNTanhGradientOp final : public TanhGradientOp<Context> {
CUDNN_CHECK(cudnnSetActivationDescriptor(act_desc,
CUDNN_ACTIVATION_TANH, CUDNN_PROPAGATE_NAN, 0));
}
~CuDNNTanhGradientOp() {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyActivationDescriptor(act_desc));
}
void RunOnDevice() override;
template <typename T> void RunWithType();
......
...@@ -143,6 +143,12 @@ class CuDNNBatchNormOp final : public FusedBatchNormOp<Context> {
this->eps = std::max(this->eps, float(CUDNN_BN_MIN_EPSILON));
}
~CuDNNBatchNormOp() {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(bn_desc));
}
void Setup();
void RunOnDevice() override;
...@@ -167,6 +173,12 @@ class CuDNNBatchNormGradientOp final : public FusedBatchNormGradientOp<Context>
this->eps = std::max(this->eps, float(CUDNN_BN_MIN_EPSILON));
}
~CuDNNBatchNormGradientOp() {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(bn_desc));
}
void Setup();
void RunOnDevice() override;
......
...@@ -71,6 +71,18 @@ class CuDNNConv2dOp : public Conv2dOp<Context> {
else LOG(FATAL) << "Unknown data format: " << this->data_format;
}
~CuDNNConv2dOp() {
CUDNN_CHECK(cudnnDestroyFilterDescriptor(filter_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyConvolutionDescriptor(conv_desc));
if (HasBias()) CUDNN_CHECK(cudnnDestroyTensorDescriptor(bias_desc));
for (int g = 0; g < cudnn_group; g++) {
cudaStreamDestroy(stream[g]);
CUDNN_CHECK(cudnnDestroy(handle[g]));
}
}
void RunOnDevice() override;
template <typename T> void RunWithType();
...@@ -113,6 +125,18 @@ class CuDNNConv2dGradientOp : public Conv2dGradientOp<Context> {
else LOG(FATAL) << "Unknown data format: " << this->data_format;
}
~CuDNNConv2dGradientOp() {
CUDNN_CHECK(cudnnDestroyFilterDescriptor(filter_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyConvolutionDescriptor(conv_desc));
if (HasBias()) CUDNN_CHECK(cudnnDestroyTensorDescriptor(bias_desc));
for (int g = 0; g < cudnn_group * 3; g++) {
cudaStreamDestroy(stream[g]);
CUDNN_CHECK(cudnnDestroy(handle[g]));
}
}
void RunOnDevice() override;
template <typename T> void RunWithType();
......
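The convolution ops additionally own one CUDA stream and one cuDNN handle per group (three sets per group in the gradient ops), so their new destructors also walk those arrays. A sketch of that setup and teardown under the same assumptions (the real code wraps every call in CUDNN_CHECK and sizes the arrays from cudnn_group):

```cpp
// Sketch: create and destroy one stream and one cuDNN handle per group.
#include <cuda_runtime.h>
#include <cudnn.h>
#include <vector>

struct GroupedHandles {
    std::vector<cudaStream_t> streams;
    std::vector<cudnnHandle_t> handles;

    explicit GroupedHandles(int group_count)
        : streams(group_count), handles(group_count) {
        for (int g = 0; g < group_count; ++g) {
            cudaStreamCreate(&streams[g]);
            cudnnCreate(&handles[g]);
            cudnnSetStream(handles[g], streams[g]);  // bind each handle to its stream
        }
    }

    ~GroupedHandles() {
        for (size_t g = 0; g < handles.size(); ++g) {
            cudnnDestroy(handles[g]);
            cudaStreamDestroy(streams[g]);
        }
    }
};
```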
...@@ -73,6 +73,19 @@ class CuDNNConv2dTransposeOp : public Conv2dTransposeOp<Context> {
else if (this->data_format == "NHWC") format = CUDNN_TENSOR_NHWC;
else LOG(FATAL) << "Unknown data format: " << this->data_format;
}
~CuDNNConv2dTransposeOp() {
CUDNN_CHECK(cudnnDestroyFilterDescriptor(filter_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyConvolutionDescriptor(conv_desc));
if (HasBias()) CUDNN_CHECK(cudnnDestroyTensorDescriptor(bias_desc));
for (int g = 0; g < cudnn_group; g++) {
cudaStreamDestroy(stream[g]);
CUDNN_CHECK(cudnnDestroy(handle[g]));
}
}
void RunOnDevice() override;
template <typename T> void RunWithType();
...@@ -114,6 +127,19 @@ public:
else if (this->data_format == "NHWC") format = CUDNN_TENSOR_NHWC;
else LOG(FATAL) << "Unknown data format: " << this->data_format;
}
~CuDNNConv2dTransposeGradientOp() {
CUDNN_CHECK(cudnnDestroyFilterDescriptor(filter_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyConvolutionDescriptor(conv_desc));
if (HasBias()) CUDNN_CHECK(cudnnDestroyTensorDescriptor(bias_desc));
for (int g = 0; g < cudnn_group * 3; g++) {
cudaStreamDestroy(stream[g]);
CUDNN_CHECK(cudnnDestroy(handle[g]));
}
}
void RunOnDevice() override;
template <typename T> void RunWithType();
......
...@@ -91,6 +91,12 @@ class CuDNNLRNOp : public LRNOp<Context> {
this->k));
}
~CuDNNLRNOp() {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyLRNDescriptor(norm_desc));
}
void RunOnDevice() override;
template <typename T> void RunWithType();
...@@ -113,6 +119,12 @@ class CuDNNLRNGradientOp : public LRNGradientOp<Context > {
this->k));
}
~CuDNNLRNGradientOp() {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyLRNDescriptor(norm_desc));
}
void RunOnDevice() override;
template <typename T> void RunWithType();
......
...@@ -108,6 +108,12 @@ class CuDNNPooling2dOp final : public Pooling2dOp<Context> {
} else LOG(FATAL) << "Unsupported pooling mode: " << this->mode;
}
~CuDNNPooling2dOp() {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyPoolingDescriptor(pool_desc));
}
void RunOnDevice() override;
template <typename T> void RunWithType();
...@@ -151,6 +157,12 @@ class CuDNNPooling2dGradientOp final : public Pooling2dGradientOp<Context> {
#endif
}
~CuDNNPooling2dGradientOp() {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
CUDNN_CHECK(cudnnDestroyPoolingDescriptor(pool_desc));
}
void RunOnDevice() override;
template <typename T> void RunWithType();
......
...@@ -13,15 +13,17 @@
namespace dragon {
- std::unordered_map<std::string, std::shared_ptr < Workspace > > g_workspaces;
+ Map<string, unique_ptr < Workspace > > g_workspaces;
Map<string, vector<string> > sub_workspaces;
std::mutex g_mutex;
Workspace* CreateWorkspace(const std::string& name){
std::unique_lock<std::mutex> lock(g_mutex);
LOG(INFO) << "Create the Workspace(" << name << ").";
if (g_workspaces.count(name)) return g_workspaces[name].get();
- std::shared_ptr<Workspace> new_workspace(new Workspace(name));
- g_workspaces[name] = new_workspace;
+ unique_ptr<Workspace> new_workspace(new Workspace(name));
+ g_workspaces[name] = std::move(new_workspace);
sub_workspaces[name] = vector<string>();
return new_workspace.get();
}
...@@ -31,6 +33,10 @@ Workspace* ResetWorkspace(const std::string& name) {
<< "\nWorkspace(" << name << ") does not exist, can not be reset.";
LOG(INFO) << "Reset the Workspace(" << name << ").";
g_workspaces[name].reset(new Workspace(name));
for (auto& sub_workspace : sub_workspaces[name]) {
if (g_workspaces.count(sub_workspace) > 0)
g_workspaces[name]->MoveWorkspace(g_workspaces[sub_workspace].get());
}
return g_workspaces[name].get();
}
...@@ -43,13 +49,14 @@ void ReleaseWorkspace(const std::string& name) {
g_workspaces.erase(name);
}
- void MoveWorkspace(Workspace* main, Workspace* sub) {
+ void MoveWorkspace(Workspace* target_ws, Workspace* source_ws) {
std::unique_lock<std::mutex> lock(g_mutex);
- CHECK(main) << "\nThe given main workspace is invalid.";
- CHECK(sub) << "\nThe given sub workspace is invalid.";
- LOG(INFO) << "Move the Workspace(" << sub->name() << ") "
- << "into the Workspace(" << main->name() << ").";
- main->MoveWorkspace(sub);
+ CHECK(source_ws) << "\nThe given source workspace is invalid.";
+ CHECK(target_ws) << "\nThe given target workspace is invalid.";
+ target_ws->MoveWorkspace(source_ws);
+ sub_workspaces[target_ws->name()].push_back(string(source_ws->name()));
+ LOG(INFO) << "Move the Workspace(" << source_ws->name() << ") "
+ << "into the Workspace(" << target_ws->name() << ").";
}
std::string CreateGraph(const std::string& graph_file, Workspace* ws) {
......
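The new sub_workspaces map records which workspaces have been moved into a parent, so that ResetWorkspace can re-attach them after it rebuilds the parent. A self-contained sketch of that bookkeeping, with a stubbed Workspace type standing in for the real one:

```cpp
// Sketch of the reset/re-attach bookkeeping; Workspace is stubbed out here.
#include <map>
#include <memory>
#include <string>
#include <vector>

struct Workspace {
    explicit Workspace(const std::string& name) : name_(name) {}
    void MoveWorkspace(Workspace* sub) { /* share sub's tensors and fillers */ }
    std::string name_;
};

std::map<std::string, std::unique_ptr<Workspace>> g_workspaces;
std::map<std::string, std::vector<std::string>> sub_workspaces;  // parent -> children

Workspace* ResetWorkspace(const std::string& name) {
    g_workspaces[name].reset(new Workspace(name));
    // Re-attach every workspace previously moved into "name".
    for (const auto& sub : sub_workspaces[name])
        if (g_workspaces.count(sub) > 0)
            g_workspaces[name]->MoveWorkspace(g_workspaces[sub].get());
    return g_workspaces[name].get();
}
```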
...@@ -8,6 +8,7 @@ DEFINE_TYPED_REGISTRY(TensorFetcherRegistry, TypeId, TensorFetcherBase);
DEFINE_TYPED_REGISTRY(TensorFeederRegistry, TypeId, TensorFeederBase);
Map<string, unique_ptr < Workspace > > g_workspaces;
Map<string, vector<string> > sub_workspaces;
Workspace* g_workspace;
string g_current_workspace;
...@@ -124,6 +125,7 @@ bool SwitchWorkspaceInternal(const string& name, const bool create_if_missing) {
unique_ptr<Workspace> new_workspace(new Workspace(name));
g_workspace = new_workspace.get();
g_workspaces[name] = std::move(new_workspace);
sub_workspaces[name] = vector<string>();
g_current_workspace = name;
return true;
} else {
...@@ -148,6 +150,23 @@ PyObject* SwitchWorkspaceCC(PyObject* self, PyObject *args) {
Py_RETURN_TRUE;
}
PyObject* MoveWorkspaceCC(PyObject* self, PyObject *args) {
char* target_ws, *src_ws;
if (!PyArg_ParseTuple(args, "ss", &target_ws, &src_ws)) {
PyErr_SetString(PyExc_ValueError, "You should provide target and src workspace respectively.");
return nullptr;
}
CHECK(g_workspaces.count(src_ws))
<< "\nThe source Workspace(" << src_ws << ") does not exist.";
CHECK(g_workspaces.count(target_ws))
<< "\nThe target Workspace(" << target_ws << ") does not exist.";
g_workspaces[target_ws]->MoveWorkspace(g_workspaces[src_ws].get());
sub_workspaces[target_ws].push_back(string(src_ws));
LOG(INFO) << "Move the Workspace(" << src_ws << ") into the "
<< "Workspace(" << target_ws << ").";
Py_RETURN_TRUE;
}
PyObject* CurrentWorkspaceCC(PyObject* self, PyObject* args) {
return StdStringToPyUnicode(g_current_workspace);
}
...@@ -173,6 +192,10 @@ PyObject* ResetWorkspaceCC(PyObject* self, PyObject* args) {
LOG(INFO) << "Reset the Workspace(" << target_workspace << ")";
g_workspaces[target_workspace].reset(new Workspace(target_workspace));
g_workspace = g_workspaces[target_workspace].get();
for (auto& sub_workspace : sub_workspaces[target_workspace]) {
if (g_workspaces.count(sub_workspace) > 0)
g_workspace->MoveWorkspace(g_workspaces[sub_workspace].get());
}
Py_RETURN_TRUE;
}
...@@ -387,6 +410,7 @@ PyMethodDef* GetAllMethods() {
PYFUNC(NoGradientOperatorsCC),
PYFUNC(CreateGradientDefsCC),
PYFUNC(SwitchWorkspaceCC),
PYFUNC(MoveWorkspaceCC),
PYFUNC(CurrentWorkspaceCC),
PYFUNC(WorkspacesCC),
PYFUNC(ResetWorkspaceCC),
......
...@@ -27,6 +27,7 @@ CURRENT_GRAPH_IDX = 0
__all__ = [
'SwitchWorkspace',
'MoveWorkspace',
'ResetWorkspace',
'ClearWorkspace',
'CreateGraph',
...@@ -86,6 +87,30 @@ def SwitchWorkspace(workspace_name, create_if_missing=True):
SwitchWorkspaceCC(workspace_name, create_if_missing)
def MoveWorkspace(target_ws, source_ws):
"""Move the source workspace into the target workspace.
Parameters
----------
target_ws : str
The name of the target workspace.
source_ws : str
The name of the source workspace.
Returns
-------
None
References
----------
The wrapper of ``MoveWorkspaceCC``.
"""
if target_ws == '' or source_ws == '':
raise ValueError('The target or source name can not be empty.')
MoveWorkspaceCC(target_ws, source_ws)
def ResetWorkspace(workspace_name=''):
"""Reset the specific workspace.
......
...@@ -36,7 +36,7 @@ find_packages('dragon')
find_modules()
setup(name = 'dragon',
- version='0.2.1.13',
+ version='0.2.1.14',
description = 'Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework',
url='https://github.com/neopenx/Dragon',
author='Ting Pan',
......
...@@ -29,5 +29,5 @@ template<> void CPUContext::Memcpy<CUDAContext, CPUContext>(
LOG(FATAL) << "CUDA was not compiled.";
#endif
}
} // namespace dragon
\ No newline at end of file
...@@ -422,4 +422,4 @@ GraphBase* NewGraph(const GraphDef& meta_graph, Workspace* ws) {
return GraphRegistry()->Create(meta_graph.graph_type(), meta_graph, ws);
}
} // namespace dragon
\ No newline at end of file