SeetaResearch / Dragon
Commit 44906e17, authored Mar 27, 2018 by Ting PAN
Fix bugs on the destructor of Operator & Graph
1 parent 6683676d
Showing 20 changed files with 250 additions and 16 deletions:
Dragon/include/core/context.h
Dragon/include/core/graph.h
Dragon/include/core/operator.h
Dragon/include/core/workspace.h
Dragon/include/operators/activation/elu_op.h
Dragon/include/operators/activation/relu_op.h
Dragon/include/operators/activation/sigmoid_op.h
Dragon/include/operators/activation/softmax_op.h
Dragon/include/operators/activation/tanh_op.h
Dragon/include/operators/norm/batch_norm_op.h
Dragon/include/operators/vision/conv_op.h
Dragon/include/operators/vision/conv_transpose_op.h
Dragon/include/operators/vision/lrn_op.h
Dragon/include/operators/vision/pooling_op.h
Dragon/modules/cc/dragon.cc
Dragon/modules/python/dragon.cc
Dragon/python/dragon/core/workspace.py
Dragon/python/setup.py
Dragon/src/core/context.cc
Dragon/src/core/graph.cc
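The common thread in this commit is C++ teardown: GraphBase and OperatorBase gain virtual destructors, Graph now frees the operators it created, and every CuDNN operator releases its descriptors. Without a virtual destructor, deleting a derived object through a base-class pointer is undefined behavior and typically skips the derived cleanup. A minimal sketch of that failure mode (illustrative only, not Dragon code):

    #include <cstdio>

    struct Base {
        ~Base() { std::puts("~Base"); }          // not virtual
    };

    struct Derived : Base {
        ~Derived() { std::puts("~Derived"); }    // skipped below
    };

    int main() {
        Base* b = new Derived();
        delete b;    // undefined behavior: ~Derived never runs,
                     // so anything Derived owns would leak
    }

Declaring the base destructor as virtual ~Base() {} makes delete b dispatch to ~Derived(), which is exactly what the additions to graph.h and operator.h below do.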
Dragon/include/core/context.h
Dragon/include/core/graph.h
@@ -22,6 +22,7 @@ class GraphBase {
     };
     GraphBase(const GraphDef& meta_graph, Workspace* ws);
+    virtual ~GraphBase() {}
     virtual bool Create(const GraphDef& optimized_graph, Workspace* ws) = 0;
     virtual bool Run(const string& include, const string& exclude) = 0;

@@ -37,6 +38,7 @@ class GraphBase {
 class Graph final : public GraphBase {
  public:
     Graph(const GraphDef& meta_graph, Workspace* ws);
+    ~Graph() { for (auto* op : ops_) delete op; }
     bool Create(const GraphDef& optimized_graph, Workspace* ws) override;
     bool Run(const string& include, const string& exclude) override;
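~Graph() now deletes every operator the graph instantiated, which is required because ops_ holds raw OperatorBase pointers. A hedged alternative sketch, showing how the same ownership could be expressed with smart pointers (hypothetical refactor, not what the commit does):

    #include <memory>
    #include <vector>

    struct OperatorBase {
        virtual ~OperatorBase() = default;
    };

    // Hypothetical: owning the operators via unique_ptr would make the
    // explicit delete loop in ~Graph() unnecessary.
    class Graph {
     public:
        ~Graph() = default;   // each unique_ptr frees its operator
     private:
        std::vector<std::unique_ptr<OperatorBase>> ops_;
    };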
Dragon/include/core/operator.h
@@ -25,6 +25,7 @@ class Workspace;
 class OperatorBase {
  public:
     OperatorBase(const OperatorDef& op_def, Workspace* ws);
+    virtual ~OperatorBase() {}
     Tensor& input(int idx);
     Tensor* output(int idx);

@@ -141,7 +142,7 @@ DECLARE_REGISTRY(CUDNNOperatorRegistry, OperatorBase, const OperatorDef&, Worksp
 #define TENSOR_FILL(tensor, shape) \
     if (tensor.count() == 0) { \
         CHECK(ws()->GetFiller(tensor.name())) \
-            << "Tensor(" << tensor.name() << ") is empty. \n" \
+            << "\nTensor(" << tensor.name() << ") is empty. \n" \
             << "may be specify a filler for it ?"; \
         tensor.Reshape(shape); \
         unique_ptr< Filler<T, Context> > filler( \
Dragon/include/core/workspace.h
@@ -54,6 +54,8 @@ class Workspace {
         // clear the buffers
         ResetBuffer("Common", WORKSPACE_COMMON_BUFFER_SIZE);
+        ResetBuffer("Grad", WORKSPACE_GRAD_BUFFER_SIZE);
+        // clear tenosrs
         for (auto& kv : tensor_map_) kv.second->Reset();
     }

     /******************** Tensor ********************/

@@ -80,7 +82,7 @@ class Workspace {
         string query = GetTensorName(name);
         if (!HasTensor(query))
             tensor_map_[query] = unique_ptr<Tensor>(new Tensor(query));
-        return tensor_map_[query].get();
+        return GetTensor(query);
     }

     Tensor* GetTensor(const string& name, bool use_remote = true) {

@@ -137,16 +139,35 @@ class Workspace {
     /******************** Filler ********************/

+    bool HasFiller(const string& name, bool use_remote = true) {
+        // search local workspace
+        bool result = filler_map_.count(name) > 0;
+        if (!use_remote) return result;
+        // search remote workspace
+        for (auto& it : workspace_map_) result |= it.second->HasFiller(name);
+        return result;
+    }
+
     inline void CreateFiller(const TensorFiller filler) {
         CHECK_GT(filler.tensor().size(), 0)
             << "Tensor without a valid name can not be filled.";
-        if (filler_map_.count(filler.tensor())) return;
+        if (HasFiller(filler.tensor())) return;
         filler_map_[filler.tensor()] = filler;
     }

     inline const TensorFiller* GetFiller(const string& name) {
-        if (filler_map_.count(name) > 0) return &filler_map_[name];
-        else return nullptr;
+        // search local workspace
+        if (filler_map_.count(name) > 0) return &filler_map_[name];
+        // search remote workspace
+        for (auto& it : workspace_map_) {
+            if (it.second->HasFiller(name)) return it.second->GetFiller(name);
+        }
+        return nullptr;
     }

     /******************** Avatar ********************/

@@ -160,7 +181,7 @@ class Workspace {
     inline Tensor* SearchAvatar(Tensor* orig) {
         if (avatar_map_.count(orig->name()) > 0)
-            return tensor_map_[avatar_map_[orig->name()]].get();
+            return GetTensor(avatar_map_[orig->name()]);
         return orig;
     }
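HasFiller and GetFiller now fall through to the workspaces recorded in workspace_map_, i.e. workspaces that were moved into this one, mirroring what the tensor lookups already do. A self-contained sketch of the lookup pattern (MiniWorkspace and the int payload are hypothetical stand-ins for Workspace and TensorFiller):

    #include <map>
    #include <string>
    #include <vector>

    struct MiniWorkspace {
        std::map<std::string, int> fillers;    // stand-in for filler_map_
        std::vector<MiniWorkspace*> remotes;   // stand-in for workspace_map_

        const int* GetFiller(const std::string& name) {
            auto it = fillers.find(name);      // search local workspace
            if (it != fillers.end()) return &it->second;
            for (auto* ws : remotes)           // search remote workspaces
                if (const int* f = ws->GetFiller(name)) return f;
            return nullptr;                    // not found anywhere
        }
    };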
Dragon/include/operators/activation/elu_op.h
@@ -56,6 +56,13 @@ public:
         CUDNN_CHECK(cudnnSetActivationDescriptor(act_desc,
             CUDNN_ACTIVATION_ELU, CUDNN_PROPAGATE_NAN, this->alpha));
     }
+    ~CuDNNEluOp() {
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyActivationDescriptor(act_desc));
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();

@@ -75,6 +82,13 @@ class CuDNNEluGradientOp final : public EluGradientOp<Context> {
         CUDNN_CHECK(cudnnSetActivationDescriptor(act_desc,
             CUDNN_ACTIVATION_ELU, CUDNN_PROPAGATE_NAN, this->alpha));
     }
+    ~CuDNNEluGradientOp() {
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyActivationDescriptor(act_desc));
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();
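The same pattern repeats in every CuDNN operator below: each cudnnCreate* issued in the constructor is now paired with a cudnnDestroy* in the destructor, so descriptors no longer leak when an operator is destroyed. Expressed as RAII, the pairing looks roughly like this (the wrapper class is hypothetical; the cuDNN calls and Dragon's CUDNN_CHECK macro are as used in the diff):

    #include <cudnn.h>

    // Hypothetical RAII wrapper for the create/destroy pairing that
    // this commit adds by hand to each operator.
    class TensorDescGuard {
     public:
        TensorDescGuard()  { CUDNN_CHECK(cudnnCreateTensorDescriptor(&desc_)); }
        ~TensorDescGuard() { CUDNN_CHECK(cudnnDestroyTensorDescriptor(desc_)); }
        TensorDescGuard(const TensorDescGuard&) = delete;
        TensorDescGuard& operator=(const TensorDescGuard&) = delete;
        cudnnTensorDescriptor_t get() const { return desc_; }
     private:
        cudnnTensorDescriptor_t desc_;
    };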
Dragon/include/operators/activation/relu_op.h
@@ -54,6 +54,13 @@ public:
         CUDNN_CHECK(cudnnSetActivationDescriptor(act_desc,
             CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0));
     }
+    ~CuDNNReluOp() {
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyActivationDescriptor(act_desc));
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();

@@ -73,6 +80,13 @@ class CuDNNReluGradientOp final : public ReluGradientOp<Context> {
         CUDNN_CHECK(cudnnSetActivationDescriptor(act_desc,
             CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0));
     }
+    ~CuDNNReluGradientOp() {
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyActivationDescriptor(act_desc));
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();
Dragon/include/operators/activation/sigmoid_op.h
@@ -45,6 +45,13 @@ public:
         CUDNN_CHECK(cudnnSetActivationDescriptor(act_desc,
             CUDNN_ACTIVATION_SIGMOID, CUDNN_PROPAGATE_NAN, 0));
     }
+    ~CuDNNSigmoidOp() {
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyActivationDescriptor(act_desc));
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();

@@ -64,6 +71,13 @@ class CuDNNSigmoidGradientOp final : public SigmoidGradientOp<Context> {
         CUDNN_CHECK(cudnnSetActivationDescriptor(act_desc,
             CUDNN_ACTIVATION_SIGMOID, CUDNN_PROPAGATE_NAN, 0));
     }
+    ~CuDNNSigmoidGradientOp() {
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyActivationDescriptor(act_desc));
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();
Dragon/include/operators/activation/softmax_op.h
@@ -58,6 +58,12 @@ class CuDNNSoftmaxOp final : public Operator<Context> {
         CUDNN_CHECK(cudnnCreateTensorDescriptor(&input_desc));
         CUDNN_CHECK(cudnnCreateTensorDescriptor(&output_desc));
     }
+    ~CuDNNSoftmaxOp() {
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();

@@ -76,6 +82,12 @@ class CuDNNSoftmaxGradientOp final : public Operator<Context> {
         CUDNN_CHECK(cudnnCreateTensorDescriptor(&input_desc));
         CUDNN_CHECK(cudnnCreateTensorDescriptor(&output_desc));
     }
+    ~CuDNNSoftmaxGradientOp() {
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();
Dragon/include/operators/activation/tanh_op.h
@@ -45,6 +45,13 @@ public:
         CUDNN_CHECK(cudnnSetActivationDescriptor(act_desc,
             CUDNN_ACTIVATION_TANH, CUDNN_PROPAGATE_NAN, 0));
     }
+    ~CuDNNTanhOp() {
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyActivationDescriptor(act_desc));
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();

@@ -64,6 +71,13 @@ class CuDNNTanhGradientOp final : public TanhGradientOp<Context> {
         CUDNN_CHECK(cudnnSetActivationDescriptor(act_desc,
             CUDNN_ACTIVATION_TANH, CUDNN_PROPAGATE_NAN, 0));
     }
+    ~CuDNNTanhGradientOp() {
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyActivationDescriptor(act_desc));
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();
Dragon/include/operators/norm/batch_norm_op.h
@@ -143,6 +143,12 @@ class CuDNNBatchNormOp final : public FusedBatchNormOp<Context> {
         this->eps = std::max(this->eps, float(CUDNN_BN_MIN_EPSILON));
     }
+    ~CuDNNBatchNormOp() {
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(bn_desc));
+    }
     void Setup();
     void RunOnDevice() override;

@@ -167,6 +173,12 @@ class CuDNNBatchNormGradientOp final : public FusedBatchNormGradientOp<Context>
         this->eps = std::max(this->eps, float(CUDNN_BN_MIN_EPSILON));
     }
+    ~CuDNNBatchNormGradientOp() {
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(bn_desc));
+    }
     void Setup();
     void RunOnDevice() override;
Dragon/include/operators/vision/conv_op.h
@@ -71,6 +71,18 @@ class CuDNNConv2dOp : public Conv2dOp<Context> {
         else LOG(FATAL) << "Unknown data format: " << this->data_format;
     }
+    ~CuDNNConv2dOp() {
+        CUDNN_CHECK(cudnnDestroyFilterDescriptor(filter_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyConvolutionDescriptor(conv_desc));
+        if (HasBias()) CUDNN_CHECK(cudnnDestroyTensorDescriptor(bias_desc));
+        for (int g = 0; g < cudnn_group; g++) {
+            cudaStreamDestroy(stream[g]);
+            CUDNN_CHECK(cudnnDestroy(handle[g]));
+        }
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();

@@ -113,6 +125,18 @@ class CuDNNConv2dGradientOp : public Conv2dGradientOp<Context> {
         else LOG(FATAL) << "Unknown data format: " << this->data_format;
     }
+    ~CuDNNConv2dGradientOp() {
+        CUDNN_CHECK(cudnnDestroyFilterDescriptor(filter_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyConvolutionDescriptor(conv_desc));
+        if (HasBias()) CUDNN_CHECK(cudnnDestroyTensorDescriptor(bias_desc));
+        for (int g = 0; g < cudnn_group * 3; g++) {
+            cudaStreamDestroy(stream[g]);
+            CUDNN_CHECK(cudnnDestroy(handle[g]));
+        }
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();
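Beyond descriptors, the convolution destructors also release the per-group CUDA streams and cuDNN handles; the gradient op tears down cudnn_group * 3 of them, presumably one set per backward path (data, filter, bias). The construction side is not part of this hunk; a hedged sketch of what it plausibly pairs with (real CUDA/cuDNN calls; the helper name is hypothetical and CUDNN_CHECK is Dragon's status macro):

    #include <cuda_runtime.h>
    #include <cudnn.h>
    #include <vector>

    // Plausible constructor-side counterpart of the teardown loops above.
    void CreateGroupHandles(int cudnn_group,
                            std::vector<cudaStream_t>& stream,
                            std::vector<cudnnHandle_t>& handle) {
        stream.resize(cudnn_group);
        handle.resize(cudnn_group);
        for (int g = 0; g < cudnn_group; g++) {
            cudaStreamCreate(&stream[g]);
            CUDNN_CHECK(cudnnCreate(&handle[g]));
            // bind each handle to its stream so groups can run concurrently
            CUDNN_CHECK(cudnnSetStream(handle[g], stream[g]));
        }
    }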
Dragon/include/operators/vision/conv_transpose_op.h
@@ -73,6 +73,19 @@ class CuDNNConv2dTransposeOp : public Conv2dTransposeOp<Context> {
         else if (this->data_format == "NHWC") format = CUDNN_TENSOR_NHWC;
         else LOG(FATAL) << "Unknown data format: " << this->data_format;
     }
+    ~CuDNNConv2dTransposeOp() {
+        CUDNN_CHECK(cudnnDestroyFilterDescriptor(filter_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyConvolutionDescriptor(conv_desc));
+        if (HasBias()) CUDNN_CHECK(cudnnDestroyTensorDescriptor(bias_desc));
+        for (int g = 0; g < cudnn_group; g++) {
+            cudaStreamDestroy(stream[g]);
+            CUDNN_CHECK(cudnnDestroy(handle[g]));
+        }
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();

@@ -114,6 +127,19 @@ public:
         else if (this->data_format == "NHWC") format = CUDNN_TENSOR_NHWC;
         else LOG(FATAL) << "Unknown data format: " << this->data_format;
     }
+    ~CuDNNConv2dTransposeGradientOp() {
+        CUDNN_CHECK(cudnnDestroyFilterDescriptor(filter_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyConvolutionDescriptor(conv_desc));
+        if (HasBias()) CUDNN_CHECK(cudnnDestroyTensorDescriptor(bias_desc));
+        for (int g = 0; g < cudnn_group * 3; g++) {
+            cudaStreamDestroy(stream[g]);
+            CUDNN_CHECK(cudnnDestroy(handle[g]));
+        }
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();
Dragon/include/operators/vision/lrn_op.h
@@ -91,6 +91,12 @@ class CuDNNLRNOp : public LRNOp<Context> {
             this->k));
     }
+    ~CuDNNLRNOp() {
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyLRNDescriptor(norm_desc));
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();

@@ -113,6 +119,12 @@ class CuDNNLRNGradientOp : public LRNGradientOp<Context > {
             this->k));
     }
+    ~CuDNNLRNGradientOp() {
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyLRNDescriptor(norm_desc));
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();
Dragon/include/operators/vision/pooling_op.h
@@ -108,6 +108,12 @@ class CuDNNPooling2dOp final : public Pooling2dOp<Context> {
         } else LOG(FATAL) << "Unsupported pooling mode: " << this->mode;
     }
+    ~CuDNNPooling2dOp() {
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyPoolingDescriptor(pool_desc));
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();

@@ -151,6 +157,12 @@ class CuDNNPooling2dGradientOp final : public Pooling2dGradientOp<Context> {
 #endif
     }
+    ~CuDNNPooling2dGradientOp() {
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(input_desc));
+        CUDNN_CHECK(cudnnDestroyTensorDescriptor(output_desc));
+        CUDNN_CHECK(cudnnDestroyPoolingDescriptor(pool_desc));
+    }
     void RunOnDevice() override;
     template <typename T> void RunWithType();
Dragon/modules/cc/dragon.cc
@@ -13,15 +13,17 @@
 namespace dragon {

-std::unordered_map<std::string, std::shared_ptr<Workspace> > g_workspaces;
+Map<string, unique_ptr<Workspace> > g_workspaces;
+Map<string, vector<string> > sub_workspaces;
 std::mutex g_mutex;

 Workspace* CreateWorkspace(const std::string& name){
     std::unique_lock<std::mutex> lock(g_mutex);
     LOG(INFO) << "Create the Workspace(" << name << ").";
     if (g_workspaces.count(name)) return g_workspaces[name].get();
-    std::shared_ptr<Workspace> new_workspace(new Workspace(name));
-    g_workspaces[name] = new_workspace;
+    unique_ptr<Workspace> new_workspace(new Workspace(name));
+    g_workspaces[name] = std::move(new_workspace);
+    sub_workspaces[name] = vector<string>();
     return new_workspace.get();
 }

@@ -31,6 +33,10 @@ Workspace* ResetWorkspace(const std::string& name) {
         << "\nWorkspace(" << name << ") does not exist, can not be reset.";
     LOG(INFO) << "Reset the Workspace(" << name << ").";
     g_workspaces[name].reset(new Workspace(name));
+    for (auto& sub_workspace : sub_workspaces[name]) {
+        if (g_workspaces.count(sub_workspace) > 0)
+            g_workspaces[name]->MoveWorkspace(g_workspaces[sub_workspace].get());
+    }
     return g_workspaces[name].get();
 }

@@ -43,13 +49,14 @@ void ReleaseWorkspace(const std::string& name) {
     g_workspaces.erase(name);
 }

-void MoveWorkspace(Workspace* main, Workspace* sub) {
+void MoveWorkspace(Workspace* target_ws, Workspace* source_ws) {
     std::unique_lock<std::mutex> lock(g_mutex);
-    CHECK(main) << "\nThe given main workspace is invalid.";
-    CHECK(sub) << "\nThe given sub workspace is invalid.";
-    LOG(INFO) << "Move the Workspace(" << sub->name() << ") "
-              << "into the Workspace(" << main->name() << ").";
-    main->MoveWorkspace(sub);
+    CHECK(source_ws) << "\nThe given source workspace is invalid.";
+    CHECK(target_ws) << "\nThe given target workspace is invalid.";
+    target_ws->MoveWorkspace(source_ws);
+    sub_workspaces[target_ws->name()].push_back(string(source_ws->name()));
+    LOG(INFO) << "Move the Workspace(" << source_ws->name() << ") "
+              << "into the Workspace(" << target_ws->name() << ").";
 }

 std::string CreateGraph(const std::string& graph_file, Workspace* ws) {
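Taken together, CreateWorkspace, MoveWorkspace, and ResetWorkspace now preserve the target/source relationship across resets: sub_workspaces records every move, and ResetWorkspace replays the recorded moves into the freshly rebuilt workspace. A hedged usage sketch against the module API above (the dragon.h header name is an assumption):

    #include "dragon.h"  // assumed header exposing the module API above

    int main() {
        using namespace dragon;
        Workspace* target = CreateWorkspace("main");
        Workspace* source = CreateWorkspace("sub");

        // Link "sub" into "main"; the pair is recorded in sub_workspaces.
        MoveWorkspace(target, source);

        // Before this commit the link was silently lost here; now the
        // recorded sub-workspaces are moved back into the new instance.
        target = ResetWorkspace("main");
        return 0;
    }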
Dragon/modules/python/dragon.cc
@@ -8,6 +8,7 @@ DEFINE_TYPED_REGISTRY(TensorFetcherRegistry, TypeId, TensorFetcherBase);
 DEFINE_TYPED_REGISTRY(TensorFeederRegistry, TypeId, TensorFeederBase);

 Map<string, unique_ptr<Workspace> > g_workspaces;
+Map<string, vector<string> > sub_workspaces;
 Workspace* g_workspace;
 string g_current_workspace;

@@ -124,6 +125,7 @@ bool SwitchWorkspaceInternal(const string& name, const bool create_if_missing) {
         unique_ptr<Workspace> new_workspace(new Workspace(name));
         g_workspace = new_workspace.get();
         g_workspaces[name] = std::move(new_workspace);
+        sub_workspaces[name] = vector<string>();
         g_current_workspace = name;
         return true;
     } else {

@@ -148,6 +150,23 @@ PyObject* SwitchWorkspaceCC(PyObject* self, PyObject *args) {
     Py_RETURN_TRUE;
 }

+PyObject* MoveWorkspaceCC(PyObject* self, PyObject* args) {
+    char *target_ws, *src_ws;
+    if (!PyArg_ParseTuple(args, "ss", &target_ws, &src_ws)) {
+        PyErr_SetString(PyExc_ValueError,
+            "You should provide target and src workspace respectively.");
+        return nullptr;
+    }
+    CHECK(g_workspaces.count(src_ws))
+        << "\nThe source Workspace(" << src_ws << ") does not exist.";
+    CHECK(g_workspaces.count(target_ws))
+        << "\nThe target Workspace(" << target_ws << ") does not exist.";
+    g_workspaces[target_ws]->MoveWorkspace(g_workspaces[src_ws].get());
+    sub_workspaces[target_ws].push_back(string(src_ws));
+    LOG(INFO) << "Move the Workspace(" << src_ws << ") into the "
+              << "Workspace(" << target_ws << ").";
+    Py_RETURN_TRUE;
+}
+
 PyObject* CurrentWorkspaceCC(PyObject* self, PyObject* args) {
     return StdStringToPyUnicode(g_current_workspace);
 }

@@ -173,6 +192,10 @@ PyObject* ResetWorkspaceCC(PyObject* self, PyObject* args) {
     LOG(INFO) << "Reset the Workspace(" << target_workspace << ")";
     g_workspaces[target_workspace].reset(new Workspace(target_workspace));
     g_workspace = g_workspaces[target_workspace].get();
+    for (auto& sub_workspace : sub_workspaces[target_workspace]) {
+        if (g_workspaces.count(sub_workspace) > 0)
+            g_workspace->MoveWorkspace(g_workspaces[sub_workspace].get());
+    }
     Py_RETURN_TRUE;
 }

@@ -387,6 +410,7 @@ PyMethodDef* GetAllMethods() {
     PYFUNC(NoGradientOperatorsCC),
     PYFUNC(CreateGradientDefsCC),
     PYFUNC(SwitchWorkspaceCC),
+    PYFUNC(MoveWorkspaceCC),
     PYFUNC(CurrentWorkspaceCC),
     PYFUNC(WorkspacesCC),
     PYFUNC(ResetWorkspaceCC),
Dragon/python/dragon/core/workspace.py
@@ -27,6 +27,7 @@ CURRENT_GRAPH_IDX = 0
 __all__ = [
     'SwitchWorkspace',
+    'MoveWorkspace',
     'ResetWorkspace',
     'ClearWorkspace',
     'CreateGraph',

@@ -86,6 +87,30 @@ def SwitchWorkspace(workspace_name, create_if_missing=True):
     SwitchWorkspaceCC(workspace_name, create_if_missing)


+def MoveWorkspace(target_ws, source_ws):
+    """Move the source workspace into the target workspace.
+
+    Parameters
+    ----------
+    target_ws : str
+        The name of the target workspace.
+    source_ws : str
+        The name of the source workspace.
+
+    Returns
+    -------
+    None
+
+    References
+    ----------
+    The wrapper of ``MoveWorkspaceCC``.
+
+    """
+    if target_ws == '' or source_ws == '':
+        raise ValueError('The target or source name can not be empty.')
+    MoveWorkspaceCC(target_ws, source_ws)
+
+
 def ResetWorkspace(workspace_name=''):
     """Reset the specific workspace.
Dragon/python/setup.py
@@ -36,7 +36,7 @@ find_packages('dragon')
 find_modules()

 setup(name = 'dragon',
-      version = '0.2.1.13',
+      version = '0.2.1.14',
       description = 'Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework',
       url = 'https://github.com/neopenx/Dragon',
       author = 'Ting Pan',
Dragon/src/core/context.cc
Dragon/src/core/graph.cc