Commit abae2712, authored Jul 23, 2018 by Ting PAN

Update LMDB format

1 parent f47e53cf

Showing 15 changed files with 47 additions and 49 deletions
Dragon/include/core/common.h
Dragon/include/operators/arithmetic/fundamental_op.h
Dragon/python/dragon/config.py
Dragon/python/dragon/docs/contents/ops.rst
Dragon/python/dragon/docs/contents/vm/caffe/layer.rst
Dragon/python/dragon/docs/contents/vm/caffe/net.rst
Dragon/python/dragon/io/data_reader.py
Dragon/python/dragon/operators/loss.py
Dragon/python/dragon/tools/db.py
Dragon/python/dragon/tools/summary_writer.py
Dragon/python/dragon/version.py
Dragon/python/dragon/vm/caffe/layer.py
Dragon/python/dragon/vm/torch/__init__.py
Dragon/python/setup.py
DragonLair
Dragon/include/core/common.h

@@ -52,9 +52,9 @@ using Set = std::unordered_set<Value>;
 /*
  * Define the Kernel version.
  *
- * | Major(2) | Minor(2) | Patch(08) |
+ * | Major(2) | Minor(2) | Patch(09) |
  */
-#define DRAGON_VERSION 2208
+#define DRAGON_VERSION 2209
 
 /*
  * Define the default random seed.
Dragon/include/operators/arithmetic/fundamental_op.h

@@ -259,16 +259,18 @@ class RDivGradientOp final : public Operator<Context> {
 #define RunByX1X2(dtype) \
     DefineX1X2; \
-    if (X1->dims() == X2->dims()) { \
-        EltwiseRunWithType<dtype>(); \
-    } else if (X1->dim(0) == X2->dim(0) && \
-               X2->count(1) == 1) { \
-        BroadcastRunWithType<dtype>(2); \
+    if (X2->ndim() == 0) { \
+        BroadcastRunWithType<dtype>(0); \
+    } else if (X2->ndim() == 1 && X2->dim(0) == 1) { \
+        BroadcastRunWithType<dtype>(0); \
     } else if (X1->dim(-1) == X2->dim(-1) && \
                X2->count(0, X2->axis(-1)) == 1) { \
         BroadcastRunWithType<dtype>(1); \
-    } else if (X2->ndim() == 1 && X2->dim(0) == 1) { \
-        BroadcastRunWithType<dtype>(0); \
+    } else if (X1->dim(0) == X2->dim(0) && \
+               X2->count(1) == 1) { \
+        BroadcastRunWithType<dtype>(2); \
+    } else if (X1->dims() == X2->dims()) { \
+        EltwiseRunWithType<dtype>(); \
     } else { \
         LOG(FATAL) << "Could not broadcast with shapes " \
                    << X1->DimString() << " " \

@@ -277,16 +279,18 @@ class RDivGradientOp final : public Operator<Context> {
 #define RRunByX1X2(dtype) \
     DefineX1X2; \
-    if (X1->dims() == X2->dims()) { \
-        EltwiseRunWithType<dtype>(); \
-    } else if (X1->dim(0) == X2->dim(0) && \
-               X1->count(1) == 1) { \
-        BroadcastRunWithType<dtype>(2); \
+    if (X1->ndim() == 0) { \
+        BroadcastRunWithType<dtype>(0); \
+    } else if (X1->ndim() == 1 && X1->dim(0) == 1) { \
+        BroadcastRunWithType<dtype>(0); \
     } else if (X1->dim(-1) == X2->dim(-1) && \
                X1->count(0, X1->axis(-1)) == 1) { \
         BroadcastRunWithType<dtype>(1); \
-    } else if (X1->ndim() == 1 && X1->dim(0) == 1) { \
-        BroadcastRunWithType<dtype>(0); \
+    } else if (X1->dim(0) == X2->dim(0) && \
+               X1->count(1) == 1) { \
+        BroadcastRunWithType<dtype>(2); \
+    } else if (X1->dims() == X2->dims()) { \
+        EltwiseRunWithType<dtype>(); \
     } else { \
         LOG(FATAL) << "Could not broadcast with shapes " \
                    << X1->DimString() << " " \
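Both macros were reordered the same way: the old chains never tested for a 0-d (scalar) operand, which fell through to the fatal log, so the scalar and length-1 cases are now tried first and the elementwise same-shape case is deferred to last (RRunByX1X2 mirrors RunByX1X2 with X1 as the broadcast operand). A minimal Python sketch of the new RunByX1X2 dispatch order; choose_broadcast_kind and the tuple-shape interface are illustrative only, not Dragon API:

    import numpy as np

    def choose_broadcast_kind(x1_shape, x2_shape):
        # Mirrors the new RunByX1X2 chain; the integer is the argument
        # passed to BroadcastRunWithType, 'eltwise' means EltwiseRunWithType.
        if len(x2_shape) == 0:                          # 0-d scalar
            return 0
        if len(x2_shape) == 1 and x2_shape[0] == 1:     # length-1 vector
            return 0
        if x1_shape[-1] == x2_shape[-1] and \
           int(np.prod(x2_shape[:-1])) == 1:            # trailing dim matches
            return 1
        if x1_shape[0] == x2_shape[0] and \
           int(np.prod(x2_shape[1:])) == 1:             # leading dim matches
            return 2
        if x1_shape == x2_shape:                        # same shape
            return 'eltwise'
        raise ValueError('Could not broadcast with shapes '
                         '{} {}'.format(x1_shape, x2_shape))

    assert choose_broadcast_kind((4, 3), ()) == 0       # case the old chain missed
    assert choose_broadcast_kind((4, 3), (1,)) == 0
    assert choose_broadcast_kind((4, 3), (3,)) == 1
    assert choose_broadcast_kind((4, 3), (4, 1)) == 2
    assert choose_broadcast_kind((4, 3), (4, 3)) == 'eltwise'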
Dragon/python/dragon/config.py

@@ -41,8 +41,8 @@ option['random_seed'] = 3
 # Disable the memonger if true
 option['debug_mode'] = False
 
-# Set it by the memonger
-option['share_grads'] = False
+# Whether to share grads
+option['share_grads'] = True
 
 # Whether to log the meta graphs
 option['log_meta_graph'] = False
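This flips the default: gradient sharing via the memonger is now on globally rather than something a front end opts into, which is why the explicit call is removed from dragon/vm/torch/__init__.py further down. A hedged sketch of opting back out, assuming ShareGrads accepts enabled=False the same way the removed call passed enabled=True:

    import dragon.memonger as opt

    # share_grads now defaults to True; disabling it is presumably the
    # mirror of the call the torch front end used to make at import time.
    opt.ShareGrads(enabled=False)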
Dragon/python/dragon/docs/contents/ops.rst

@@ -82,7 +82,8 @@ List Brief
 `SmoothL1Loss`_               SmoothL1Loss. `[Girshick, 2015] <https://arxiv.org/abs/1504.08083>`_.
 `L1Loss`_                     L1Loss.
 `L2Loss`_                     L2Loss(EuclideanLoss).
-`SparseSoftmaxFocalLoss`_     SoftmaxFocalLoss with sparse labels. `[Lin et.al, 2017] <https://arxiv.org/abs/1708.02002>`_.
+`SigmoidFocalLoss`_           SigmoidFocalLoss with sparse labels. `[Lin et.al, 2017] <https://arxiv.org/abs/1708.02002>`_.
+`SoftmaxFocalLoss`_           SoftmaxFocalLoss with sparse labels. `[Lin et.al, 2017] <https://arxiv.org/abs/1708.02002>`_.
 `CTCLoss`_                    CTCLoss with batched variable length of labels. `[Graves & Gomez, 2006] <http://www.cs.utoronto.ca/~graves/icml_2006.pdf>`_.
 ============================= ======================================================================

@@ -235,7 +236,8 @@ List Brief
 .. _SmoothL1Loss: operators/loss.html#dragon.operators.loss.SmoothL1Loss
 .. _L1Loss: operators/loss.html#dragon.operators.loss.L1Loss
 .. _L2Loss: operators/loss.html#dragon.operators.loss.L2Loss
-.. _SparseSoftmaxFocalLoss: operators/loss.html#dragon.operators.loss.SparseSoftmaxFocalLoss
+.. _SigmoidFocalLoss: operators/loss.html#dragon.operators.loss.SigmoidFocalLoss
+.. _SoftmaxFocalLoss: operators/loss.html#dragon.operators.loss.SoftmaxFocalLoss
 .. _CTCLoss: operators/loss.html#dragon.operators.loss.CTCLoss
 .. _Add: operators/arithmetic.html#dragon.operators.arithmetic.Add
Dragon/python/dragon/docs/contents/vm/caffe/layer.rst

@@ -95,6 +95,8 @@ List Brief
 `SigmoidCrossEntropyLossLayer`_   The implementation of ``SigmoidCrossEntropyLossLayer``.
 `L2LossLayer`_                    The implementation of ``L2LossLayer``.
 `SmoothL1LossLayer`_              The implementation of ``SmoothL1LossLayer``.
+`SigmoidWithFocalLossLayer`_      The implementation of ``SigmoidWithFocalLossLayer``.
+`SoftmaxWithFocalLossLayer`_      The implementation of ``SoftmaxWithFocalLossLayer``.
 ================================= =============================================================================
 
 MPI

@@ -198,6 +200,8 @@ API Reference
 .. _SigmoidCrossEntropyLossLayer: #dragon.vm.caffe.layers.loss.SigmoidCrossEntropyLossLayer
 .. _L2LossLayer: #dragon.vm.caffe.layers.loss.L2LossLayer
 .. _SmoothL1LossLayer: #dragon.vm.caffe.layers.loss.SmoothL1LossLayer
+.. _SigmoidWithFocalLossLayer: #dragon.vm.caffe.layers.loss.SigmoidWithFocalLossLayer
+.. _SoftmaxWithFocalLossLayer: #dragon.vm.caffe.layers.loss.SoftmaxWithFocalLossLayer
 .. _MPIBroadcastLayer: #dragon.vm.caffe.layers.mpi.MPIBroadcastLayer
 .. _MPIGatherLayer: #dragon.vm.caffe.layers.mpi.MPIGatherLayer
Dragon/python/dragon/docs/contents/vm/caffe/net.rst

@@ -47,8 +47,8 @@ API Reference
 .. _Net.replace: #dragon.vm.caffe.net.Net.replace
 .. _Net.function: #dragon.vm.caffe.net.Net.function
-.. _NetInit(prototxt, phase): #dragon.vm.caffe.net.Net.NetInit
-.. _NetInitLoad(prototxt, model, phase): #dragon.vm.caffe.net.Net.NetInitLoad
+.. _NetInit(proto_txt, phase): #dragon.vm.caffe.net.Net.NetInit
+.. _NetInitLoad(proto_txt, model, phase): #dragon.vm.caffe.net.Net.NetInitLoad
 .. _workspace.Snapshot(*args, **kwargs): ../../core/workspace.html#dragon.core.workspace.Snapshot
 .. _workspace.Restore(*args, **kwargs): ../../core/workspace.html#dragon.core.workspace.Restore
Dragon/python/dragon/io/data_reader.py

@@ -165,8 +165,8 @@ class DataReader(Process):
         # init db
         self._db = LMDB()
         self._db.open(self._source)
-        self._db_size = int(self._db.get('size'))
-        self._db_zfill = int(self._db.get('zfill'))
+        self._db_zfill = self._db.zfill()
+        self._db_size = self._db.num_entries()
         self._epoch_size = int(self._db_size / self._num_parts + 1)
 
         if self._use_shuffle:
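This is the format change behind the commit title: a database no longer stores its metadata under reserved 'size' and 'zfill' records; the entry count now comes from the LMDB environment and the key width from the first key. One consequence is that old readers calling db.get('size') will fail on new databases, while on old databases the raw entry count still includes the two metadata records. A sketch of the new derivation against the third-party lmdb package directly (which dragon.tools.db presumably wraps):

    import lmdb

    def read_db_meta(source):
        # Derive what the old format stored under the 'size'/'zfill' keys.
        env = lmdb.open(source, readonly=True, lock=False)
        with env.begin() as txn:
            num_entries = env.stat()['entries']   # every record is now data
            cursor = txn.cursor()
            cursor.first()                        # keys are fixed-width
            zfill = len(cursor.key())             # so any key gives the width
        env.close()
        return num_entries, zfill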
Dragon/python/dragon/operators/loss.py

@@ -219,7 +219,7 @@ def L2Loss(inputs, normalization='BATCH_SIZE', **kwargs):
 def SigmoidFocalLoss(inputs, axis=1, normalization='VALID',
                      alpha=0.25, gamma=2.0, neg_id=0, **kwargs):
-    """SoftmaxFocalLoss with sparse labels. `[Lin et.al, 2017] <https://arxiv.org/abs/1708.02002>`_.
+    """SigmoidFocalLoss with sparse labels. `[Lin et.al, 2017] <https://arxiv.org/abs/1708.02002>`_.
 
     Parameters
     ----------
Dragon/python/dragon/tools/db.py

@@ -88,6 +88,13 @@ class LMDB(object):
         self.txn = self.env.begin(write=(mode == 'w'))
         self.cursor = self.txn.cursor()
 
+    def zfill(self):
+        self.cursor.first()
+        return len(self.key())
+
+    def num_entries(self):
+        return self.env.stat()['entries']
+
     def _try_put(self):
         """Try to commit the buffers.
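zfill() works because keys are indices rendered at one fixed zero-padded width, so LMDB's lexicographic iteration order matches numeric order and the first key's length is the padding width. A small illustration; the width of 8 is an assumption about the writer, not taken from this diff:

    width = 8                                    # assumed writer-side width
    keys = [str(i).zfill(width) for i in (0, 1, 10, 200, 31415)]
    assert sorted(keys) == keys                  # lexicographic == numeric
    assert all(len(k) == width for k in keys)    # first key reveals the width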
Dragon/python/dragon/tools/summary_writer.py

@@ -39,7 +39,6 @@ class ScalarSummary(object):
         """
         self.log_dir = os.path.join(log_dir, 'scalar')
-        if not os.path.exists(self.log_dir): os.makedirs(self.log_dir)
 
     def add_summary(self, scalar, global_step):
         """Add a summary.

@@ -62,5 +61,6 @@ class ScalarSummary(object):
         else:
             raise TypeError()
         key = key.replace('/', '_')
+        if not os.path.exists(self.log_dir): os.makedirs(self.log_dir)
         with open(os.path.join(self.log_dir, key + '.txt'), 'a') as f:
             f.write(str(global_step) + ' ' + str(value) + '\n')
\ No newline at end of file
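Directory creation moves from the constructor into add_summary, so the scalar/ subdirectory is only created once something is actually logged. A hypothetical usage sketch; the (tag, value) tuple form of scalar is an assumption, since the accepted formats fall outside this hunk:

    from dragon.tools.summary_writer import ScalarSummary

    sw = ScalarSummary(log_dir='./logs')
    # Assumed (tag, value) form; '/' in the tag becomes '_' in the filename,
    # so this appends the line "100 0.75" to ./logs/scalar/train_loss.txt.
    sw.add_summary(('train/loss', 0.75), global_step=100)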
Dragon/python/dragon/version.py

@@ -14,7 +14,7 @@ from __future__ import division
 from __future__ import print_function
 
 version = '0.2.2'
-full_version = '0.2.2.8'
+full_version = '0.2.2.9'
 release = False
 
 if not release:
Dragon/python/dragon/vm/caffe/layer.py

@@ -58,22 +58,6 @@ class Layer(object):
         self._common_param['mirror_stage'] = LayerParameter.mirror_stage
 
     def Setup(self, bottom):
-        """Setup the parameters.
-
-        Parameters
-        ----------
-        bottom : list of Tensor
-            The inputs.
-
-        Returns
-        -------
-        None
-
-        References
-        ----------
-        The implementation of `LayerSetUp(layer.hpp, L91)`_.
-
-        """
         self._param = dict(self._param, **self._common_param)
 
     def Fill(self, tensor, layer_param, filler):
Dragon/python/dragon/vm/torch/__init__.py

@@ -9,10 +9,6 @@
 #
 # ------------------------------------------------------------
 
-# Default configs
-import dragon.memonger as opt
-opt.ShareGrads(enabled=True)
-
 # Import Dynamic Methods
 import dragon.vm.torch.ops.builtin
Dragon/python/setup.py

@@ -42,7 +42,7 @@ find_modules()
 setup(name = 'dragon',
-      version = '0.2.2.8',
+      version = '0.2.2.9',
       description = 'Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework',
       url = 'https://github.com/seetaresearch/Dragon',
       author = 'Ting Pan',
DragonLair @ a4a90cc6

-Subproject commit 0e7c32d84ba3758cb1ae703923d73a47add5442d
+Subproject commit a4a90cc6a8757fe7bc0d5d1ce8b7af35a0679438