Commit abae2712 by Ting PAN

Update LMDB format

1 parent f47e53cf
......@@ -52,9 +52,9 @@ using Set = std::unordered_set<Value> ;
/*
* Define the Kernel version.
*
* | Major(2) | Minor(2) | Patch(08) |
* | Major(2) | Minor(2) | Patch(09) |
*/
#define DRAGON_VERSION 2208
#define DRAGON_VERSION 2209
/*
* Define the default random seed.
......
......@@ -259,16 +259,18 @@ class RDivGradientOp final : public Operator<Context> {
#define RunByX1X2(dtype) \
DefineX1X2; \
if (X1->dims() == X2->dims()) { \
EltwiseRunWithType<dtype>(); \
} else if (X1->dim(0) == X2->dim(0) && \
X2->count(1) == 1) { \
BroadcastRunWithType<dtype>(2); \
if (X2->ndim() == 0) { \
BroadcastRunWithType<dtype>(0); \
} else if (X2->ndim() == 1 && X2->dim(0) == 1) { \
BroadcastRunWithType<dtype>(0); \
} else if (X1->dim(-1) == X2->dim(-1) && \
X2->count(0, X2->axis(-1)) == 1) { \
BroadcastRunWithType<dtype>(1); \
} else if (X2->ndim() == 1 && X2->dim(0) == 1) { \
BroadcastRunWithType<dtype>(0); \
} else if (X1->dim(0) == X2->dim(0) && \
X2->count(1) == 1) { \
BroadcastRunWithType<dtype>(2); \
} else if (X1->dims() == X2->dims()) { \
EltwiseRunWithType<dtype>(); \
} else { \
LOG(FATAL) << "Could not broadcast with shapes " \
<< X1->DimString() << " " \
......@@ -277,16 +279,18 @@ class RDivGradientOp final : public Operator<Context> {
#define RRunByX1X2(dtype) \
DefineX1X2; \
if (X1->dims() == X2->dims()) { \
EltwiseRunWithType<dtype>(); \
} else if (X1->dim(0) == X2->dim(0) && \
X1->count(1) == 1) { \
BroadcastRunWithType<dtype>(2); \
if (X1->ndim() == 0) { \
BroadcastRunWithType<dtype>(0); \
} else if (X1->ndim() == 1 && X1->dim(0) == 1) { \
BroadcastRunWithType<dtype>(0); \
} else if (X1->dim(-1) == X2->dim(-1) && \
X1->count(0, X1->axis(-1)) == 1) { \
BroadcastRunWithType<dtype>(1); \
} else if (X1->ndim() == 1 && X1->dim(0) == 1) { \
BroadcastRunWithType<dtype>(0); \
} else if (X1->dim(0) == X2->dim(0) && \
X1->count(1) == 1) { \
BroadcastRunWithType<dtype>(2); \
} else if (X1->dims() == X2->dims()) { \
EltwiseRunWithType<dtype>(); \
} else { \
LOG(FATAL) << "Could not broadcast with shapes " \
<< X1->DimString() << " " \
......
......@@ -41,8 +41,8 @@ option['random_seed'] = 3
# Disable the memonger if true
option['debug_mode'] = False
# Set it by the memonger
option['share_grads'] = False
# Whether to share grads
option['share_grads'] = True
# Whether to log the meta graphs
option['log_meta_graph'] = False
......
......@@ -82,7 +82,8 @@ List Brief
`SmoothL1Loss`_ SmoothL1Loss. `[Girshick, 2015] <https://arxiv.org/abs/1504.08083>`_.
`L1Loss`_ L1Loss.
`L2Loss`_ L2Loss(EuclideanLoss).
`SparseSoftmaxFocalLoss`_ SoftmaxFocalLoss with sparse labels. `[Lin et.al, 2017] <https://arxiv.org/abs/1708.02002>`_.
`SigmoidFocalLoss`_ SigmoidFocalLoss with sparse labels. `[Lin et.al, 2017] <https://arxiv.org/abs/1708.02002>`_.
`SoftmaxFocalLoss`_ SoftmaxFocalLoss with sparse labels. `[Lin et.al, 2017] <https://arxiv.org/abs/1708.02002>`_.
`CTCLoss`_ CTCLoss with batched variable length of labels. `[Graves & Gomez, 2006] <http://www.cs.utoronto.ca/~graves/icml_2006.pdf>`_.
============================= ======================================================================
......@@ -235,7 +236,8 @@ List Brief
.. _SmoothL1Loss: operators/loss.html#dragon.operators.loss.SmoothL1Loss
.. _L1Loss: operators/loss.html#dragon.operators.loss.L1Loss
.. _L2Loss: operators/loss.html#dragon.operators.loss.L2Loss
.. _SparseSoftmaxFocalLoss: operators/loss.html#dragon.operators.loss.SparseSoftmaxFocalLoss
.. _SigmoidFocalLoss: operators/loss.html#dragon.operators.loss.SigmoidFocalLoss
.. _SoftmaxFocalLoss: operators/loss.html#dragon.operators.loss.SoftmaxFocalLoss
.. _CTCLoss: operators/loss.html#dragon.operators.loss.CTCLoss
.. _Add: operators/arithmetic.html#dragon.operators.arithmetic.Add
......
......@@ -95,6 +95,8 @@ List Brief
`SigmoidCrossEntropyLossLayer`_ The implementation of ``SigmoidCrossEntropyLossLayer``.
`L2LossLayer`_ The implementation of ``L2LossLayer``.
`SmoothL1LossLayer`_ The implementation of ``SmoothL1LossLayer``.
`SigmoidWithFocalLossLayer`_ The implementation of ``SigmoidWithFocalLossLayer``.
`SoftmaxWithFocalLossLayer`_ The implementation of ``SoftmaxWithFocalLossLayer``.
================================= =============================================================================
MPI
......@@ -198,6 +200,8 @@ API Reference
.. _SigmoidCrossEntropyLossLayer: #dragon.vm.caffe.layers.loss.SigmoidCrossEntropyLossLayer
.. _L2LossLayer: #dragon.vm.caffe.layers.loss.L2LossLayer
.. _SmoothL1LossLayer: #dragon.vm.caffe.layers.loss.SmoothL1LossLayer
.. _SigmoidWithFocalLossLayer: #dragon.vm.caffe.layers.loss.SigmoidWithFocalLossLayer
.. _SoftmaxWithFocalLossLayer: #dragon.vm.caffe.layers.loss.SoftmaxWithFocalLossLayer
.. _MPIBroadcastLayer: #dragon.vm.caffe.layers.mpi.MPIBroadcastLayer
.. _MPIGatherLayer: #dragon.vm.caffe.layers.mpi.MPIGatherLayer
......
......@@ -47,8 +47,8 @@ API Reference
.. _Net.replace: #dragon.vm.caffe.net.Net.replace
.. _Net.function: #dragon.vm.caffe.net.Net.function
.. _NetInit(prototxt, phase): #dragon.vm.caffe.net.Net.NetInit
.. _NetInitLoad(prototxt, model, phase): #dragon.vm.caffe.net.Net.NetInitLoad
.. _NetInit(proto_txt, phase): #dragon.vm.caffe.net.Net.NetInit
.. _NetInitLoad(proto_txt, model, phase): #dragon.vm.caffe.net.Net.NetInitLoad
.. _workspace.Snapshot(*args, **kwargs): ../../core/workspace.html#dragon.core.workspace.Snapshot
.. _workspace.Restore(*args, **kwargs): ../../core/workspace.html#dragon.core.workspace.Restore
......
......@@ -165,8 +165,8 @@ class DataReader(Process):
# init db
self._db = LMDB()
self._db.open(self._source)
self._db_size = int(self._db.get('size'))
self._db_zfill = int(self._db.get('zfill'))
self._db_zfill = self._db.zfill()
self._db_size = self._db.num_entries()
self._epoch_size = int(self._db_size / self._num_parts + 1)
if self._use_shuffle:
......
......@@ -219,7 +219,7 @@ def L2Loss(inputs, normalization='BATCH_SIZE', **kwargs):
def SigmoidFocalLoss(inputs, axis=1, normalization='VALID',
alpha=0.25, gamma=2.0, neg_id=0, **kwargs):
"""SoftmaxFocalLoss with sparse labels. `[Lin et.al, 2017] <https://arxiv.org/abs/1708.02002>`_.
"""SigmoidFocalLoss with sparse labels. `[Lin et.al, 2017] <https://arxiv.org/abs/1708.02002>`_.
Parameters
----------
......
......@@ -88,6 +88,13 @@ class LMDB(object):
self.txn = self.env.begin(write=(mode == 'w'))
self.cursor = self.txn.cursor()
def zfill(self):
    # Infer the zero-padding width of this database's keys by moving the
    # cursor to the first entry and measuring its key length.
    # NOTE(review): assumes all keys share the same fixed width and that
    # self.key() returns the key at the current cursor — confirm against
    # the rest of the LMDB class.
    self.cursor.first()
    return len(self.key())
def num_entries(self):
    # Total number of key/value pairs in the database, as reported by
    # the LMDB environment statistics ('entries' field of env.stat()).
    return self.env.stat()['entries']
def _try_put(self):
"""Try to commit the buffers.
......
......@@ -39,7 +39,6 @@ class ScalarSummary(object):
"""
self.log_dir = os.path.join(log_dir, 'scalar')
if not os.path.exists(self.log_dir): os.makedirs(self.log_dir)
def add_summary(self, scalar, global_step):
"""Add a summary.
......@@ -62,5 +61,6 @@ class ScalarSummary(object):
else: raise TypeError()
key = key.replace('/', '_')
if not os.path.exists(self.log_dir): os.makedirs(self.log_dir)
with open(os.path.join(self.log_dir, key + '.txt'), 'a') as f:
f.write(str(global_step) + ' ' + str(value) + '\n')
\ No newline at end of file
......@@ -14,7 +14,7 @@ from __future__ import division
from __future__ import print_function
version = '0.2.2'
full_version = '0.2.2.8'
full_version = '0.2.2.9'
release = False
if not release:
......
......@@ -58,22 +58,6 @@ class Layer(object):
self._common_param['mirror_stage'] = LayerParameter.mirror_stage
def Setup(self, bottom):
"""Setup the parameters.
Parameters
----------
bottom : list of Tensor
The inputs.
Returns
-------
None
References
----------
The implementation of `LayerSetUp(layer.hpp, L91)`_.
"""
self._param = dict(self._param, **self._common_param)
def Fill(self, tensor, layer_param, filler):
......
......@@ -9,10 +9,6 @@
#
# ------------------------------------------------------------
# Default configs
import dragon.memonger as opt
opt.ShareGrads(enabled=True)
# Import Dynamic Methods
import dragon.vm.torch.ops.builtin
......
......@@ -42,7 +42,7 @@ find_modules()
setup(name = 'dragon',
version='0.2.2.8',
version='0.2.2.9',
description = 'Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework',
url='https://github.com/seetaresearch/Dragon',
author='Ting Pan',
......
Subproject commit 0e7c32d84ba3758cb1ae703923d73a47add5442d
Subproject commit a4a90cc6a8757fe7bc0d5d1ce8b7af35a0679438
Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!