Commit 2598f4dc by Ting PAN

Align the tensor abstraction

Summary:
This commit aligns the properties and methods of tensor class.
1 parent 413dbad0
Showing with 1057 additions and 1544 deletions
...@@ -368,7 +368,7 @@ class Input(Layer): ...@@ -368,7 +368,7 @@ class Input(Layer):
namespace='Tensor'), namespace='Tensor'),
shape=self.blob_shapes[i], shape=self.blob_shapes[i],
dtype='float32', dtype='float32',
).placeholder() for i in range(len(self.blob_shapes))] ).constant() for i in range(len(self.blob_shapes))]
class Normalize(Layer): class Normalize(Layer):
......
...@@ -110,7 +110,7 @@ class Reshape(object): ...@@ -110,7 +110,7 @@ class Reshape(object):
reshape1 = dali.ops.Reshape(shape=(2, 3)) reshape1 = dali.ops.Reshape(shape=(2, 3))
y = reshape1(inputs['x']) y = reshape1(inputs['x'])
# Reshape to the shape given by another tensor # Reshape to the shape of other tensor
reshape2 = dali.ops.Reshape() reshape2 = dali.ops.Reshape()
z = reshape2(inputs['x'], inputs['x_shape']) z = reshape2(inputs['x'], inputs['x_shape'])
``` ```
......
...@@ -7,13 +7,13 @@ dragon ...@@ -7,13 +7,13 @@ dragon
------- -------
`class EagerTensor <dragon/EagerTensor.html>`_ `class EagerTensor <dragon/EagerTensor.html>`_
: Tensor abstraction under the eager execution. : Tensor abstraction for eager executing.
`class GradientTape <dragon/GradientTape.html>`_ `class GradientTape <dragon/GradientTape.html>`_
: Record the operations for auto differentiation. : Record the operations for auto differentiation.
`class Tensor <dragon/Tensor.html>`_ `class Tensor <dragon/Tensor.html>`_
: Tensor abstraction under the graph execution. : Tensor abstraction for graph executing.
`class TensorSpec <dragon/TensorSpec.html>`_ `class TensorSpec <dragon/TensorSpec.html>`_
: Spec to describe properties of a tensor. : Spec to describe properties of a tensor.
...@@ -25,10 +25,10 @@ dragon ...@@ -25,10 +25,10 @@ dragon
--------- ---------
`arange(...) <dragon/arange.html>`_ `arange(...) <dragon/arange.html>`_
: Return a tensor with evenly spaced values within an interval. : Return a tensor of evenly spaced values within an interval.
`assign(...) <dragon/assign.html>`_ `assign(...) <dragon/assign.html>`_
: Assign the value to ref. : Assign the value to input.
`broadcast_to(...) <dragon/broadcast_to.html>`_ `broadcast_to(...) <dragon/broadcast_to.html>`_
: Broadcast input according to a given shape. : Broadcast input according to a given shape.
...@@ -46,10 +46,10 @@ dragon ...@@ -46,10 +46,10 @@ dragon
: Concatenate the inputs along the given axis. : Concatenate the inputs along the given axis.
`constant(...) <dragon/constant.html>`_ `constant(...) <dragon/constant.html>`_
: Return a tensor taking the value content. : Return a tensor initialized from the value.
`copy(...) <dragon/copy.html>`_ `copy(...) <dragon/copy.html>`_
: Copy the value to ref. : Copy the input.
`create_function(...) <dragon/create_function.html>`_ `create_function(...) <dragon/create_function.html>`_
: Create a callable graph from the specified outputs. : Create a callable graph from the specified outputs.
...@@ -70,10 +70,10 @@ dragon ...@@ -70,10 +70,10 @@ dragon
: Return a tensor constructed as the identity matrix. : Return a tensor constructed as the identity matrix.
`eye_like(...) <dragon/eye_like.html>`_ `eye_like(...) <dragon/eye_like.html>`_
: Return a tensor shaping like another constructed as the identity matrix. : Return a tensor of identity matrix with shape as the other.
`fill(...) <dragon/fill.html>`_ `fill(...) <dragon/fill.html>`_
: Return a tensor filled with the specific value. : Return a tensor filled with the scalar value.
`flatten(...) <dragon/flatten.html>`_ `flatten(...) <dragon/flatten.html>`_
: Flatten the input along the given axes. : Flatten the input along the given axes.
...@@ -97,22 +97,22 @@ dragon ...@@ -97,22 +97,22 @@ dragon
: Load a shared library. : Load a shared library.
`masked_assign(...) <dragon/masked_assign.html>`_ `masked_assign(...) <dragon/masked_assign.html>`_
: Assign the value to ref where mask is **1**. : Assign the value to input where mask is 1.
`masked_select(...) <dragon/masked_select.html>`_ `masked_select(...) <dragon/masked_select.html>`_
: Select the elements where the given mask is **1**. : Select the elements of input where mask is 1.
`name_scope(...) <dragon/name_scope.html>`_ `name_scope(...) <dragon/name_scope.html>`_
: Context-manager to nest the name as prefix for operations. : Context-manager to nest the name as prefix for operations.
`nonzero(...) <dragon/nonzero.html>`_ `nonzero(...) <dragon/nonzero.html>`_
: Return the indices of non-zero elements. : Return the indices of non-zero elements.
`ones(...) <dragon/ones.html>`_ `ones(...) <dragon/ones.html>`_
: Return a tensor filled with ones. : Return a tensor filled with ones.
`ones_like(...) <dragon/ones_like.html>`_ `ones_like(...) <dragon/ones_like.html>`_
: Return a tensor shaping like another filled with ones. : Return a tensor of ones with shape as the other.
`one_hot(...) <dragon/one_hot.html>`_ `one_hot(...) <dragon/one_hot.html>`_
: Return the one-hot representation for input. : Return the one-hot representation for input.
...@@ -163,7 +163,7 @@ dragon ...@@ -163,7 +163,7 @@ dragon
: Return a tensor filled with zeros. : Return a tensor filled with zeros.
`zeros_like(...) <dragon/zeros_like.html>`_ `zeros_like(...) <dragon/zeros_like.html>`_
: Return a tensor shaping like another filled with zeros. : Return a tensor of zeros with shape as the other.
.. toctree:: .. toctree::
:hidden: :hidden:
......
...@@ -92,10 +92,6 @@ __add__ ...@@ -92,10 +92,6 @@ __add__
####### #######
.. automethod:: dragon.EagerTensor.__add__ .. automethod:: dragon.EagerTensor.__add__
__div__
#######
.. automethod:: dragon.EagerTensor.__div__
__float__ __float__
######### #########
.. automethod:: dragon.EagerTensor.__float__ .. automethod:: dragon.EagerTensor.__float__
...@@ -116,10 +112,6 @@ __iadd__ ...@@ -116,10 +112,6 @@ __iadd__
######## ########
.. automethod:: dragon.EagerTensor.__iadd__ .. automethod:: dragon.EagerTensor.__iadd__
__idiv__
########
.. automethod:: dragon.EagerTensor.__idiv__
__imul__ __imul__
######## ########
.. automethod:: dragon.EagerTensor.__imul__ .. automethod:: dragon.EagerTensor.__imul__
...@@ -132,6 +124,10 @@ __isub__ ...@@ -132,6 +124,10 @@ __isub__
######## ########
.. automethod:: dragon.EagerTensor.__isub__ .. automethod:: dragon.EagerTensor.__isub__
__itruediv__
############
.. automethod:: dragon.EagerTensor.__itruediv__
__le__ __le__
###### ######
.. automethod:: dragon.EagerTensor.__le__ .. automethod:: dragon.EagerTensor.__le__
...@@ -152,10 +148,6 @@ __radd__ ...@@ -152,10 +148,6 @@ __radd__
######## ########
.. automethod:: dragon.EagerTensor.__radd__ .. automethod:: dragon.EagerTensor.__radd__
__rdiv__
########
.. automethod:: dragon.EagerTensor.__rdiv__
__rmul__ __rmul__
######## ########
.. automethod:: dragon.EagerTensor.__rmul__ .. automethod:: dragon.EagerTensor.__rmul__
...@@ -164,6 +156,10 @@ __rsub__ ...@@ -164,6 +156,10 @@ __rsub__
######## ########
.. automethod:: dragon.EagerTensor.__rsub__ .. automethod:: dragon.EagerTensor.__rsub__
__rtruediv__
############
.. automethod:: dragon.EagerTensor.__rtruediv__
__setitem__ __setitem__
########### ###########
.. automethod:: dragon.EagerTensor.__setitem__ .. automethod:: dragon.EagerTensor.__setitem__
...@@ -172,18 +168,13 @@ __sub__ ...@@ -172,18 +168,13 @@ __sub__
####### #######
.. automethod:: dragon.EagerTensor.__sub__ .. automethod:: dragon.EagerTensor.__sub__
__truediv__
###########
.. automethod:: dragon.EagerTensor.__truediv__
.. _dragon.assign(...): assign.html .. _dragon.assign(...): assign.html
.. _dragon.cast(...): cast.html .. _dragon.cast(...): cast.html
.. _dragon.copy(...): copy.html .. _dragon.copy(...): copy.html
.. _dragon.math.add(...): math/add.html
.. _dragon.math.div(...): math/div.html
.. _dragon.math.greater(...): math/greater.html
.. _dragon.math.greater_equal(...): math/greater_equal.html
.. _dragon.math.less(...): math/less.html
.. _dragon.math.less_equal(...): math/less_equal.html
.. _dragon.math.mul(...): math/mul.html
.. _dragon.math.negative(...): math/negative.html
.. _dragon.math.sub(...): math/sub.html
.. _dragon.masked_assign(...): masked_assign.html .. _dragon.masked_assign(...): masked_assign.html
.. _dragon.masked_select(...): masked_select.html .. _dragon.masked_select(...): masked_select.html
.. _dragon.reshape(...): reshape.html .. _dragon.reshape(...): reshape.html
......
...@@ -65,10 +65,6 @@ normal ...@@ -65,10 +65,6 @@ normal
###### ######
.. automethod:: dragon.Tensor.normal .. automethod:: dragon.Tensor.normal
placeholder
###########
.. automethod:: dragon.Tensor.placeholder
reshape reshape
####### #######
.. automethod:: dragon.Tensor.reshape .. automethod:: dragon.Tensor.reshape
...@@ -85,10 +81,6 @@ uniform ...@@ -85,10 +81,6 @@ uniform
####### #######
.. automethod:: dragon.Tensor.uniform .. automethod:: dragon.Tensor.uniform
variable
########
.. automethod:: dragon.Tensor.variable
Overrides Overrides
--------- ---------
...@@ -96,10 +88,6 @@ __add__ ...@@ -96,10 +88,6 @@ __add__
####### #######
.. automethod:: dragon.Tensor.__add__ .. automethod:: dragon.Tensor.__add__
__div__
#######
.. automethod:: dragon.Tensor.__div__
__float__ __float__
######### #########
.. automethod:: dragon.Tensor.__float__ .. automethod:: dragon.Tensor.__float__
...@@ -140,10 +128,6 @@ __radd__ ...@@ -140,10 +128,6 @@ __radd__
######## ########
.. automethod:: dragon.Tensor.__radd__ .. automethod:: dragon.Tensor.__radd__
__rdiv__
########
.. automethod:: dragon.Tensor.__rdiv__
__rmul__ __rmul__
######## ########
.. automethod:: dragon.Tensor.__rmul__ .. automethod:: dragon.Tensor.__rmul__
...@@ -160,24 +144,21 @@ __sub__ ...@@ -160,24 +144,21 @@ __sub__
####### #######
.. automethod:: dragon.Tensor.__sub__ .. automethod:: dragon.Tensor.__sub__
__rtruediv__
############
.. automethod:: dragon.Tensor.__rtruediv__
__truediv__
############
.. automethod:: dragon.Tensor.__truediv__
.. _dragon.assign(...): assign.html .. _dragon.assign(...): assign.html
.. _dragon.cast(...): cast.html .. _dragon.cast(...): cast.html
.. _dragon.copy(...): copy.html .. _dragon.copy(...): copy.html
.. _dragon.math.add(...): math/add.html
.. _dragon.math.div(...): math/div.html
.. _dragon.math.greater(...): math/greater.html
.. _dragon.math.greater_equal(...): math/greater_equal.html
.. _dragon.math.less(...): math/less.html
.. _dragon.math.less_equal(...): math/less_equal.html
.. _dragon.math.mul(...): math/mul.html
.. _dragon.math.negative(...): math/negative.html
.. _dragon.math.sub(...): math/sub.html
.. _dragon.masked_assign(...): masked_assign.html .. _dragon.masked_assign(...): masked_assign.html
.. _dragon.masked_select(...): masked_select.html .. _dragon.masked_select(...): masked_select.html
.. _dragon.reshape(...): reshape.html .. _dragon.reshape(...): reshape.html
.. _dragon.slice(...): slice.html .. _dragon.slice(...): slice.html
.. _dragon.workspace.feed_tensor(...): workspace/feed_tensor.html
.. _dragon.workspace.fetch_tensor(...): workspace/fetch_tensor.html
.. raw:: html .. raw:: html
......
...@@ -13,13 +13,13 @@ dragon.random ...@@ -13,13 +13,13 @@ dragon.random
: Return a tensor initialized from the glorot uniform distribution. : Return a tensor initialized from the glorot uniform distribution.
`multinomial(...) <random/multinomial.html>`_ `multinomial(...) <random/multinomial.html>`_
: Return a tensor with indices sampled from multinomial distribution. : Return a tensor with indices sampled from the multinomial distribution.
`normal(...) <random/normal.html>`_ `normal(...) <random/normal.html>`_
: Return a tensor initialized from the normal distribution. : Return a tensor initialized from the normal distribution.
`normal_like(...) <random/normal_like.html>`_ `normal_like(...) <random/normal_like.html>`_
: Return a tensor shaping like another initialized from the normal distribution. : Return a tensor initialized from the normal distribution with shape as the other.
`set_seed(...) <random/set_seed.html>`_ `set_seed(...) <random/set_seed.html>`_
: Set the global random seed. : Set the global random seed.
...@@ -31,7 +31,7 @@ dragon.random ...@@ -31,7 +31,7 @@ dragon.random
: Return a tensor initialized from the uniform distribution. : Return a tensor initialized from the uniform distribution.
`uniform_like(...) <random/uniform_like.html>`_ `uniform_like(...) <random/uniform_like.html>`_
: Return a tensor shaping like another initialized from the uniform distribution. : Return a tensor initialized from the uniform distribution with shape as the other.
.. toctree:: .. toctree::
:hidden: :hidden:
......
...@@ -71,13 +71,10 @@ TensorFlow ...@@ -71,13 +71,10 @@ TensorFlow
* `tensorflow <tensorflow.html>`_ * `tensorflow <tensorflow.html>`_
* `tensorflow.bitwise <tensorflow/bitwise.html>`_ * `tensorflow.bitwise <tensorflow/bitwise.html>`_
* `tensorflow.dtypes <tensorflow/dtypes.html>`_ * `tensorflow.dtypes <tensorflow/dtypes.html>`_
* `tensorflow.initializers <tensorflow/initializers.html>`_
* `tensorflow.keras <tensorflow/keras.html>`_ * `tensorflow.keras <tensorflow/keras.html>`_
* `tensorflow.linalg <tensorflow/linalg.html>`_ * `tensorflow.linalg <tensorflow/linalg.html>`_
* `tensorflow.losses <tensorflow/losses.html>`_
* `tensorflow.math <tensorflow/math.html>`_ * `tensorflow.math <tensorflow/math.html>`_
* `tensorflow.nn <tensorflow/nn.html>`_ * `tensorflow.nn <tensorflow/nn.html>`_
* `tensorflow.optimizers <tensorflow/optimizers.html>`_
* `tensorflow.random <tensorflow/random.html>`_ * `tensorflow.random <tensorflow/random.html>`_
TensorLayer TensorLayer
...@@ -233,27 +230,18 @@ Modules ...@@ -233,27 +230,18 @@ Modules
`Module vm.tensorflow.dtypes <tensorflow/dtypes.html>`_ `Module vm.tensorflow.dtypes <tensorflow/dtypes.html>`_
: Virtual API for ``tensorflow.dtypes`` namespace. : Virtual API for ``tensorflow.dtypes`` namespace.
`Module vm.tensorflow.initializers <tensorflow/initializers.html>`_
: Virtual API for ``tensorflow.initializers`` namespace.
`Module vm.tensorflow.keras <tensorflow/keras.html>`_ `Module vm.tensorflow.keras <tensorflow/keras.html>`_
: Virtual API for ``tensorflow.keras`` namespace. : Virtual API for ``tensorflow.keras`` namespace.
`Module vm.tensorflow.linalg <tensorflow/linalg.html>`_ `Module vm.tensorflow.linalg <tensorflow/linalg.html>`_
: Virtual API for ``tensorflow.linalg`` namespace. : Virtual API for ``tensorflow.linalg`` namespace.
`Module vm.tensorflow.losses <tensorflow/losses.html>`_
: Virtual API for ``tensorflow.losses`` namespace.
`Module vm.tensorflow.math <tensorflow/math.html>`_ `Module vm.tensorflow.math <tensorflow/math.html>`_
: Virtual API for ``tensorflow.math`` namespace. : Virtual API for ``tensorflow.math`` namespace.
`Module vm.tensorflow.nn <tensorflow/nn.html>`_ `Module vm.tensorflow.nn <tensorflow/nn.html>`_
: Virtual API for ``tensorflow.nn`` namespace. : Virtual API for ``tensorflow.nn`` namespace.
`Module vm.tensorflow.optimizers <tensorflow/optimizers.html>`_
: Virtual API for ``tensorflow.optimizers`` namespace.
`Module vm.tensorflow.random <tensorflow/random.html>`_ `Module vm.tensorflow.random <tensorflow/random.html>`_
: Virtual API for ``tensorflow.random`` namespace. : Virtual API for ``tensorflow.random`` namespace.
...@@ -325,13 +313,10 @@ Modules ...@@ -325,13 +313,10 @@ Modules
tensorflow tensorflow
tensorflow/bitwise tensorflow/bitwise
tensorflow/dtypes tensorflow/dtypes
tensorflow/initializers
tensorflow/keras tensorflow/keras
tensorflow/linalg tensorflow/linalg
tensorflow/losses
tensorflow/math tensorflow/math
tensorflow/nn tensorflow/nn
tensorflow/optimizers
tensorflow/random tensorflow/random
tensorlayer/initializers tensorlayer/initializers
tensorlayer/layers tensorlayer/layers
......
...@@ -28,7 +28,7 @@ vm.tensorflow ...@@ -28,7 +28,7 @@ vm.tensorflow
: Concatenate the values along the given axis. : Concatenate the values along the given axis.
`constant(...) <tensorflow/constant.html>`_ `constant(...) <tensorflow/constant.html>`_
: Return a tensor taking the value content. : Return a tensor initialized from the value.
`device(...) <tensorflow/device.html>`_ `device(...) <tensorflow/device.html>`_
: Context-manager to nest the device spec. : Context-manager to nest the device spec.
...@@ -40,7 +40,7 @@ vm.tensorflow ...@@ -40,7 +40,7 @@ vm.tensorflow
: Return a tensor constructed as the identity matrix. : Return a tensor constructed as the identity matrix.
`fill(...) <tensorflow/fill.html>`_ `fill(...) <tensorflow/fill.html>`_
: Return a tensor filled with the specific value. : Return a tensor filled with the scalar value.
`gather(...) <tensorflow/gather.html>`_ `gather(...) <tensorflow/gather.html>`_
: Select the elements according to the indices along the given axis. : Select the elements according to the indices along the given axis.
...@@ -61,16 +61,16 @@ vm.tensorflow ...@@ -61,16 +61,16 @@ vm.tensorflow
: Return a tensor filled with ones. : Return a tensor filled with ones.
`ones_like(...) <tensorflow/ones_like.html>`_ `ones_like(...) <tensorflow/ones_like.html>`_
: Return a tensor shaping like another filled with ones. : Return a tensor of ones with shape as the other.
`one_hot(...) <tensorflow/one_hot.html>`_ `one_hot(...) <tensorflow/one_hot.html>`_
: Return the one-hot representation from indices. : Return the one-hot representation for input.
`pad(...) <tensorflow/pad.html>`_ `pad(...) <tensorflow/pad.html>`_
: Pad the input according to the given sizes. : Pad the input according to the given sizes.
`range(...) <tensorflow/range.html>`_ `range(...) <tensorflow/range.html>`_
: Return a tensor with evenly spaced values within an interval. : Return a tensor of evenly spaced values within an interval.
`reshape(...) <tensorflow/reshape.html>`_ `reshape(...) <tensorflow/reshape.html>`_
: Change the dimensions of input. : Change the dimensions of input.
...@@ -94,7 +94,7 @@ vm.tensorflow ...@@ -94,7 +94,7 @@ vm.tensorflow
: Return a tensor filled with zeros. : Return a tensor filled with zeros.
`zeros_like(...) <tensorflow/zeros_like.html>`_ `zeros_like(...) <tensorflow/zeros_like.html>`_
: Return a tensor shaping like another filled with zeros. : Return a tensor of zeros with shape as the other.
.. toctree:: .. toctree::
:hidden: :hidden:
......
vm.tensorflow.initializers
==========================
.. only:: html
Classes
-------
`class Constant <keras/initializers/Constant.html>`_
: Fill tensors with a scalar.
`class GlorotNormal <keras/initializers/GlorotNormal.html>`_
: Fill tensors according to a glorot normal distribution.
`class GlorotUniform <keras/initializers/GlorotUniform.html>`_
: Fill tensors according to a glorot uniform distribution.
`class Initializer <keras/initializers/GlorotUniform.html>`_
: The basic Initializer.
`class Ones <keras/initializers/Ones.html>`_
: Fill tensors with ones.
`class RandomNormal <keras/initializers/RandomNormal.html>`_
: Fill tensors according to a random normal distribution.
`class RandomUniform <keras/initializers/RandomUniform.html>`_
: Fill tensors according to a random uniform distribution.
`class TruncatedNormal <keras/initializers/TruncatedNormal.html>`_
: Fill tensors according to a truncated normal distribution.
`class VarianceScaling <keras/initializers/VarianceScaling.html>`_
: Fill tensors with the random values adapting to shape.
`class Zeros <keras/initializers/Zeros.html>`_
: Fill tensors with zeros.
.. raw:: html
<style>
h1:before {
content: "Module: dragon.";
color: #103d3e;
}
</style>
...@@ -7,31 +7,31 @@ initializers ...@@ -7,31 +7,31 @@ initializers
------- -------
`class Constant <initializers/Constant.html>`_ `class Constant <initializers/Constant.html>`_
: Fill tensors with a scalar. : Fill tensor with a scalar value.
`class GlorotNormal <initializers/GlorotNormal.html>`_ `class GlorotNormal <initializers/GlorotNormal.html>`_
: Fill tensors according to a glorot normal distribution. : Fill tensor from a glorot normal distribution.
`class GlorotUniform <initializers/GlorotUniform.html>`_ `class GlorotUniform <initializers/GlorotUniform.html>`_
: Fill tensors according to a glorot uniform distribution. : Fill tensor from a glorot uniform distribution.
`class Initializer <initializers/Initializer.html>`_ `class Initializer <initializers/Initializer.html>`_
: The basic Initializer. : The basic Initializer.
`class Ones <initializers/Ones.html>`_ `class Ones <initializers/Ones.html>`_
: Fill tensors with ones. : Fill tensor with ones.
`class RandomNormal <initializers/RandomNormal.html>`_ `class RandomNormal <initializers/RandomNormal.html>`_
: Fill tensors according to a random normal distribution. : Fill tensor from a normal distribution.
`class RandomUniform <initializers/RandomUniform.html>`_ `class RandomUniform <initializers/RandomUniform.html>`_
: Fill tensors according to a random uniform distribution. : Fill tensor from a uniform distribution.
`class TruncatedNormal <initializers/TruncatedNormal.html>`_ `class TruncatedNormal <initializers/TruncatedNormal.html>`_
: Fill tensors according to a truncated normal distribution. : Fill tensor from a truncated normal distribution.
`class VarianceScaling <initializers/VarianceScaling.html>`_ `class VarianceScaling <initializers/VarianceScaling.html>`_
: Fill tensors with the random values adapting to shape. : Fill tensor from a scaled random distribution.
`class Zeros <initializers/Zeros.html>`_ `class Zeros <initializers/Zeros.html>`_
: Fill tensors with zeros. : Fill tensors with zeros.
......
...@@ -7,18 +7,18 @@ optimizers ...@@ -7,18 +7,18 @@ optimizers
------- -------
`class Adam <optimizers/Adam.html>`_ `class Adam <optimizers/Adam.html>`_
: The optimizer which implements Adam algorithm. : The optimizer to apply Adam algorithm.
`[Kingma & Ba, 2014] <https://arxiv.org/abs/1412.6980>`_. `[Kingma & Ba, 2014] <https://arxiv.org/abs/1412.6980>`_.
`class Optimizer <optimizers/Optimizer.html>`_ `class Optimizer <optimizers/Optimizer.html>`_
: The base class for optimizers. : The base class for optimizers.
`class RMSprop <optimizers/RMSprop.html>`_ `class RMSprop <optimizers/RMSprop.html>`_
: The optimizer which implements RMSprop algorithm. : The optimizer to apply RMSprop algorithm.
`[Hinton et al., 2013] <http://www.cs.utoronto.ca/~bonner/courses/2016s/csc321/lectures/lec6.pdf>`_. `[Hinton et al., 2013] <http://www.cs.utoronto.ca/~bonner/courses/2016s/csc321/lectures/lec6.pdf>`_.
`class SGD <optimizers/SGD.html>`_ `class SGD <optimizers/SGD.html>`_
: The optimizer which implements SGD algorithm. : The optimizer to apply SGD algorithm.
.. toctree:: .. toctree::
:hidden: :hidden:
......
vm.tensorflow.losses
====================
.. only:: html
Classes
-------
`class BinaryCrossentropy <keras/losses/BinaryCrossentropy.html>`_
: A criterion to compute the binary cross entropy with contiguous targets.
`class CategoricalCrossentropy <keras/losses/CategoricalCrossentropy.html>`_
: A criterion to compute the categorical cross entropy with contiguous targets.
`class Loss <keras/losses/Loss.html>`_
: The base class for loss criterion.
`class MeanAbsoluteError <keras/losses/MeanAbsoluteError.html>`_
: A criterion to compute the reduced element-wise absolute value difference.
`class MeanSquaredError <keras/losses/MeanSquaredError.html>`_
: A criterion to compute the reduced element-wise squared error.
`class SparseCategoricalCrossentropy <keras/losses/SparseCategoricalCrossentropy.html>`_
: A criterion to compute the categorical cross entropy with sparse labels.
Functions
---------
`binary_crossentropy(...) <keras/losses/binary_crossentropy.html>`_
: Compute the binary cross entropy with contiguous targets.
`categorical_crossentropy(...) <keras/losses/categorical_crossentropy.html>`_
: Compute the categorical cross entropy with contiguous targets.
`mean_absolute_error(...) <keras/losses/mean_absolute_error.html>`_
: Compute the reduced element-wise absolute value difference.
`mean_squared_error(...) <keras/losses/mean_squared_error.html>`_
: Compute the reduced element-wise squared error.
`sparse_categorical_crossentropy(...) <keras/losses/sparse_categorical_crossentropy.html>`_
: Compute the categorical cross entropy with sparse labels.
.. raw:: html
<style>
h1:before {
content: "Module: dragon.";
color: #103d3e;
}
</style>
vm.tensorflow.optimizers
========================
.. only:: html
Classes
-------
`class Adam <keras/optimizers/Adam.html>`_
: The optimizer which implements Adam algorithm.
`[Kingma & Ba, 2014] <https://arxiv.org/abs/1412.6980>`_.
`class Optimizer <keras/optimizers/Optimizer.html>`_
: The base class for optimizers.
`class RMSprop <keras/optimizers/RMSprop.html>`_
: The optimizer which implements RMSprop algorithm.
`[Hinton et.al, 2013] <http://www.cs.utoronto.ca/~bonner/courses/2016s/csc321/lectures/lec6.pdf>`_.
`class SGD <keras/optimizers/SGD.html>`_
: The optimizer which implements SGD algorithm.
.. raw:: html
<style>
h1:before {
content: "Module: dragon.";
color: #103d3e;
}
</style>
...@@ -7,31 +7,31 @@ vm.tensorlayer.initializers ...@@ -7,31 +7,31 @@ vm.tensorlayer.initializers
------- -------
`class Constant <initializers/Constant.html>`_ `class Constant <initializers/Constant.html>`_
: Fill tensors with a scalar. : Fill tensor with a scalar value.
`class GlorotNormal <initializers/GlorotNormal.html>`_ `class GlorotNormal <initializers/GlorotNormal.html>`_
: Fill tensors according to a glorot normal distribution. : Fill tensor from a glorot normal distribution.
`class GlorotUniform <initializers/GlorotUniform.html>`_ `class GlorotUniform <initializers/GlorotUniform.html>`_
: Fill tensors according to a glorot uniform distribution. : Fill tensor from a glorot uniform distribution.
`class Initializer <initializers/Initializer.html>`_ `class Initializer <initializers/Initializer.html>`_
: The basic Initializer. : The basic Initializer.
`class Ones <initializers/Ones.html>`_ `class Ones <initializers/Ones.html>`_
: Fill tensors with ones. : Fill tensor with ones.
`class RandomNormal <initializers/RandomNormal.html>`_ `class RandomNormal <initializers/RandomNormal.html>`_
: Fill tensors according to a random normal distribution. : Fill tensor from a normal distribution.
`class RandomUniform <initializers/RandomUniform.html>`_ `class RandomUniform <initializers/RandomUniform.html>`_
: Fill tensors according to a random uniform distribution. : Fill tensor from a uniform distribution.
`class TruncatedNormal <initializers/TruncatedNormal.html>`_ `class TruncatedNormal <initializers/TruncatedNormal.html>`_
: Fill tensors according to a truncated normal distribution. : Fill tensor from a truncated normal distribution.
`class Zeros <initializers/Zeros.html>`_ `class Zeros <initializers/Zeros.html>`_
: Fill tensors with zeros. : Fill tensor with zeros.
.. toctree:: .. toctree::
:hidden: :hidden:
......
...@@ -34,7 +34,7 @@ vm.torch ...@@ -34,7 +34,7 @@ vm.torch
: Compute the element-wise addition. : Compute the element-wise addition.
`arange(...) <torch/arange.html>`_ `arange(...) <torch/arange.html>`_
: Return a tensor with evenly spaced values within an interval. : Return a tensor of evenly spaced values within an interval.
`argmax(...) <torch/argmax.html>`_ `argmax(...) <torch/argmax.html>`_
: Return the indices of maximum elements along the given axis. : Return the indices of maximum elements along the given axis.
...@@ -163,14 +163,11 @@ vm.torch ...@@ -163,14 +163,11 @@ vm.torch
`nonzero(...) <torch/nonzero.html>`_ `nonzero(...) <torch/nonzero.html>`_
: Return the indices of non-zero elements. : Return the indices of non-zero elements.
`normal(...) <torch/normal.html>`_
: Return a tensor with a normal distribution.
`ones(...) <torch/ones.html>`_ `ones(...) <torch/ones.html>`_
: Return a tensor with value 1 filled. : Return a tensor filled with ones.
`ones_like(...) <torch/ones_like.html>`_ `ones_like(...) <torch/ones_like.html>`_
: Return a tensor with value 1 filled, shape as input. : Return a tensor of ones with shape as the other.
`one_hot(...) <torch/one_hot.html>`_ `one_hot(...) <torch/one_hot.html>`_
: Return the one-hot representation for input. : Return the one-hot representation for input.
...@@ -182,10 +179,10 @@ vm.torch ...@@ -182,10 +179,10 @@ vm.torch
: Compute the power of input. : Compute the power of input.
`rand(...) <torch/rand.html>`_ `rand(...) <torch/rand.html>`_
: Return a float tensor with a uniform distribution of U(0, 1). : Return a tensor from the uniform distribution of U(0, 1).
`randn(...) <torch/randn.html>`_ `randn(...) <torch/randn.html>`_
: Return a float tensor with a normal distribution of N(0, 1). : Return a tensor from the normal distribution of N(0, 1).
`reciprocal(...) <torch/reciprocal.html>`_ `reciprocal(...) <torch/reciprocal.html>`_
: Compute the reciprocal of input. : Compute the reciprocal of input.
...@@ -235,9 +232,6 @@ vm.torch ...@@ -235,9 +232,6 @@ vm.torch
`topk_acc(...) <torch/topk_acc.html>`_ `topk_acc(...) <torch/topk_acc.html>`_
: Compute the top-k accuracy according to the label. : Compute the top-k accuracy according to the label.
`uniform(...) <torch/uniform.html>`_
: Return a tensor with a normal distribution.
`unsqueeze(...) <torch/unsqueeze.html>`_ `unsqueeze(...) <torch/unsqueeze.html>`_
: Expand the dimensions of input with size 1. : Expand the dimensions of input with size 1.
...@@ -245,10 +239,10 @@ vm.torch ...@@ -245,10 +239,10 @@ vm.torch
: Select the elements from two branches under the condition. : Select the elements from two branches under the condition.
`zeros(...) <torch/zeros.html>`_ `zeros(...) <torch/zeros.html>`_
: Return a tensor with value 0 filled. : Return a tensor filled with zeros.
`zeros_like(...) <torch/zeros_like.html>`_ `zeros_like(...) <torch/zeros_like.html>`_
: Return a tensor with value 0s filled, shape as input. : Return a tensor of zeros with shape as the other.
.. toctree:: .. toctree::
:hidden: :hidden:
...@@ -301,7 +295,6 @@ vm.torch ...@@ -301,7 +295,6 @@ vm.torch
torch/ne torch/ne
torch/no_grad torch/no_grad
torch/nonzero torch/nonzero
torch/normal
torch/ones torch/ones
torch/ones_like torch/ones_like
torch/one_hot torch/one_hot
...@@ -328,7 +321,6 @@ vm.torch ...@@ -328,7 +321,6 @@ vm.torch
torch/tensor torch/tensor
torch/topk torch/topk
torch/topk_acc torch/topk_acc
torch/uniform
torch/unsqueeze torch/unsqueeze
torch/where torch/where
torch/zeros_like torch/zeros_like
......
...@@ -26,6 +26,10 @@ id ...@@ -26,6 +26,10 @@ id
### ###
.. autoattribute:: dragon.vm.torch.Tensor.id .. autoattribute:: dragon.vm.torch.Tensor.id
is_leaf
#######
.. autoattribute:: dragon.vm.torch.Tensor.is_leaf
requires_grad requires_grad
############# #############
.. autoattribute:: dragon.vm.torch.Tensor.requires_grad .. autoattribute:: dragon.vm.torch.Tensor.requires_grad
...@@ -341,6 +345,10 @@ reshape\_ ...@@ -341,6 +345,10 @@ reshape\_
######### #########
.. automethod:: dragon.vm.torch.Tensor.reshape_ .. automethod:: dragon.vm.torch.Tensor.reshape_
retain_grad
###########
.. automethod:: dragon.vm.torch.Tensor.retain_grad
round round
##### #####
.. automethod:: dragon.vm.torch.Tensor.round .. automethod:: dragon.vm.torch.Tensor.round
......
normal
======
.. autofunction:: dragon.vm.torch.normal
.. raw:: html
<style>
h1:before {
content: "torch.";
color: #103d3e;
}
</style>
...@@ -7,17 +7,17 @@ vm.torch.optim ...@@ -7,17 +7,17 @@ vm.torch.optim
------- -------
`class Adam <optim/Adam.html>`_ `class Adam <optim/Adam.html>`_
: The optimizer which implements Adam algorithm. : The optimizer to apply Adam algorithm.
`class Optimizer <optim/Optimizer.html>`_ `class Optimizer <optim/Optimizer.html>`_
: The base class of optimizers. : The base class of optimizers.
`class RMSprop <optim/RMSprop.html>`_ `class RMSprop <optim/RMSprop.html>`_
: The optimizer which implements RMSprop algorithm. : The optimizer to apply RMSprop algorithm.
`[Hinton et al., 2013] <http://www.cs.utoronto.ca/~bonner/courses/2016s/csc321/lectures/lec6.pdf>`_. `[Hinton et al., 2013] <http://www.cs.utoronto.ca/~bonner/courses/2016s/csc321/lectures/lec6.pdf>`_.
`class SGD <optim/SGD.html>`_ `class SGD <optim/SGD.html>`_
: The optimizer which implements SGD algorithm. : The optimizer to apply SGD algorithm.
.. toctree:: .. toctree::
:hidden: :hidden:
......
uniform
=======
.. autofunction:: dragon.vm.torch.uniform
.. raw:: html
<style>
h1:before {
content: "torch.";
color: #103d3e;
}
</style>
...@@ -255,7 +255,7 @@ class FunctionGuard(object): ...@@ -255,7 +255,7 @@ class FunctionGuard(object):
) )
shape = input_signature[i].shape shape = input_signature[i].shape
dtype = input_signature[i].dtype dtype = input_signature[i].dtype
inputs.append(Tensor(name, shape, dtype).variable()) inputs.append(Tensor(name, shape, dtype).constant())
with context.name_scope('${%d}' % id(self)), eager_context.graph_mode(): with context.name_scope('${%d}' % id(self)), eager_context.graph_mode():
returns = nest.flatten(self._python_function(*inputs)) returns = nest.flatten(self._python_function(*inputs))
outputs, dummies = [], [] outputs, dummies = [], []
......
...@@ -259,7 +259,7 @@ class Function(object): ...@@ -259,7 +259,7 @@ class Function(object):
""" """
self.outputs = [Tensor(name) for name in graph_def.output] self.outputs = [Tensor(name) for name in graph_def.output]
self.inputs = [Tensor(name).variable() for name in graph_def.input] self.inputs = [Tensor(name).constant() for name in graph_def.input]
# Fill with all known graph elements. # Fill with all known graph elements.
add_device_option(graph_def) add_device_option(graph_def)
...@@ -294,7 +294,7 @@ def create_function(inputs=None, outputs=None, givens=None, updater=None): ...@@ -294,7 +294,7 @@ def create_function(inputs=None, outputs=None, givens=None, updater=None):
Tensors that catch any operators can be used to create a graph: Tensors that catch any operators can be used to create a graph:
```python ```python
x = dragon.Tensor('x', dtype='float32').variable() x = dragon.Tensor('x', dtype='float32').constant()
y = x * 2 y = x * 2
f = dragon.create_function(outputs=y) f = dragon.create_function(outputs=y)
``` ```
...@@ -316,12 +316,12 @@ def create_function(inputs=None, outputs=None, givens=None, updater=None): ...@@ -316,12 +316,12 @@ def create_function(inputs=None, outputs=None, givens=None, updater=None):
Specify ``givens`` to substitute tensors before creating: Specify ``givens`` to substitute tensors before creating:
```python ```python
x = dragon.Tensor('x', dtype='float32').variable() x = dragon.Tensor('x', dtype='float32').constant()
y = x * 2 y = x * 2
foo = dragon.create_function(outputs=y) foo = dragon.create_function(outputs=y)
# "bar" takes "x2" as input, and also writes to "y" # "bar" takes "x2" as input, and also writes to "y"
x2 = dragon.Tensor('x2', dtype='float32').variable() x2 = dragon.Tensor('x2', dtype='float32').constant()
bar = dragon.create_function(outputs=y, givens={x: x2}) bar = dragon.create_function(outputs=y, givens={x: x2})
``` ```
......
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
# #
# ------------------------------------------------------------ # ------------------------------------------------------------
"""Define the symbolic tensor abstraction.""" """The graph executing tensor."""
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import division from __future__ import division
...@@ -26,12 +26,7 @@ from dragon.core.util import nest ...@@ -26,12 +26,7 @@ from dragon.core.util import nest
class Tensor(types.TensorMetaclass): class Tensor(types.TensorMetaclass):
"""Tensor abstraction under the graph execution. """Tensor abstraction for graph executing."""
It is provided to construct operators symbolically,
while can also be a navigation to the storage.
"""
def __init__(self, name=None, shape=None, dtype=None): def __init__(self, name=None, shape=None, dtype=None):
"""Create a ``Tensor``. """Create a ``Tensor``.
...@@ -180,12 +175,12 @@ class Tensor(types.TensorMetaclass): ...@@ -180,12 +175,12 @@ class Tensor(types.TensorMetaclass):
return math_util.prod(self._shape) return math_util.prod(self._shape)
def astype(self, dtype, inplace=False): def astype(self, dtype, inplace=False):
"""Cast the data type to a specific one. """Cast to the specified data type.
Parameters Parameters
---------- ----------
dtype : str dtype : str
The specific data type. The data type to cast to.
inplace : bool, optional, default=False inplace : bool, optional, default=False
Whether to do the cast in-place. Whether to do the cast in-place.
...@@ -201,14 +196,14 @@ class Tensor(types.TensorMetaclass): ...@@ -201,14 +196,14 @@ class Tensor(types.TensorMetaclass):
""" """
def constant(self, value=0): def constant(self, value=0):
r"""Register as a variable with constant initializer. r"""Register self to initialize from a scalar value.
.. math:: \text{self} \leftarrow \text{value} .. math:: \text{self} \leftarrow \text{value}
Parameters Parameters
---------- ----------
value : number, optional, default=0 value : number, optional, default=0
The constant value. The value to initialize.
Returns Returns
------- -------
...@@ -219,7 +214,7 @@ class Tensor(types.TensorMetaclass): ...@@ -219,7 +214,7 @@ class Tensor(types.TensorMetaclass):
return self._register_as('constant', value=value) return self._register_as('constant', value=value)
def copy(self): def copy(self):
"""Return a tensor with containing data copied. """Return a tensor with data copied.
Returns Returns
------- -------
...@@ -228,29 +223,31 @@ class Tensor(types.TensorMetaclass): ...@@ -228,29 +223,31 @@ class Tensor(types.TensorMetaclass):
See Also See Also
-------- --------
`dragon.copy(...)`_ : Copy the value to ref. `dragon.copy(...)`_ : Copy the input.
""" """
def get_value(self): def get_value(self):
"""Copy the data from storage. """Return the value of implementation.
Returns Returns
------- -------
numpy.ndarray numpy.ndarray
The deep copied value. The deep-copied value.
""" """
def glorot_normal(self, scale=2.): def glorot_normal(self, mode='fan_in', scale=2.0):
r"""Register as a variable with glorot normal initializer. r"""Register self to initialize from a glorot uniform distribution.
.. math:: \text{self} \leftarrow N(0, \sqrt{\frac{scale}{\text{FanIn}}}) .. math:: \text{self} \sim \mathcal{N}(0, \sqrt{\frac{scale}{\text{fan}}})
Parameters Parameters
---------- ----------
scale : number, optional, default=2. mode : {'fan_in, 'fan_out', 'fan_avg'}, optional
The scale factor of distribution. The mode to compute fans.
scale : float, optional, default=2.0
The scale factor to distribution.
Returns Returns
------- -------
...@@ -258,20 +255,20 @@ class Tensor(types.TensorMetaclass): ...@@ -258,20 +255,20 @@ class Tensor(types.TensorMetaclass):
The self. The self.
""" """
return self._register_as('glorot_normal', scale=scale) return self._register_as('glorot_normal', mode=mode, scale=scale)
def glorot_uniform(self, scale=3.): def glorot_uniform(self, mode='fan_in', scale=3.0):
r"""Register as a variable with glorot uniform initializer. r"""Register self to initialize from a glorot uniform distribution.
.. math:: \text{self} \leftarrow U( .. math:: \text{self} \sim \mathcal{U}(-\sqrt{\frac{scale}{\text{fan}}},
-\sqrt{\frac{scale}{\text{FanIn}}}, \sqrt{\frac{scale}{\text{fan}}})
\sqrt{\frac{scale}{\text{FanIn}}}
)
Parameters Parameters
---------- ----------
scale : number, optional, default=3. mode : {'fan_in, 'fan_out', 'fan_avg'}, optional
The scale factor of distribution. The mode to compute fans.
scale : float, optional, default=3.0
The scale factor to distribution.
Returns Returns
------- -------
...@@ -279,19 +276,19 @@ class Tensor(types.TensorMetaclass): ...@@ -279,19 +276,19 @@ class Tensor(types.TensorMetaclass):
The self. The self.
""" """
return self._register_as('glorot_uniform', scale=scale) return self._register_as('glorot_uniform', mode=mode, scale=scale)
def normal(self, mean=0, std=1): def normal(self, mean=0, std=1):
r"""Register as a variable with normal initializer. r"""Register self to initialize from a normal distribution.
.. math:: \text{self} \leftarrow N(\mu, \sigma) .. math:: \text{self} \sim \mathcal{N}(\mu, \sigma)
Parameters Parameters
---------- ----------
mean : number, optional, default=0 mean : number, optional, default=0
The value of :math:`\mu`. The value to :math:`\mu`.
std : number, optional, default=1 std : number, optional, default=1
The value of :math:`\sigma`. The value to :math:`\sigma`.
Returns Returns
------- -------
...@@ -301,19 +298,6 @@ class Tensor(types.TensorMetaclass): ...@@ -301,19 +298,6 @@ class Tensor(types.TensorMetaclass):
""" """
return self._register_as('normal', mean=mean, std=std) return self._register_as('normal', mean=mean, std=std)
def placeholder(self):
r"""Register as a placeholder with zero initializer.
.. math:: \text{self} \leftarrow 0
Returns
-------
dragon.Tensor
The self.
"""
return self._register_as('placeholder')
def reshape(self, shape): def reshape(self, shape):
"""Return a tensor containing the same data with new shape. """Return a tensor containing the same data with new shape.
...@@ -334,12 +318,12 @@ class Tensor(types.TensorMetaclass): ...@@ -334,12 +318,12 @@ class Tensor(types.TensorMetaclass):
""" """
def set_value(self, value): def set_value(self, value):
"""Feed the const value to the storage. """Set value to the implementation.
Parameters Parameters
---------- ----------
value : array_like value : array_like
The const value. The value to set.
Returns Returns
------- -------
...@@ -349,16 +333,16 @@ class Tensor(types.TensorMetaclass): ...@@ -349,16 +333,16 @@ class Tensor(types.TensorMetaclass):
""" """
def truncated_normal(self, mean=0, std=1): def truncated_normal(self, mean=0, std=1):
r"""Register as a variable with truncated normal initializer. r"""Register self to initialize from a truncated normal distribution.
.. math:: \text{self} \leftarrow TN(\mu, \sigma, \mu - 2\sigma, \mu + 2\sigma) .. math:: \text{self} \sim \mathcal{TN}(\mu, \sigma, \mu - 2\sigma, \mu + 2\sigma)
Parameters Parameters
---------- ----------
mean : number, optional, default=0 mean : number, optional, default=0
The value of :math:`\mu`. The value to :math:`\mu`.
std : number, optional, default=1 std : number, optional, default=1
The value of :math:`\sigma`. The value to :math:`\sigma`.
Returns Returns
------- -------
...@@ -369,16 +353,16 @@ class Tensor(types.TensorMetaclass): ...@@ -369,16 +353,16 @@ class Tensor(types.TensorMetaclass):
return self._register_as('truncated_normal', mean=mean, std=std) return self._register_as('truncated_normal', mean=mean, std=std)
def uniform(self, low=0, high=1): def uniform(self, low=0, high=1):
r"""Register as a variable with uniform initializer. r"""Register self to initialize from an uniform distribution.
.. math:: \text{self} \leftarrow U(\alpha, \beta) .. math:: \text{self} \sim \mathcal{U}(\alpha, \beta)
Parameters Parameters
---------- ----------
low : number, optional, default=0 low : number, optional, default=0
The value of :math:`\alpha`. The value to :math:`\alpha`.
high : number, optional, default=1 high : number, optional, default=1
The value of :math:`\beta`. The value to :math:`\beta`.
Returns Returns
------- -------
...@@ -388,19 +372,6 @@ class Tensor(types.TensorMetaclass): ...@@ -388,19 +372,6 @@ class Tensor(types.TensorMetaclass):
""" """
return self._register_as('uniform', low=low, high=high) return self._register_as('uniform', low=low, high=high)
def variable(self):
r"""Register as a variable with zero initializer.
.. math:: self \leftarrow 0
Returns
-------
dragon.Tensor
The self.
"""
return self._register_as('variable')
@classmethod @classmethod
def convert_to(cls, value, dtype=None, name=None): def convert_to(cls, value, dtype=None, name=None):
"""Convert the given ``value`` to a ``dragon.Tensor``. """Convert the given ``value`` to a ``dragon.Tensor``.
...@@ -435,6 +406,7 @@ class Tensor(types.TensorMetaclass): ...@@ -435,6 +406,7 @@ class Tensor(types.TensorMetaclass):
"""Fill self with the specific type of filler.""" """Fill self with the specific type of filler."""
filler = dragon_pb2.FillerInfo() filler = dragon_pb2.FillerInfo()
filler.type = type.lower() filler.type = type.lower()
variance_norm = {'fan_in': 0, 'fan_out': 1, 'fan_avg': 2}
if filler.type == 'constant': if filler.type == 'constant':
filler.value = kwargs['value'] if 'value' in kwargs else 0 filler.value = kwargs['value'] if 'value' in kwargs else 0
elif filler.type in ['normal', 'gaussian']: elif filler.type in ['normal', 'gaussian']:
...@@ -451,15 +423,15 @@ class Tensor(types.TensorMetaclass): ...@@ -451,15 +423,15 @@ class Tensor(types.TensorMetaclass):
filler.high = filler.mean + 2.0 * filler.std filler.high = filler.mean + 2.0 * filler.std
elif filler.type in ['glorot_uniform', 'xavier']: elif filler.type in ['glorot_uniform', 'xavier']:
filler.scale = kwargs['scale'] if 'scale' in kwargs else 3 filler.scale = kwargs['scale'] if 'scale' in kwargs else 3
filler.variance_norm = variance_norm[kwargs.get('mode', 'fan_in')]
elif filler.type in ['glorot_normal', 'msra']: elif filler.type in ['glorot_normal', 'msra']:
filler.scale = kwargs['scale'] if 'scale' in kwargs else 2 filler.scale = kwargs['scale'] if 'scale' in kwargs else 2
filler.variance_norm = variance_norm[kwargs.get('mode', 'fan_in')]
workspace.get_workspace().create_tensor(self.name, filler) workspace.get_workspace().create_tensor(self.name, filler)
return self return self
def __add__(self, other): def __add__(self, other):
r"""Compute the element-wise addition. """Compute the element-wise addition.
.. math:: \text{out} = \text{self} + \text{other}
Parameters Parameters
---------- ----------
...@@ -469,33 +441,8 @@ class Tensor(types.TensorMetaclass): ...@@ -469,33 +441,8 @@ class Tensor(types.TensorMetaclass):
Returns Returns
------- -------
dragon.Tensor dragon.Tensor
The **y**.
See Also
--------
`dragon.math.add(...)`_ : Compute the element-wise addition.
"""
def __div__(self, other):
r"""Compute the element-wise division.
.. math:: \text{out} = \text{self} \div \text{other}
Parameters
----------
other : Union[dragon.Tensor, number]
The value to divide.
Returns
-------
dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.div(...)`_ : Compute the element-wise division.
""" """
def __float__(self): def __float__(self):
...@@ -510,9 +457,7 @@ class Tensor(types.TensorMetaclass): ...@@ -510,9 +457,7 @@ class Tensor(types.TensorMetaclass):
return float(self.get_value()) return float(self.get_value())
def __ge__(self, other): def __ge__(self, other):
r"""Compute element-wise greater-equal comparison. """Compute element-wise greater-equal comparison.
.. math:: \text{out} = (\text{self} \geq \text{other})
Parameters Parameters
---------- ----------
...@@ -524,39 +469,25 @@ class Tensor(types.TensorMetaclass): ...@@ -524,39 +469,25 @@ class Tensor(types.TensorMetaclass):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.greater_equal(...)`_ : Compute element-wise greater-equal comparison.
""" """
def __getitem__(self, item): def __getitem__(self, item):
"""Select the elements at the specific indices. """Select elements at the specific index.
Parameters Parameters
---------- ----------
item : Union[int, slice, dragon.Tensor] item : Union[int, slice, dragon.Tensor]
The indices. The index.
Returns Returns
------- -------
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.slice(...)`_ : Select the elements according to the given sections.
See Also
--------
`dragon.masked_select(...)`_ : Select the elements where the given mask is 1.
""" """
def __gt__(self, other): def __gt__(self, other):
r"""Compute element-wise greater comparison. """Compute element-wise greater comparison.
.. math:: \text{out} = (\text{self} > \text{other})
Parameters Parameters
---------- ----------
...@@ -568,30 +499,24 @@ class Tensor(types.TensorMetaclass): ...@@ -568,30 +499,24 @@ class Tensor(types.TensorMetaclass):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.greater(...)`_ : Compute element-wise greater comparison.
""" """
def __hash__(self): def __hash__(self):
return id(self) return id(self)
def __int__(self): def __int__(self):
"""Return a int python scalar. """Return an integer python scalar.
Returns Returns
------- -------
int int
The int value. The integer value.
""" """
return int(self.get_value()) return int(self.get_value())
def __le__(self, other): def __le__(self, other):
r"""Compute element-wise less-equal comparison. """Compute element-wise less-equal comparison.
.. math:: \text{out} = (\text{self} \leq \text{other})
Parameters Parameters
---------- ----------
...@@ -603,16 +528,10 @@ class Tensor(types.TensorMetaclass): ...@@ -603,16 +528,10 @@ class Tensor(types.TensorMetaclass):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.less_equal(...)`_ : Compute element-wise less-equal comparison.
""" """
def __lt__(self, other): def __lt__(self, other):
r"""Compute element-wise less comparison. """Compute element-wise less comparison.
.. math:: \text{out} = (\text{self} < \text{other})
Parameters Parameters
---------- ----------
...@@ -624,16 +543,10 @@ class Tensor(types.TensorMetaclass): ...@@ -624,16 +543,10 @@ class Tensor(types.TensorMetaclass):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.less(...)`_ : Compute element-wise less comparison.
""" """
def __mul__(self, other): def __mul__(self, other):
r"""Compute the element-wise multiplication. """Compute the element-wise multiplication.
.. math:: \text{out} = \text{self} \times \text{other}
Parameters Parameters
---------- ----------
...@@ -645,32 +558,20 @@ class Tensor(types.TensorMetaclass): ...@@ -645,32 +558,20 @@ class Tensor(types.TensorMetaclass):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.mul(...)`_ : Compute the element-wise multiplication.
""" """
def __neg__(self): def __neg__(self):
r"""Compute the element-wise negative. """Compute the element-wise negative.
.. math:: y = -x
Returns Returns
------- -------
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.negative(...)`_ : Compute the element-wise negative.
""" """
def __radd__(self, other): def __radd__(self, other):
r"""Compute the element-wise addition. """Compute the element-wise addition.
.. math:: \text{out} = \text{other} + \text{self}
Parameters Parameters
---------- ----------
...@@ -682,10 +583,6 @@ class Tensor(types.TensorMetaclass): ...@@ -682,10 +583,6 @@ class Tensor(types.TensorMetaclass):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.add(...)`_ : Compute the element-wise addition.
""" """
def __repr__(self): def __repr__(self):
...@@ -697,10 +594,8 @@ class Tensor(types.TensorMetaclass): ...@@ -697,10 +594,8 @@ class Tensor(types.TensorMetaclass):
return 'Tensor("{}", shape={}, dtype={})' \ return 'Tensor("{}", shape={}, dtype={})' \
.format(self.name, shape_str, self.dtype) .format(self.name, shape_str, self.dtype)
def __rdiv__(self, other): def __rtruediv__(self, other):
r"""Compute the element-wise division. """Compute the element-wise division.
.. math:: \text{out} = \text{other} \div \text{self}
Parameters Parameters
---------- ----------
...@@ -712,16 +607,10 @@ class Tensor(types.TensorMetaclass): ...@@ -712,16 +607,10 @@ class Tensor(types.TensorMetaclass):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.div(...)`_ : Compute the element-wise division.
""" """
def __rmul__(self, other): def __rmul__(self, other):
r"""Compute the element-wise multiplication. """Compute the element-wise multiplication.
.. math:: \text{out} = \text{other} \times \text{self}
Parameters Parameters
---------- ----------
...@@ -733,16 +622,10 @@ class Tensor(types.TensorMetaclass): ...@@ -733,16 +622,10 @@ class Tensor(types.TensorMetaclass):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.mul(...)`_ : Compute the element-wise multiplication.
""" """
def __rsub__(self, other): def __rsub__(self, other):
r"""Compute the element-wise subtraction. """Compute the element-wise subtraction.
.. math:: \text{out} = \text{other} - \text{self}
Parameters Parameters
---------- ----------
...@@ -754,36 +637,22 @@ class Tensor(types.TensorMetaclass): ...@@ -754,36 +637,22 @@ class Tensor(types.TensorMetaclass):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.sub(...)`_ : Compute the element-wise subtraction.
""" """
def __setitem__(self, key, value): def __setitem__(self, key, value):
"""Set the value at the specific indices. """Set elements at the specific index.
Parameters Parameters
---------- ----------
key : Union[int, slice, dragon.Tensor] key : Union[int, slice, dragon.Tensor]
The indices. The index.
value : number or dragon.Tensor value : Union[dragon.Tensor, number]
The value. The value to set.
See Also
--------
`dragon.assign(...)`_ : Assign the value to ref.
See Also
--------
`dragon.masked_assign(...)`_ : Assign the value to ref where mask is 1.
""" """
def __sub__(self, other): def __sub__(self, other):
r"""Compute the element-wise subtraction. """Compute the element-wise subtraction.
.. math:: \text{out} = \text{self} - \text{value}
Parameters Parameters
---------- ----------
...@@ -795,9 +664,20 @@ class Tensor(types.TensorMetaclass): ...@@ -795,9 +664,20 @@ class Tensor(types.TensorMetaclass):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also """
--------
`dragon.math.sub(...)`_ : Compute the element-wise subtraction. def __truediv__(self, other):
"""Compute the element-wise division.
Parameters
----------
other : Union[dragon.Tensor, number]
The value to divide.
Returns
-------
dragon.Tensor
The output tensor.
""" """
......
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
# #
# ------------------------------------------------------------ # ------------------------------------------------------------
"""Define the eager tensor abstraction.""" """The eager executing tensor."""
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import division from __future__ import division
...@@ -23,10 +23,9 @@ from dragon.core.framework import workspace ...@@ -23,10 +23,9 @@ from dragon.core.framework import workspace
class EagerTensor(Tensor): class EagerTensor(Tensor):
"""Tensor abstraction under the eager execution. """Tensor abstraction for eager executing.
This abstraction involves the garbage collection, Examples:
thus, life-cycle should be considered to avoid memory leak.
```python ```python
# Create a tensor with shape and dtype # Create a tensor with shape and dtype
...@@ -173,12 +172,12 @@ class EagerTensor(Tensor): ...@@ -173,12 +172,12 @@ class EagerTensor(Tensor):
return self._impl.size return self._impl.size
def astype(self, dtype, inplace=False): def astype(self, dtype, inplace=False):
"""Cast the data type to a specific one. """Cast to the specified data type.
Parameters Parameters
---------- ----------
dtype : str dtype : str
The specific data type. The data type to cast to.
inplace : bool, optional, default=False inplace : bool, optional, default=False
Whether to do the cast in-place. Whether to do the cast in-place.
...@@ -194,14 +193,14 @@ class EagerTensor(Tensor): ...@@ -194,14 +193,14 @@ class EagerTensor(Tensor):
""" """
def constant(self, value=0): def constant(self, value=0):
r"""Fill self with a constant value. r"""Fill self from a scalar value.
.. math:: \text{self} \leftarrow \text{value} .. math:: \text{self} \leftarrow \text{value}
Parameters Parameters
---------- ----------
value : number, optional, default=0 value : number, optional, default=0
The constant value. The value to fill.
Returns Returns
------- -------
...@@ -211,7 +210,7 @@ class EagerTensor(Tensor): ...@@ -211,7 +210,7 @@ class EagerTensor(Tensor):
""" """
def copy(self): def copy(self):
"""Return a tensor with containing data copied. """Return a tensor with data copied.
Returns Returns
------- -------
...@@ -220,32 +219,32 @@ class EagerTensor(Tensor): ...@@ -220,32 +219,32 @@ class EagerTensor(Tensor):
See Also See Also
-------- --------
`dragon.copy(...)`_ : Copy the value to ref. `dragon.copy(...)`_ : Copy the input.
""" """
def get_value(self): def get_value(self):
"""Return the value from storage. """Return the value of implementation.
Returns Returns
------- -------
numpy.ndarray numpy.ndarray
The shallow copied value. The zero-copied value.
""" """
return self.numpy() return self.numpy()
def glorot_normal(self, mode='FAN_IN', scale=2.): def glorot_normal(self, mode='fan_in', scale=2.0):
r"""Fill self from a glorot normal distribution. r"""Fill self from a glorot normal distribution.
.. math:: \text{self} \leftarrow N(0, \sqrt{\frac{scale}{\text{FAN}}}) .. math:: \text{self} \sim \mathcal{N}(0, \sqrt{\frac{scale}{\text{fan}}})
Parameters Parameters
---------- ----------
mode : {'FAN_IN, 'FAN_OUT', 'FAN_AVG'}, optional mode : {'fan_in, 'fan_out', 'fan_avg'}, optional
The mode to compute fans. The mode to compute fans.
scale : number, optional, default=2. scale : float, optional, default=2.0
The scale factor of distribution. The scale factor to distribution.
Returns Returns
------- -------
...@@ -254,20 +253,18 @@ class EagerTensor(Tensor): ...@@ -254,20 +253,18 @@ class EagerTensor(Tensor):
""" """
def glorot_uniform(self, mode='FAN_IN', scale=3.): def glorot_uniform(self, mode='fan_in', scale=3.0):
r"""Fill self from a glorot uniform distribution. r"""Fill self from a glorot uniform distribution.
.. math:: \text{self} \leftarrow U( .. math:: \text{self} \sim \mathcal{U}(-\sqrt{\frac{scale}{\text{fan}}},
-\sqrt{\frac{scale}{\text{FAN}}}, \sqrt{\frac{scale}{\text{fan}}})
\sqrt{\frac{scale}{\text{FAN}}}
)
Parameters Parameters
---------- ----------
mode : {'FAN_IN, 'FAN_OUT', 'FAN_AVG'}, optional mode : {'fan_in, 'fan_out', 'fan_avg'}, optional
The mode to compute fans. The mode to compute fans.
scale : number, optional, default=3. scale : float, optional, default=3.0
The scale factor of distribution. The scale factor to distribution.
Returns Returns
------- -------
...@@ -295,14 +292,14 @@ class EagerTensor(Tensor): ...@@ -295,14 +292,14 @@ class EagerTensor(Tensor):
def normal(self, mean=0, std=1): def normal(self, mean=0, std=1):
r"""Fill self from a normal distribution. r"""Fill self from a normal distribution.
.. math:: \text{self} \leftarrow N(\mu, \sigma) .. math:: \text{self} \sim \mathcal{N}(\mu, \sigma)
Parameters Parameters
---------- ----------
mean : number, optional, default=0 mean : number, optional, default=0
The value of :math:`\mu`. The value to :math:`\mu`.
std : number, optional, default=1 std : number, optional, default=1
The value of :math:`\sigma`. The value to :math:`\sigma`.
Returns Returns
------- -------
...@@ -331,12 +328,12 @@ class EagerTensor(Tensor): ...@@ -331,12 +328,12 @@ class EagerTensor(Tensor):
""" """
def set_value(self, value): def set_value(self, value):
"""Map the value to storage. """Set value to the implementation.
Parameters Parameters
---------- ----------
value : array_like value : array_like
The value. The value to set.
Returns Returns
------- -------
...@@ -352,14 +349,14 @@ class EagerTensor(Tensor): ...@@ -352,14 +349,14 @@ class EagerTensor(Tensor):
def truncated_normal(self, mean=0, std=1): def truncated_normal(self, mean=0, std=1):
r"""Fill self from a truncated normal distribution. r"""Fill self from a truncated normal distribution.
.. math:: \text{self} \leftarrow TN(\mu, \sigma, \mu - 2\sigma, \mu + 2\sigma) .. math:: \text{self} \sim \mathcal{TN}(\mu, \sigma, \mu - 2\sigma, \mu + 2\sigma)
Parameters Parameters
---------- ----------
mean : number, optional, default=0 mean : number, optional, default=0
The value of :math:`\mu`. The value to :math:`\mu`.
std : number, optional, default=1 std : number, optional, default=1
The value of :math:`\sigma`. The value to :math:`\sigma`.
Returns Returns
------- -------
...@@ -369,16 +366,16 @@ class EagerTensor(Tensor): ...@@ -369,16 +366,16 @@ class EagerTensor(Tensor):
""" """
def uniform(self, low=0, high=1): def uniform(self, low=0, high=1):
self.self__ = r"""Fill self from a uniform distribution. r"""Fill self from an uniform distribution.
.. math:: \text{self} \leftarrow U(\alpha, \beta) .. math:: \text{self} \sim \mathcal{U}(\alpha, \beta)
Parameters Parameters
---------- ----------
low : number, optional, default=0 low : number, optional, default=0
The value of :math:`\alpha`. The value to :math:`\alpha`.
high : number, optional, default=1 high : number, optional, default=1
The value of :math:`\beta`. The value to :math:`\beta`.
Returns Returns
------- -------
...@@ -404,9 +401,7 @@ class EagerTensor(Tensor): ...@@ -404,9 +401,7 @@ class EagerTensor(Tensor):
context.get_eager_scope())).FromShape(shape, dtype) context.get_eager_scope())).FromShape(shape, dtype)
def __add__(self, other): def __add__(self, other):
r"""Compute the element-wise addition. """Compute the element-wise addition.
.. math:: \text{out} = \text{self} + \text{value}
Parameters Parameters
---------- ----------
...@@ -418,10 +413,6 @@ class EagerTensor(Tensor): ...@@ -418,10 +413,6 @@ class EagerTensor(Tensor):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.add(...)`_ : Compute the element-wise addition.
""" """
def __del__(self): def __del__(self):
...@@ -430,27 +421,6 @@ class EagerTensor(Tensor): ...@@ -430,27 +421,6 @@ class EagerTensor(Tensor):
# PyGC will detect them automatically. # PyGC will detect them automatically.
self._gc.collect(self.id) self._gc.collect(self.id)
def __div__(self, other):
r"""Compute the element-wise division.
.. math:: \text{out} = \text{self} \div \text{value}
Parameters
----------
other : Union[dragon.EagerTensor, number]
The value to divide.
Returns
-------
dragon.EagerTensor
The output tensor.
See Also
--------
`dragon.math.div(...)`_ : Compute the element-wise division.
"""
def __float__(self): def __float__(self):
"""Return a float python scalar. """Return a float python scalar.
...@@ -463,9 +433,7 @@ class EagerTensor(Tensor): ...@@ -463,9 +433,7 @@ class EagerTensor(Tensor):
return float(self.numpy()) return float(self.numpy())
def __ge__(self, other): def __ge__(self, other):
r"""Compute element-wise greater-equal comparison. """Compute element-wise greater-equal comparison.
.. math:: \text{out} = (\text{self} \geq \text{other})
Parameters Parameters
---------- ----------
...@@ -477,39 +445,25 @@ class EagerTensor(Tensor): ...@@ -477,39 +445,25 @@ class EagerTensor(Tensor):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.greater_equal(...)`_ : Compute element-wise greater-equal comparison.
""" """
def __getitem__(self, item): def __getitem__(self, item):
"""Select the elements at the specific indices. """Select elements at the specific index.
Parameters Parameters
---------- ----------
item : Union[int, slice, dragon.EagerTensor] item : Union[int, slice, dragon.EagerTensor]
The indices. The index.
Returns Returns
------- -------
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.slice(...)`_ : Select the elements according to the given sections.
See Also
--------
`dragon.masked_select(...)`_ : Select the elements where the given mask is 1.
""" """
def __gt__(self, other): def __gt__(self, other):
r"""Compute element-wise greater comparison. """Compute element-wise greater comparison.
.. math:: \text{out} = (\text{self} > \text{other})
Parameters Parameters
---------- ----------
...@@ -521,19 +475,13 @@ class EagerTensor(Tensor): ...@@ -521,19 +475,13 @@ class EagerTensor(Tensor):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.greater(...)`_ : Compute element-wise greater comparison.
""" """
def __hash__(self): def __hash__(self):
return id(self) return id(self)
def __iadd__(self, other): def __iadd__(self, other):
r"""Compute the element-wise addition. """Compute the element-wise addition.
.. math:: \text{self} \mathrel{+}= \text{other}
Parameters Parameters
---------- ----------
...@@ -545,37 +493,10 @@ class EagerTensor(Tensor): ...@@ -545,37 +493,10 @@ class EagerTensor(Tensor):
dragon.EagerTensor dragon.EagerTensor
The self. The self.
See Also
--------
`dragon.math.add(...)`_ : Compute the element-wise addition.
"""
def __idiv__(self, other):
r"""Compute the element-wise division.
.. math:: \text{self} \mathrel{\div}= \text{other}
Parameters
----------
other : Union[dragon.EagerTensor, number]
The value to divide.
Returns
-------
dragon.EagerTensor
The self.
See Also
--------
`dragon.math.div(...)`_ : Compute the element-wise division.
""" """
def __imul__(self, other): def __imul__(self, other):
r"""Compute the element-wise multiplication. """Compute the element-wise multiplication.
.. math:: \text{self} \mathrel{\times}= \text{other}
Parameters Parameters
---------- ----------
...@@ -587,27 +508,21 @@ class EagerTensor(Tensor): ...@@ -587,27 +508,21 @@ class EagerTensor(Tensor):
dragon.EagerTensor dragon.EagerTensor
The self. The self.
See Also
--------
`dragon.math.mul(...)`_ : Compute the element-wise multiplication.
""" """
def __int__(self): def __int__(self):
"""Return a int python scalar. """Return a integer python scalar.
Returns Returns
------- -------
int int
The int value. The integer value.
""" """
return int(self.__float__()) return int(self.__float__())
def __isub__(self, other): def __isub__(self, other):
r"""Compute the element-wise division. """Compute the element-wise subtraction.
.. math:: \text{self} \mathrel{-}= \text{other}
Parameters Parameters
---------- ----------
...@@ -619,16 +534,10 @@ class EagerTensor(Tensor): ...@@ -619,16 +534,10 @@ class EagerTensor(Tensor):
dragon.EagerTensor dragon.EagerTensor
The self. The self.
See Also
--------
`dragon.math.sub(...)`_ : Compute the element-wise subtraction.
""" """
def __le__(self, other): def __le__(self, other):
r"""Compute element-wise less-equal comparison. """Compute element-wise less-equal comparison.
.. math:: \text{out} = (\text{self} \leq \text{other})
Parameters Parameters
---------- ----------
...@@ -640,16 +549,10 @@ class EagerTensor(Tensor): ...@@ -640,16 +549,10 @@ class EagerTensor(Tensor):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.less_equal(...)`_ : Compute element-wise less-equal comparison.
""" """
def __lt__(self, other): def __lt__(self, other):
r"""Compute element-wise less comparison. """Compute element-wise less comparison.
.. math:: \text{out} = (\text{self} < \text{other})
Parameters Parameters
---------- ----------
...@@ -661,16 +564,10 @@ class EagerTensor(Tensor): ...@@ -661,16 +564,10 @@ class EagerTensor(Tensor):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.less(...)`_ : Compute element-wise less comparison.
""" """
def __mul__(self, other): def __mul__(self, other):
r"""Compute the element-wise multiplication. """Compute the element-wise multiplication.
.. math:: \text{out} = \text{self} \times \text{other}
Parameters Parameters
---------- ----------
...@@ -682,32 +579,20 @@ class EagerTensor(Tensor): ...@@ -682,32 +579,20 @@ class EagerTensor(Tensor):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.mul(...)`_ : Compute the element-wise multiplication.
""" """
def __neg__(self): def __neg__(self):
r"""Compute the element-wise negative. """Compute the element-wise negative.
.. math:: y = -x
Returns Returns
------- -------
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.negative(...)`_ : Compute the element-wise negative.
""" """
def __radd__(self, other): def __radd__(self, other):
r"""Compute the element-wise addition. """Compute the element-wise addition.
.. math:: \text{out} = \text{other} + \text{self}
Parameters Parameters
---------- ----------
...@@ -719,10 +604,6 @@ class EagerTensor(Tensor): ...@@ -719,10 +604,6 @@ class EagerTensor(Tensor):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.add(...)`_ : Compute the element-wise addition.
""" """
def __repr__(self): def __repr__(self):
...@@ -738,10 +619,8 @@ class EagerTensor(Tensor): ...@@ -738,10 +619,8 @@ class EagerTensor(Tensor):
.format(shape_str, self.dtype, str(self._device)) .format(shape_str, self.dtype, str(self._device))
return content_str + meta_str return content_str + meta_str
def __rdiv__(self, other): def __rtruediv__(self, other):
r"""Compute the element-wise division. """Compute the element-wise division.
.. math:: \text{out} = \text{value} \div \text{self}
Parameters Parameters
---------- ----------
...@@ -753,16 +632,10 @@ class EagerTensor(Tensor): ...@@ -753,16 +632,10 @@ class EagerTensor(Tensor):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.div(...)`_ : Compute the element-wise division.
""" """
def __rmul__(self, other): def __rmul__(self, other):
r"""Compute the element-wise multiplication. """Compute the element-wise multiplication.
.. math:: \text{out} = \text{other} \times \text{self}
Parameters Parameters
---------- ----------
...@@ -774,16 +647,10 @@ class EagerTensor(Tensor): ...@@ -774,16 +647,10 @@ class EagerTensor(Tensor):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.mul(...)`_ : Compute the element-wise multiplication.
""" """
def __rsub__(self, other): def __rsub__(self, other):
r"""Compute the element-wise subtraction. """Compute the element-wise subtraction.
.. math:: \text{out} = \text{other} - \text{self}
Parameters Parameters
---------- ----------
...@@ -795,36 +662,22 @@ class EagerTensor(Tensor): ...@@ -795,36 +662,22 @@ class EagerTensor(Tensor):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.sub(...)`_ : Compute the element-wise subtraction.
""" """
def __setitem__(self, key, value): def __setitem__(self, key, value):
"""Set the value at the specific indices. """Set elements at the specific index.
Parameters Parameters
---------- ----------
key : Union[int, slice, dragon.EagerTensor] key : Union[int, slice, dragon.EagerTensor]
The indices. The index.
value : number or dragon.EagerTensor value : Union[dragon.EagerTensor, number]
The value. The value to set.
See Also
--------
`dragon.assign(...)`_ : Assign the value to ref.
See Also
--------
`dragon.masked_assign(...)`_ : Assign the value to ref where mask is 1.
""" """
def __sub__(self, other): def __sub__(self, other):
r"""Compute the element-wise subtraction. """Compute the element-wise subtraction.
.. math:: \text{out} = \text{self} - \text{other}
Parameters Parameters
---------- ----------
...@@ -836,8 +689,19 @@ class EagerTensor(Tensor): ...@@ -836,8 +689,19 @@ class EagerTensor(Tensor):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also """
--------
`dragon.math.sub(...)`_ : Compute the element-wise subtraction. def __truediv__(self, other):
"""Compute the element-wise division.
Parameters
----------
other : Union[dragon.EagerTensor, number]
The value to divide.
Returns
-------
dragon.EagerTensor
The output tensor.
""" """
...@@ -261,7 +261,7 @@ class Workspace(backend.Workspace): ...@@ -261,7 +261,7 @@ class Workspace(backend.Workspace):
return self.HasTensor(_stringify_object(tensor)) return self.HasTensor(_stringify_object(tensor))
def merge_from(self, other): def merge_from(self, other):
"""Merge resources from another workspace. """Merge resources from the other.
The ``other`` will not be reset until ``self`` is reset. The ``other`` will not be reset until ``self`` is reset.
Carefulness should be taken to associate with the workspaces. Carefulness should be taken to associate with the workspaces.
......
...@@ -202,7 +202,7 @@ def elu(inputs, alpha=1., **kwargs): ...@@ -202,7 +202,7 @@ def elu(inputs, alpha=1., **kwargs):
inputs : dragon.Tensor inputs : dragon.Tensor
The input tensor. The input tensor.
alpha : float, optional, default=1. alpha : float, optional, default=1.
The value of :math:`\alpha`. The value to :math:`\alpha`.
Returns Returns
------- -------
...@@ -247,7 +247,7 @@ def leaky_relu(inputs, alpha=0.2, **kwargs): ...@@ -247,7 +247,7 @@ def leaky_relu(inputs, alpha=0.2, **kwargs):
inputs : dragon.Tensor inputs : dragon.Tensor
The input tensor. The input tensor.
alpha : number, optional, default=0.2 alpha : number, optional, default=0.2
The value of :math:`\alpha`. The value to :math:`\alpha`.
Returns Returns
------- -------
...@@ -466,9 +466,9 @@ def selu(inputs, alpha=1.67326, gamma=1.0507, **kwargs): ...@@ -466,9 +466,9 @@ def selu(inputs, alpha=1.67326, gamma=1.0507, **kwargs):
inputs : dragon.Tensor inputs : dragon.Tensor
The input tensor. The input tensor.
alpha : float, optional, default=1.67326 alpha : float, optional, default=1.67326
The value of :math:`\alpha`. The value to :math:`\alpha`.
gamma : float, optional, default=1.0507 gamma : float, optional, default=1.0507
The value of :math:`\gamma`. The value to :math:`\gamma`.
Returns Returns
------- -------
......
...@@ -25,7 +25,7 @@ from dragon.core.util import nest ...@@ -25,7 +25,7 @@ from dragon.core.util import nest
def arange(start, stop=None, step=1, dtype='int64', **kwargs): def arange(start, stop=None, step=1, dtype='int64', **kwargs):
r"""Return a tensor with evenly spaced values within a interval. r"""Return a tensor of evenly spaced values within a interval.
Specify ``start`` and ``stop`` to determine an interval: Specify ``start`` and ``stop`` to determine an interval:
...@@ -537,7 +537,7 @@ def flatten(inputs, axis=0, num_axes=-1, keep_axes=None, **kwargs): ...@@ -537,7 +537,7 @@ def flatten(inputs, axis=0, num_axes=-1, keep_axes=None, **kwargs):
Examples: Examples:
```python ```python
x = dragon.Tensor(shape=[1, 2, 3, 4]).variable() x = dragon.Tensor(shape=[1, 2, 3, 4]).constant()
print(dragon.flatten(x, axis=1, num_axes=-1).shape) # (1, 24) print(dragon.flatten(x, axis=1, num_axes=-1).shape) # (1, 24)
print(dragon.flatten(x, axis=1, num_axes=2).shape) # (1, 6, 4) print(dragon.flatten(x, axis=1, num_axes=2).shape) # (1, 6, 4)
print(dragon.flatten(x, keep_axes=1)) # (24,) print(dragon.flatten(x, keep_axes=1)) # (24,)
...@@ -634,7 +634,7 @@ def index_select(inputs, indices, axis=0, **kwargs): ...@@ -634,7 +634,7 @@ def index_select(inputs, indices, axis=0, **kwargs):
@OpSchema.num_inputs(2) @OpSchema.num_inputs(2)
def masked_select(inputs, **kwargs): def masked_select(inputs, **kwargs):
"""Select the elements where the given mask is **1**. """Select the elements of input where mask is 1.
Parameters Parameters
---------- ----------
...@@ -908,14 +908,14 @@ def multinomial(inputs, num_samples=1, eps=0., normalize=False, **kwargs): ...@@ -908,14 +908,14 @@ def multinomial(inputs, num_samples=1, eps=0., normalize=False, **kwargs):
@OpSchema.num_inputs(1) @OpSchema.num_inputs(1)
def nonzero(inputs, **kwargs): def nonzero(inputs, **kwargs):
r"""Return the indices of non-zero elements. r"""Return the index of non-zero elements.
.. math:: y = \{i, \text{ if } x[i] \text{ is True }\} .. math:: \text{out} = \{i, \text{ if } \text{input}[i] \neq 0
Parameters Parameters
---------- ----------
inputs : dragon.Tensor inputs : dragon.Tensor
The tensor :math:`x`. The input tensor.
Returns Returns
------- -------
...@@ -938,8 +938,8 @@ def one_hot(inputs, depth, on_value=1, off_value=0, **kwargs): ...@@ -938,8 +938,8 @@ def one_hot(inputs, depth, on_value=1, off_value=0, **kwargs):
.. math:: .. math::
\text{out}[i][j] = \text{out}[i][j] =
\begin{cases} \begin{cases}
\text{Val}_{off}, & \text{ if } x[i] \neq j \\ \text{off\_value}, & \text{ if } \text{input}[i] \neq j \\
\text{Val}_{on}, & \text{ otherwise } \text{on\_value}, & \text{ otherwise }
\end{cases} \end{cases}
The max value of indices, i.e., the ``depth`` should be specified: The max value of indices, i.e., the ``depth`` should be specified:
......
...@@ -26,23 +26,23 @@ from dragon.core.util import nest ...@@ -26,23 +26,23 @@ from dragon.core.util import nest
@ArgHelper.repeated_desc('starts') @ArgHelper.repeated_desc('starts')
@ArgHelper.repeated_desc('sizes') @ArgHelper.repeated_desc('sizes')
def assign(inputs, starts=None, sizes=None, **kwargs): def assign(inputs, starts=None, sizes=None, **kwargs):
r"""Assign the value to ref. r"""Assign the value to input.
.. math:: \text{Ref}[start:start + size, ...] = \text{Value} .. math:: \text{input}[\text{start}:\text{start} + \text{size}, ...] = \text{value}
Parameters Parameters
---------- ----------
inputs : Sequence[dragon.Tensor] inputs : Sequence[dragon.Tensor]
The **ref** and **value**. The input and value tensor.
starts : Sequence[Union[int, dragon.Tensor]], optional starts : Sequence[Union[int, dragon.Tensor]], optional
The start pos of each dimension. The start location for each dimension.
sizes : Sequence[Union[int, dragon.Tensor]], optional sizes : Sequence[Union[int, dragon.Tensor]], optional
The size of each dimension. The number of elements assigned from start.
Returns Returns
------- -------
dragon.Tensor dragon.Tensor
The **ref**. The input tensor.
""" """
args = parse_args(locals()) args = parse_args(locals())
...@@ -104,24 +104,24 @@ def copy(inputs, **kwargs): ...@@ -104,24 +104,24 @@ def copy(inputs, **kwargs):
@OpSchema.num_inputs(3) @OpSchema.num_inputs(3)
def masked_assign(inputs, **kwargs): def masked_assign(inputs, **kwargs):
r"""Assign the value to ref where mask is **1**. r"""Assign the value to input where mask is 1.
.. math:: .. math::
\text{Ref}[i] = \text{input}[i] =
\begin{cases} \begin{cases}
\text{Value}[i], & \text{ if } \text{Mask}[i] = 1 \\ \text{value}[i], & \text{ if } \text{mask}[i] = 1 \\
\text{Ref}[i], & \text{ otherwise } \text{input}[i], & \text{ otherwise }
\end{cases} \end{cases}
Parameters Parameters
---------- ----------
inputs : Sequence[dragon.Tensor] inputs : Sequence[dragon.Tensor]
The **ref**, **value** and **mask** tensor. The input, value and mask tensor.
Returns Returns
------- -------
dragon.Tensor dragon.Tensor
The **ref** tensor.. The input tensor.
""" """
args = parse_args(locals()) args = parse_args(locals())
......
...@@ -27,7 +27,7 @@ from dragon.core.ops.utils import parse_args ...@@ -27,7 +27,7 @@ from dragon.core.ops.utils import parse_args
def constant(value, dtype=None, shape=None, name=None): def constant(value, dtype=None, shape=None, name=None):
r"""Return a tensor taking the value content. r"""Return a tensor initialized from the value.
Examples: Examples:
...@@ -40,7 +40,7 @@ def constant(value, dtype=None, shape=None, name=None): ...@@ -40,7 +40,7 @@ def constant(value, dtype=None, shape=None, name=None):
Parameters Parameters
---------- ----------
value : array_like value : array_like
The constant value. The value to initialize from.
dtype : str, optional dtype : str, optional
The optional data type. The optional data type.
shape : Sequence[int], optional shape : Sequence[int], optional
...@@ -68,7 +68,7 @@ def constant(value, dtype=None, shape=None, name=None): ...@@ -68,7 +68,7 @@ def constant(value, dtype=None, shape=None, name=None):
def eye(n, m=None, k=0, dtype='float32', **kwargs): def eye(n, m=None, k=0, dtype='float32', **kwargs):
r"""Return a tensor constructed as the identity matrix. """Return a tensor constructed as the identity matrix.
The rows and cols of matrix are determined by ``n`` and ``m``: The rows and cols of matrix are determined by ``n`` and ``m``:
...@@ -131,7 +131,7 @@ def eye(n, m=None, k=0, dtype='float32', **kwargs): ...@@ -131,7 +131,7 @@ def eye(n, m=None, k=0, dtype='float32', **kwargs):
@OpSchema.num_inputs(1) @OpSchema.num_inputs(1)
def eye_like(other, k=0, dtype='float32', **kwargs): def eye_like(other, k=0, dtype='float32', **kwargs):
r"""Return a tensor shaping like another constructed as the identity matrix. """Return a tensor of identity matrix with shape as the other.
The rows and cols of matrix are hinted by the input tensor: The rows and cols of matrix are hinted by the input tensor:
...@@ -183,14 +183,14 @@ def eye_like(other, k=0, dtype='float32', **kwargs): ...@@ -183,14 +183,14 @@ def eye_like(other, k=0, dtype='float32', **kwargs):
@ArgHelper.repeated_desc(name='shape', name_v2='dims') @ArgHelper.repeated_desc(name='shape', name_v2='dims')
def fill(shape, value=0, dtype=None, **kwargs): def fill(shape, value=0, dtype=None, **kwargs):
r"""Return a tensor filled with the specific value. r"""Return a tensor filled with the scalar value.
.. math:: y \leftarrow \text{value} .. math:: \text{out} \leftarrow \text{value}
Parameters Parameters
---------- ----------
shape : Sequence[Union[int, dragon.Tensor]] shape : Sequence[Union[int, dragon.Tensor]]
The shape of the tensor. The tensor shape.
value : number, optional, default=0 value : number, optional, default=0
The value to fill. The value to fill.
dtype : str, optional, default='float32' dtype : str, optional, default='float32'
...@@ -224,21 +224,19 @@ def fill(shape, value=0, dtype=None, **kwargs): ...@@ -224,21 +224,19 @@ def fill(shape, value=0, dtype=None, **kwargs):
@ArgHelper.repeated_desc(name='shape', name_v2='dims') @ArgHelper.repeated_desc(name='shape', name_v2='dims')
def glorot_normal(shape, scale=2, mode='FAN_IN', dtype='float32', **kwargs): def glorot_normal(shape, scale=2.0, mode='fan_in', dtype='float32', **kwargs):
r"""Return a tensor initialized from the glorot normal distribution. r"""Return a tensor initialized from the glorot normal distribution.
The **GlorotNormal** distribution is defined as: .. math:: \text{out} \sim \mathcal{N}(0, \sqrt{\frac{scale}{\text{fan}}})
.. math:: X \sim N(0, \sqrt{\frac{scale}{\text{FAN}}})
Parameters Parameters
---------- ----------
shape : Sequence[Union[int, dragon.Tensor]] shape : Sequence[Union[int, dragon.Tensor]]
The shape of the tensor. The tensor shape.
scale : number, optional, default=2 mode : {'fan_in', 'fan_out', 'fan_avg'}, optional
The scale factor of distribution. The mode to compute fans.
mode : {'FAN_IN', 'FAN_OUT', 'FAN_AVG'}, optional scale : float, optional, default=2.0
The mode to compute the normalizer. The scale factor to distribution.
dtype : str, optional, default='float32' dtype : str, optional, default='float32'
The optional data type. The optional data type.
...@@ -266,25 +264,21 @@ def glorot_normal(shape, scale=2, mode='FAN_IN', dtype='float32', **kwargs): ...@@ -266,25 +264,21 @@ def glorot_normal(shape, scale=2, mode='FAN_IN', dtype='float32', **kwargs):
@ArgHelper.repeated_desc(name='shape', name_v2='dims') @ArgHelper.repeated_desc(name='shape', name_v2='dims')
def glorot_uniform(shape, scale=3, mode='FAN_IN', dtype='float32', **kwargs): def glorot_uniform(shape, mode='fan_in', scale=3.0, dtype='float32', **kwargs):
r"""Return a tensor initialized from the glorot uniform distribution. r"""Return a tensor initialized from the glorot uniform distribution.
The **GlorotUniform** distribution is defined as:
.. math:: .. math::
X \sim U( \text{out} \sim \mathcal{U}(-\sqrt{\frac{scale}{\text{fan}}},
-\sqrt{\frac{scale}{\text{FAN}}}, \sqrt{\frac{scale}{\text{fan}}})
\sqrt{\frac{scale}{\text{FAN}}}
)
Parameters Parameters
---------- ----------
shape : Sequence[Union[int, dragon.Tensor]] shape : Sequence[Union[int, dragon.Tensor]]
The shape of the tensor. The tensor shape.
scale : number, optional, default=3 mode : {'fan_in', 'fan_out', 'fan_avg'}, optional
The scale factor of distribution. The mode to compute fans.
mode : {'FAN_IN', 'FAN_OUT', 'FAN_AVG'}, optional scale : float, optional, default=3.0
The mode to compute the normalizer. The scale factor to distribution.
dtype : str, optional, default='float32' dtype : str, optional, default='float32'
The optional data type. The optional data type.
...@@ -315,7 +309,7 @@ def glorot_uniform(shape, scale=3, mode='FAN_IN', dtype='float32', **kwargs): ...@@ -315,7 +309,7 @@ def glorot_uniform(shape, scale=3, mode='FAN_IN', dtype='float32', **kwargs):
def ones(shape, dtype='float32', **kwargs): def ones(shape, dtype='float32', **kwargs):
r"""Return a tensor filled with ones. r"""Return a tensor filled with ones.
.. math:: y \leftarrow 1 .. math:: \text{out} \leftarrow 1
```python ```python
x = dragon.ones(shape=(2, 3), dtype='float32') x = dragon.ones(shape=(2, 3), dtype='float32')
...@@ -324,7 +318,7 @@ def ones(shape, dtype='float32', **kwargs): ...@@ -324,7 +318,7 @@ def ones(shape, dtype='float32', **kwargs):
Parameters Parameters
---------- ----------
shape : Sequence[Union[int, dragon.Tensor]] shape : Sequence[Union[int, dragon.Tensor]]
The shape of the tensor. The tensor shape.
dtype : str, optional, default='float32' dtype : str, optional, default='float32'
The optional data type. The optional data type.
...@@ -338,9 +332,9 @@ def ones(shape, dtype='float32', **kwargs): ...@@ -338,9 +332,9 @@ def ones(shape, dtype='float32', **kwargs):
def ones_like(other, dtype='float32', **kwargs): def ones_like(other, dtype='float32', **kwargs):
r"""Return a tensor shaping like another filled with ones. r"""Return a tensor of ones with shape as the other.
.. math:: y \leftarrow 1 .. math:: \text{out} \leftarrow 1
Examples: Examples:
...@@ -384,18 +378,16 @@ def ones_like(other, dtype='float32', **kwargs): ...@@ -384,18 +378,16 @@ def ones_like(other, dtype='float32', **kwargs):
def random_normal(shape, mean=0, std=1, dtype='float32', **kwargs): def random_normal(shape, mean=0, std=1, dtype='float32', **kwargs):
r"""Return a tensor initialized from the normal distribution. r"""Return a tensor initialized from the normal distribution.
The **RandomNormal** distribution is defined as: .. math:: \text{out} \sim \mathcal{N}(\mu, \sigma)
.. math:: X \sim N(\mu, \sigma)
Parameters Parameters
---------- ----------
shape : Sequence[Union[int, dragon.Tensor]] shape : Sequence[Union[int, dragon.Tensor]]
The shape of the tensor. The tensor shape.
mean : number, optional, default=0 mean : number, optional, default=0
The value of :math:`\mu`. The value to :math:`\mu`.
std : number, optional, default=1 std : number, optional, default=1
The value of :math:`\sigma`. The value to :math:`\sigma`.
dtype : str, optional, default='float32' dtype : str, optional, default='float32'
The optional data type. The optional data type.
...@@ -424,20 +416,18 @@ def random_normal(shape, mean=0, std=1, dtype='float32', **kwargs): ...@@ -424,20 +416,18 @@ def random_normal(shape, mean=0, std=1, dtype='float32', **kwargs):
@OpSchema.num_inputs(1) @OpSchema.num_inputs(1)
def random_normal_like(other, mean=0, std=1, dtype='float32', **kwargs): def random_normal_like(other, mean=0, std=1, dtype='float32', **kwargs):
r"""Return a tensor shaping like another initialized from the normal distribution. r"""Return a tensor initialized from the normal distribution with shape as the other.
The **RandomNormal** distribution is defined as: .. math:: \text{out} \sim \mathcal{N}(\mu, \sigma)
.. math:: X \sim N(\mu, \sigma)
Parameters Parameters
---------- ----------
other : dragon.Tensor other : dragon.Tensor
The tensor to hint the shape. The tensor to hint the shape.
mean : number, optional, default=0 mean : number, optional, default=0
The value of :math:`\mu`. The value to :math:`\mu`.
std : number, optional, default=1 std : number, optional, default=1
The value of :math:`\sigma`. The value to :math:`\sigma`.
dtype : str, optional, default='float32' dtype : str, optional, default='float32'
The optional data type. The optional data type.
...@@ -472,18 +462,16 @@ def random_normal_like(other, mean=0, std=1, dtype='float32', **kwargs): ...@@ -472,18 +462,16 @@ def random_normal_like(other, mean=0, std=1, dtype='float32', **kwargs):
def random_uniform(shape, low=-1, high=1, dtype='float32', **kwargs): def random_uniform(shape, low=-1, high=1, dtype='float32', **kwargs):
r"""Return a tensor initialized from the uniform distribution. r"""Return a tensor initialized from the uniform distribution.
The **RandomUniform** distribution is defined as: .. math:: \text{out} \sim \mathcal{U}(\alpha, \beta)
.. math:: X \sim U(\alpha, \beta)
Parameters Parameters
---------- ----------
shape : Sequence[Union[int, dragon.Tensor]] shape : Sequence[Union[int, dragon.Tensor]]
The shape of the tensor. The tensor shape.
low : number, optional, default=-1 low : number, optional, default=-1
The value of :math:`\alpha`. The value to :math:`\alpha`.
high : number, optional, default=1 high : number, optional, default=1
The value of :math:`\beta`. The value to :math:`\beta`.
dtype : str, optional, default='float32' dtype : str, optional, default='float32'
The optional data type. The optional data type.
...@@ -511,20 +499,18 @@ def random_uniform(shape, low=-1, high=1, dtype='float32', **kwargs): ...@@ -511,20 +499,18 @@ def random_uniform(shape, low=-1, high=1, dtype='float32', **kwargs):
@OpSchema.num_inputs(1) @OpSchema.num_inputs(1)
def random_uniform_like(other, low=-1, high=1, dtype='float32', **kwargs): def random_uniform_like(other, low=-1, high=1, dtype='float32', **kwargs):
r"""Return a tensor shaping like another initialized from the uniform distribution. r"""Return a tensor initialized from the uniform distribution with shape as the other.
The **RandomUniform** distribution is defined as: .. math:: \text{out} \sim \mathcal{U}(\alpha, \beta)
.. math:: X \sim U(\alpha, \beta)
Parameters Parameters
---------- ----------
other : dragon.Tensor other : dragon.Tensor
The tensor to hint the shape. The tensor to hint the shape.
low : number, optional, default=-1 low : number, optional, default=-1
The value of :math:`\alpha`. The value to :math:`\alpha`.
high : number, optional, default=1 high : number, optional, default=1
The value of :math:`\beta`. The value to :math:`\beta`.
dtype : str, optional, default='float32' dtype : str, optional, default='float32'
The optional data type. The optional data type.
...@@ -558,18 +544,16 @@ def random_uniform_like(other, low=-1, high=1, dtype='float32', **kwargs): ...@@ -558,18 +544,16 @@ def random_uniform_like(other, low=-1, high=1, dtype='float32', **kwargs):
def truncated_normal(shape, mean=0, std=1, dtype='float32', **kwargs): def truncated_normal(shape, mean=0, std=1, dtype='float32', **kwargs):
r"""Return a tensor initialized from the truncated normal distribution. r"""Return a tensor initialized from the truncated normal distribution.
The **TruncatedNormal** distribution is defined as: .. math:: \text{out} \sim \mathcal{TN}(\mu, \sigma, \mu - 2\sigma, \mu + 2\sigma)
.. math:: X \sim TN(\mu, \sigma, \mu - 2\sigma, \mu + 2\sigma)
Parameters Parameters
---------- ----------
shape : Sequence[Union[int, dragon.Tensor]] shape : Sequence[Union[int, dragon.Tensor]]
The shape of the tensor. The tensor shape.
mean : number, optional, default=0 mean : number, optional, default=0
The value of :math:`\mu`. The value to :math:`\mu`.
std : number, optional, default=1 std : number, optional, default=1
The value of :math:`\sigma`. The value to :math:`\sigma`.
dtype : str, optional, default='float32' dtype : str, optional, default='float32'
The optional data type. The optional data type.
...@@ -599,7 +583,7 @@ def truncated_normal(shape, mean=0, std=1, dtype='float32', **kwargs): ...@@ -599,7 +583,7 @@ def truncated_normal(shape, mean=0, std=1, dtype='float32', **kwargs):
def zeros(shape, dtype='float32', **kwargs): def zeros(shape, dtype='float32', **kwargs):
r"""Return a tensor filled with zeros. r"""Return a tensor filled with zeros.
.. math:: y \leftarrow 0 .. math:: \text{out} \leftarrow 0
```python ```python
x = dragon.zeros(shape=(2, 3), dtype='float32') x = dragon.zeros(shape=(2, 3), dtype='float32')
...@@ -608,7 +592,7 @@ def zeros(shape, dtype='float32', **kwargs): ...@@ -608,7 +592,7 @@ def zeros(shape, dtype='float32', **kwargs):
Parameters Parameters
---------- ----------
shape : Sequence[Union[int, dragon.Tensor]] shape : Sequence[Union[int, dragon.Tensor]]
The shape of the tensor. The tensor shape.
dtype : str, optional, default='float32' dtype : str, optional, default='float32'
The optional data type. The optional data type.
...@@ -623,9 +607,9 @@ def zeros(shape, dtype='float32', **kwargs): ...@@ -623,9 +607,9 @@ def zeros(shape, dtype='float32', **kwargs):
@OpSchema.num_inputs(1) @OpSchema.num_inputs(1)
def zeros_like(other, dtype='float32', **kwargs): def zeros_like(other, dtype='float32', **kwargs):
r"""Return a tensor shaping like another filled with zeros. r"""Return a tensor of zeros with shape as the other.
.. math:: y \leftarrow 0 .. math:: \text{out} \leftarrow 0
Examples: Examples:
......
...@@ -90,7 +90,7 @@ class GlorotNormal(Initializer): ...@@ -90,7 +90,7 @@ class GlorotNormal(Initializer):
def __init__(self, key, dev, **kwargs): def __init__(self, key, dev, **kwargs):
super(GlorotNormal, self).__init__(key, dev, **kwargs) super(GlorotNormal, self).__init__(key, dev, **kwargs)
self.scale = kwargs.get('scale', 2.) self.scale = kwargs.get('scale', 2.)
self.mode = kwargs.get('mode', 'FAN_IN') self.mode = kwargs.get('mode', 'fan_in')
def attributes(self): def attributes(self):
return { return {
...@@ -110,7 +110,7 @@ class GlorotUniform(Initializer): ...@@ -110,7 +110,7 @@ class GlorotUniform(Initializer):
def __init__(self, key, dev, **kwargs): def __init__(self, key, dev, **kwargs):
super(GlorotUniform, self).__init__(key, dev, **kwargs) super(GlorotUniform, self).__init__(key, dev, **kwargs)
self.scale = kwargs.get('scale', 3.) self.scale = kwargs.get('scale', 3.)
self.mode = kwargs.get('mode', 'FAN_IN') self.mode = kwargs.get('mode', 'fan_in')
def attributes(self): def attributes(self):
return { return {
......
...@@ -144,9 +144,9 @@ def axpby(inputs, outputs=None, alpha=1., beta=1., **kwargs): ...@@ -144,9 +144,9 @@ def axpby(inputs, outputs=None, alpha=1., beta=1., **kwargs):
outputs : Union[dragon.Tensor, Sequence[dragon.Tensor]], optional outputs : Union[dragon.Tensor, Sequence[dragon.Tensor]], optional
The tensor :math:`y`. The tensor :math:`y`.
alpha : number, optional, default=1. alpha : number, optional, default=1.
The value of :math:`\alpha`. The value to :math:`\alpha`.
beta : number, optional, default=1. beta : number, optional, default=1.
The value of :math:`\beta`. The value to :math:`\beta`.
Returns Returns
------- -------
...@@ -301,9 +301,9 @@ def clip(inputs, low=None, high=None, **kwargs): ...@@ -301,9 +301,9 @@ def clip(inputs, low=None, high=None, **kwargs):
inputs : dragon.Tensor inputs : dragon.Tensor
The tensor :math:`x`. The tensor :math:`x`.
low : number, optional low : number, optional
The value of :math:`\text{low}`. The value to :math:`\text{low}`.
high : number, optional high : number, optional
The value of :math:`\text{high}`. The value to :math:`\text{high}`.
Returns Returns
------- -------
......
...@@ -58,7 +58,7 @@ def batch_norm( ...@@ -58,7 +58,7 @@ def batch_norm(
momentum : float, optional, default=0.9 momentum : float, optional, default=0.9
The momentum for running average. The momentum for running average.
eps : float, optional, default=1e-5 eps : float, optional, default=1e-5
The value of :math:`\epsilon`. The value to :math:`\epsilon`.
use_stats : int, optional, default=-1 use_stats : int, optional, default=-1
Whether to use estimated statistics or not. Whether to use estimated statistics or not.
...@@ -111,7 +111,7 @@ def group_norm(inputs, axis=-1, group=32, eps=1e-5, **kwargs): ...@@ -111,7 +111,7 @@ def group_norm(inputs, axis=-1, group=32, eps=1e-5, **kwargs):
group : int, optional, default=32 group : int, optional, default=32
The group size. The group size.
eps : float, optional, default=1e-5 eps : float, optional, default=1e-5
The value of :math:`\epsilon`. The value to :math:`\epsilon`.
Returns Returns
------- -------
...@@ -156,7 +156,7 @@ def instance_norm(inputs, axis=-1, eps=1e-5, **kwargs): ...@@ -156,7 +156,7 @@ def instance_norm(inputs, axis=-1, eps=1e-5, **kwargs):
axis : int, optional, default=-1 axis : int, optional, default=-1
The channel axis. The channel axis.
eps : float, optional, default=1e-5 eps : float, optional, default=1e-5
The value of :math:`\epsilon`. The value to :math:`\epsilon`.
Returns Returns
------- -------
...@@ -201,7 +201,7 @@ def lp_normalize(inputs, axis=None, p=2, eps=1e-12, reduction='sum', **kwargs): ...@@ -201,7 +201,7 @@ def lp_normalize(inputs, axis=None, p=2, eps=1e-12, reduction='sum', **kwargs):
axis : Union[int, Sequence[int]], optional axis : Union[int, Sequence[int]], optional
The axis to compute the norm. The axis to compute the norm.
eps : float, optional, default=1e-12 eps : float, optional, default=1e-12
The value of :math:`\epsilon`. The value to :math:`\epsilon`.
reduction : {'sum', 'mean'}, optional reduction : {'sum', 'mean'}, optional
The reduction method for norm. The reduction method for norm.
...@@ -260,7 +260,7 @@ def layer_norm(inputs, axis=-1, eps=1e-5, **kwargs): ...@@ -260,7 +260,7 @@ def layer_norm(inputs, axis=-1, eps=1e-5, **kwargs):
axis : int, optional, default=-1 axis : int, optional, default=-1
The channel axis. The channel axis.
eps : float, optional, default=1e-5 eps : float, optional, default=1e-5
The value of :math:`\epsilon`. The value to :math:`\epsilon`.
Returns Returns
------- -------
...@@ -369,7 +369,7 @@ def sync_batch_norm( ...@@ -369,7 +369,7 @@ def sync_batch_norm(
momentum : float, optional, default=0.9 momentum : float, optional, default=0.9
The momentum for average. The momentum for average.
eps : float, optional, default=1e-5 eps : float, optional, default=1e-5
The value of :math:`\epsilon`. The value to :math:`\epsilon`.
use_stats : int, optional, default=-1 use_stats : int, optional, default=-1
Whether to use estimated statistics or not. Whether to use estimated statistics or not.
process_group : ProcessGroup, optional process_group : ProcessGroup, optional
......
...@@ -26,9 +26,7 @@ from dragon.core.ops import math_ops_lib ...@@ -26,9 +26,7 @@ from dragon.core.ops import math_ops_lib
def add(self, other): def add(self, other):
r"""Compute the element-wise addition. """Compute the element-wise addition.
.. math:: \text{out} = \text{self} + \text{value}
Parameters Parameters
---------- ----------
...@@ -40,10 +38,6 @@ def add(self, other): ...@@ -40,10 +38,6 @@ def add(self, other):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.add(...)`_ : Compute the element-wise addition.
""" """
return _binary_op(self, other, 'Add') return _binary_op(self, other, 'Add')
...@@ -73,14 +67,14 @@ def astype(self, dtype, inplace=False): ...@@ -73,14 +67,14 @@ def astype(self, dtype, inplace=False):
def constant(self, value=0): def constant(self, value=0):
r"""Fill self with a constant value. r"""Fill self with a scalar value.
.. math:: \text{self} \leftarrow \text{value} .. math:: \text{self} \leftarrow \text{value}
Parameters Parameters
---------- ----------
value : number, optional, default=0 value : number, optional, default=0
The constant value. The value to fill.
Returns Returns
------- -------
...@@ -115,9 +109,7 @@ def copy(self): ...@@ -115,9 +109,7 @@ def copy(self):
def div(self, other): def div(self, other):
r"""Compute the element-wise division. """Compute the element-wise division.
.. math:: \text{out} = \text{self} \div \text{value}
Parameters Parameters
---------- ----------
...@@ -129,18 +121,12 @@ def div(self, other): ...@@ -129,18 +121,12 @@ def div(self, other):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.div(...)`_ : Compute the element-wise division.
""" """
return _binary_op(self, other, 'Div') return _binary_op(self, other, 'Div')
def ge(self, other): def ge(self, other):
r"""Compute element-wise greater-equal comparison. """Compute element-wise greater-equal comparison.
.. math:: \text{out} = (\text{self} \geq \text{other})
Parameters Parameters
---------- ----------
...@@ -152,35 +138,23 @@ def ge(self, other): ...@@ -152,35 +138,23 @@ def ge(self, other):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.greater_equal(...)`_ : Compute element-wise greater-equal comparison.
""" """
return _binary_op(self, other, 'GreaterEqual') return _binary_op(self, other, 'GreaterEqual')
def getitem(self, item): def getitem(self, item):
"""Select the elements at the specific indices. """Select elements at the specific index.
Parameters Parameters
---------- ----------
item : Union[int, slice, dragon.EagerTensor] item : Union[int, slice, dragon.EagerTensor]
The indices. The index.
Returns Returns
------- -------
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.slice(...)`_ : Select the elements according to the given sections.
See Also
--------
`dragon.masked_select(...)`_ : Select the elements where the given mask is 1.
""" """
if isinstance(item, EagerTensor): if isinstance(item, EagerTensor):
return _masked_select(self, item) return _masked_select(self, item)
...@@ -189,16 +163,16 @@ def getitem(self, item): ...@@ -189,16 +163,16 @@ def getitem(self, item):
return _section_select(self, starts, sizes) return _section_select(self, starts, sizes)
def glorot_normal(self, mode='FAN_IN', scale=2.): def glorot_normal(self, mode='fan_in', scale=2.0):
r"""Fill self from a glorot normal distribution. r"""Fill self from a glorot normal distribution.
.. math:: \text{self} \leftarrow N(0, \sqrt{\frac{scale}{\text{FAN}}}) .. math:: \text{self} \sim \mathcal{N}(0, \sqrt{\frac{scale}{\text{fan}}})
Parameters Parameters
---------- ----------
mode : {'FAN_IN, 'FAN_OUT', 'FAN_AVG'}, optional mode : {'fan_in, 'fan_out', 'fan_avg'}, optional
The mode to compute fans. The mode to compute fans.
scale : number, optional, default=2. scale : float, optional, default=2.0
The scale factor of distribution. The scale factor of distribution.
Returns Returns
...@@ -217,19 +191,17 @@ def glorot_normal(self, mode='FAN_IN', scale=2.): ...@@ -217,19 +191,17 @@ def glorot_normal(self, mode='FAN_IN', scale=2.):
).apply(shape, out=self) ).apply(shape, out=self)
def glorot_uniform(self, mode='FAN_IN', scale=3.): def glorot_uniform(self, mode='fan_in', scale=3.0):
r"""Fill self from a glorot uniform distribution. r"""Fill self from a glorot uniform distribution.
.. math:: \text{self} \leftarrow U( .. math:: \text{self} \sim \mathcal{U}(-\sqrt{\frac{scale}{\text{fan}}},
-\sqrt{\frac{scale}{\text{FAN}}}, \sqrt{\frac{scale}{\text{fan}}})
\sqrt{\frac{scale}{\text{FAN}}}
)
Parameters Parameters
---------- ----------
mode : {'FAN_IN, 'FAN_OUT', 'FAN_AVG'}, optional mode : {'fan_in, 'fan_out', 'fan_avg'}, optional
The mode to compute fans. The mode to compute fans.
scale : number, optional, default=3. scale : float, optional, default=3.0
The scale factor of distribution. The scale factor of distribution.
Returns Returns
...@@ -249,9 +221,7 @@ def glorot_uniform(self, mode='FAN_IN', scale=3.): ...@@ -249,9 +221,7 @@ def glorot_uniform(self, mode='FAN_IN', scale=3.):
def gt(self, other): def gt(self, other):
r"""Compute element-wise greater comparison. """Compute element-wise greater comparison.
.. math:: \text{out} = (\text{self} > \text{other})
Parameters Parameters
---------- ----------
...@@ -263,18 +233,12 @@ def gt(self, other): ...@@ -263,18 +233,12 @@ def gt(self, other):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.greater(...)`_ : Compute element-wise greater comparison.
""" """
return _binary_op(self, other, 'Greater') return _binary_op(self, other, 'Greater')
def iadd(self, other): def iadd(self, other):
r"""Compute the element-wise addition. """Compute the element-wise addition.
.. math:: \text{self} \mathrel{+}= \text{other}
Parameters Parameters
---------- ----------
...@@ -286,18 +250,12 @@ def iadd(self, other): ...@@ -286,18 +250,12 @@ def iadd(self, other):
dragon.EagerTensor dragon.EagerTensor
The self. The self.
See Also
--------
`dragon.math.add(...)`_ : Compute the element-wise addition.
""" """
return _binary_op(self, other, 'Add', [self]) return _binary_op(self, other, 'Add', [self])
def idiv(self, other): def idiv(self, other):
r"""Compute the element-wise division. """Compute the element-wise division.
.. math:: \text{self} \mathrel{\div}= \text{other}
Parameters Parameters
---------- ----------
...@@ -309,18 +267,12 @@ def idiv(self, other): ...@@ -309,18 +267,12 @@ def idiv(self, other):
dragon.EagerTensor dragon.EagerTensor
The self. The self.
See Also
--------
`dragon.math.div(...)`_ : Compute the element-wise division.
""" """
return _binary_op(self, other, 'Div', [self]) return _binary_op(self, other, 'Div', [self])
def imul(self, other): def imul(self, other):
r"""Compute the element-wise multiplication. """Compute the element-wise multiplication.
.. math:: \text{self} \mathrel{\times}= \text{other}
Parameters Parameters
---------- ----------
...@@ -332,18 +284,12 @@ def imul(self, other): ...@@ -332,18 +284,12 @@ def imul(self, other):
dragon.EagerTensor dragon.EagerTensor
The self. The self.
See Also
--------
`dragon.math.mul(...)`_ : Compute the element-wise multiplication.
""" """
return _binary_op(self, other, 'Mul', [self]) return _binary_op(self, other, 'Mul', [self])
def isub(self, other): def isub(self, other):
r"""Compute the element-wise division. """Compute the element-wise subtraction.
.. math:: \text{self} \mathrel{-}= \text{other}
Parameters Parameters
---------- ----------
...@@ -355,18 +301,12 @@ def isub(self, other): ...@@ -355,18 +301,12 @@ def isub(self, other):
dragon.EagerTensor dragon.EagerTensor
The self. The self.
See Also
--------
`dragon.math.sub(...)`_ : Compute the element-wise subtraction.
""" """
return _binary_op(self, other, 'Sub', [self]) return _binary_op(self, other, 'Sub', [self])
def le(self, other): def le(self, other):
r"""Compute element-wise less-equal comparison. """Compute element-wise less-equal comparison.
.. math:: \text{out} = (\text{self} \leq \text{other})
Parameters Parameters
---------- ----------
...@@ -378,18 +318,12 @@ def le(self, other): ...@@ -378,18 +318,12 @@ def le(self, other):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.less_equal(...)`_ : Compute element-wise less-equal comparison.
""" """
return _binary_op(self, other, 'LessEqual') return _binary_op(self, other, 'LessEqual')
def lt(self, other): def lt(self, other):
r"""Compute element-wise less comparison. """Compute element-wise less comparison.
.. math:: \text{out} = (\text{self} < \text{other})
Parameters Parameters
---------- ----------
...@@ -401,18 +335,12 @@ def lt(self, other): ...@@ -401,18 +335,12 @@ def lt(self, other):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.less(...)`_ : Compute element-wise less comparison.
""" """
return _binary_op(self, other, 'Less') return _binary_op(self, other, 'Less')
def mul(self, other): def mul(self, other):
r"""Compute the element-wise multiplication. """Compute the element-wise multiplication.
.. math:: \text{out} = \text{self} \times \text{other}
Parameters Parameters
---------- ----------
...@@ -424,28 +352,18 @@ def mul(self, other): ...@@ -424,28 +352,18 @@ def mul(self, other):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.mul(...)`_ : Compute the element-wise multiplication.
""" """
return _binary_op(self, other, 'Mul') return _binary_op(self, other, 'Mul')
def neg(self): def neg(self):
r"""Compute the element-wise negative. """Compute the element-wise negative.
.. math:: y = -x
Returns Returns
------- -------
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.negative(...)`_ : Compute the element-wise negative.
""" """
return _unary_op(self, 'Neg') return _unary_op(self, 'Neg')
...@@ -453,14 +371,14 @@ def neg(self): ...@@ -453,14 +371,14 @@ def neg(self):
def normal(self, mean=0, std=1): def normal(self, mean=0, std=1):
r"""Fill self from a normal distribution. r"""Fill self from a normal distribution.
.. math:: \text{self} \leftarrow N(\mu, \sigma) .. math:: \text{self} \sim \mathcal{N}(\mu, \sigma)
Parameters Parameters
---------- ----------
mean : number, optional, default=0 mean : number, optional, default=0
The value of :math:`\mu`. The value to :math:`\mu`.
std : number, optional, default=1 std : number, optional, default=1
The value of :math:`\sigma`. The value to :math:`\sigma`.
Returns Returns
------- -------
...@@ -479,9 +397,7 @@ def normal(self, mean=0, std=1): ...@@ -479,9 +397,7 @@ def normal(self, mean=0, std=1):
def radd(self, other): def radd(self, other):
r"""Compute the element-wise addition. """Compute the element-wise addition.
.. math:: \text{out} = \text{other} + \text{self}
Parameters Parameters
---------- ----------
...@@ -493,18 +409,12 @@ def radd(self, other): ...@@ -493,18 +409,12 @@ def radd(self, other):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.add(...)`_ : Compute the element-wise addition.
""" """
return _binary_op(other, self, 'Add') return _binary_op(other, self, 'Add')
def rdiv(self, other): def rdiv(self, other):
r"""Compute the element-wise division. """Compute the element-wise division.
.. math:: \text{out} = \text{value} \div \text{self}
Parameters Parameters
---------- ----------
...@@ -516,10 +426,6 @@ def rdiv(self, other): ...@@ -516,10 +426,6 @@ def rdiv(self, other):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.div(...)`_ : Compute the element-wise division.
""" """
return _binary_op(other, self, 'Div') return _binary_op(other, self, 'Div')
...@@ -547,9 +453,7 @@ def reshape(self, shape): ...@@ -547,9 +453,7 @@ def reshape(self, shape):
def rmul(self, other): def rmul(self, other):
r"""Compute the element-wise multiplication. """Compute the element-wise multiplication.
.. math:: \text{out} = \text{other} \times \text{self}
Parameters Parameters
---------- ----------
...@@ -561,18 +465,12 @@ def rmul(self, other): ...@@ -561,18 +465,12 @@ def rmul(self, other):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.mul(...)`_ : Compute the element-wise multiplication.
""" """
return _binary_op(other, self, 'Mul') return _binary_op(other, self, 'Mul')
def rsub(self, other): def rsub(self, other):
r"""Compute the element-wise subtraction. """Compute the element-wise subtraction.
.. math:: \text{out} = \text{other} - \text{self}
Parameters Parameters
---------- ----------
...@@ -584,31 +482,19 @@ def rsub(self, other): ...@@ -584,31 +482,19 @@ def rsub(self, other):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.sub(...)`_ : Compute the element-wise subtraction.
""" """
return _binary_op(other, self, 'Sub') return _binary_op(other, self, 'Sub')
def setitem(self, key, value): def setitem(self, key, value):
"""Set the value at the specific indices. """Set elements at the specific index.
Parameters Parameters
---------- ----------
key : Union[int, slice, dragon.EagerTensor] key : Union[int, slice, dragon.EagerTensor]
The indices. The index.
value : number or dragon.EagerTensor value : Union[dragon.EagerTensor, number]
The value. The value to set.
See Also
--------
`dragon.assign(...)`_ : Assign the value to ref.
See Also
--------
`dragon.masked_assign(...)`_ : Assign the value to ref where mask is 1.
""" """
if isinstance(key, EagerTensor): if isinstance(key, EagerTensor):
...@@ -619,9 +505,7 @@ def setitem(self, key, value): ...@@ -619,9 +505,7 @@ def setitem(self, key, value):
def sub(self, other): def sub(self, other):
r"""Compute the element-wise subtraction. """Compute the element-wise subtraction.
.. math:: \text{out} = \text{self} - \text{other}
Parameters Parameters
---------- ----------
...@@ -633,10 +517,6 @@ def sub(self, other): ...@@ -633,10 +517,6 @@ def sub(self, other):
dragon.EagerTensor dragon.EagerTensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.sub(...)`_ : Compute the element-wise subtraction.
""" """
return _binary_op(self, other, 'Sub') return _binary_op(self, other, 'Sub')
...@@ -644,14 +524,14 @@ def sub(self, other): ...@@ -644,14 +524,14 @@ def sub(self, other):
def truncated_normal(self, mean=0, std=1): def truncated_normal(self, mean=0, std=1):
r"""Fill self from a truncated normal distribution. r"""Fill self from a truncated normal distribution.
.. math:: \text{self} \leftarrow TN(\mu, \sigma, \mu - 2\sigma, \mu + 2\sigma) .. math:: \text{self} \sim \mathcal{TN}(\mu, \sigma, \mu - 2\sigma, \mu + 2\sigma)
Parameters Parameters
---------- ----------
mean : number, optional, default=0 mean : number, optional, default=0
The value of :math:`\mu`. The value to :math:`\mu`.
std : number, optional, default=1 std : number, optional, default=1
The value of :math:`\sigma`. The value to :math:`\sigma`.
Returns Returns
------- -------
...@@ -672,14 +552,14 @@ def truncated_normal(self, mean=0, std=1): ...@@ -672,14 +552,14 @@ def truncated_normal(self, mean=0, std=1):
def uniform(self, low=0, high=1): def uniform(self, low=0, high=1):
r"""Fill self from a uniform distribution. r"""Fill self from a uniform distribution.
.. math:: \text{self} \leftarrow U(\alpha, \beta) .. math:: \text{self} \sim \mathcal{U}(\alpha, \beta)
Parameters Parameters
---------- ----------
low : number, optional, default=0 low : number, optional, default=0
The value of :math:`\alpha`. The value to :math:`\alpha`.
high : number, optional, default=1 high : number, optional, default=1
The value of :math:`\beta`. The value to :math:`\beta`.
Returns Returns
------- -------
...@@ -789,12 +669,10 @@ EagerTensor.reshape = reshape ...@@ -789,12 +669,10 @@ EagerTensor.reshape = reshape
EagerTensor.truncated_normal = truncated_normal EagerTensor.truncated_normal = truncated_normal
EagerTensor.uniform = uniform EagerTensor.uniform = uniform
EagerTensor.__add__ = add EagerTensor.__add__ = add
EagerTensor.__div__ = div
EagerTensor.__ge__ = ge EagerTensor.__ge__ = ge
EagerTensor.__getitem__ = getitem EagerTensor.__getitem__ = getitem
EagerTensor.__gt__ = gt EagerTensor.__gt__ = gt
EagerTensor.__iadd__ = iadd EagerTensor.__iadd__ = iadd
EagerTensor.__idiv__ = idiv
EagerTensor.__imul__ = imul EagerTensor.__imul__ = imul
EagerTensor.__isub__ = isub EagerTensor.__isub__ = isub
EagerTensor.__itruediv__ = idiv EagerTensor.__itruediv__ = idiv
...@@ -803,7 +681,6 @@ EagerTensor.__lt__ = lt ...@@ -803,7 +681,6 @@ EagerTensor.__lt__ = lt
EagerTensor.__mul__ = mul EagerTensor.__mul__ = mul
EagerTensor.__neg__ = neg EagerTensor.__neg__ = neg
EagerTensor.__radd__ = radd EagerTensor.__radd__ = radd
EagerTensor.__rdiv__ = rdiv
EagerTensor.__rmul__ = rmul EagerTensor.__rmul__ = rmul
EagerTensor.__rsub__ = rsub EagerTensor.__rsub__ = rsub
EagerTensor.__rtruediv__ = rdiv EagerTensor.__rtruediv__ = rdiv
......
...@@ -24,9 +24,7 @@ from dragon.core.ops import array_ops ...@@ -24,9 +24,7 @@ from dragon.core.ops import array_ops
def add(self, other): def add(self, other):
r"""Compute the element-wise addition. """Compute the element-wise addition.
.. math:: \text{out} = \text{self} + \text{other}
Parameters Parameters
---------- ----------
...@@ -36,11 +34,7 @@ def add(self, other): ...@@ -36,11 +34,7 @@ def add(self, other):
Returns Returns
------- -------
dragon.Tensor dragon.Tensor
The **y**. The output tensor.
See Also
--------
`dragon.math.add(...)`_ : Compute the element-wise addition.
""" """
return _binary_op(self, other, 'Add') return _binary_op(self, other, 'Add')
...@@ -90,9 +84,7 @@ def copy(self): ...@@ -90,9 +84,7 @@ def copy(self):
def div(self, other): def div(self, other):
r"""Compute the element-wise division. """Compute the element-wise division.
.. math:: \text{out} = \text{self} \div \text{other}
Parameters Parameters
---------- ----------
...@@ -104,18 +96,12 @@ def div(self, other): ...@@ -104,18 +96,12 @@ def div(self, other):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.div(...)`_ : Compute the element-wise division.
""" """
return _binary_op(self, other, 'Div') return _binary_op(self, other, 'Div')
def ge(self, other): def ge(self, other):
r"""Compute element-wise greater-equal comparison. """Compute element-wise greater-equal comparison.
.. math:: \text{out} = (\text{self} \geq \text{other})
Parameters Parameters
---------- ----------
...@@ -127,35 +113,23 @@ def ge(self, other): ...@@ -127,35 +113,23 @@ def ge(self, other):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.greater_equal(...)`_ : Compute element-wise greater-equal comparison.
""" """
return _binary_op(self, other, 'GreaterEqual') return _binary_op(self, other, 'GreaterEqual')
def getitem(self, item): def getitem(self, item):
"""Select the elements at the specific indices. """Select elements at the specific index.
Parameters Parameters
---------- ----------
item : Union[int, slice, dragon.Tensor] item : Union[int, slice, dragon.Tensor]
The indices. The index.
Returns Returns
------- -------
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.slice(...)`_ : Select the elements according to the given sections.
See Also
--------
`dragon.masked_select(...)`_ : Select the elements where the given mask is 1.
""" """
if isinstance(item, Tensor): if isinstance(item, Tensor):
return _masked_select(self, item) return _masked_select(self, item)
...@@ -165,21 +139,19 @@ def getitem(self, item): ...@@ -165,21 +139,19 @@ def getitem(self, item):
def get_value(self): def get_value(self):
"""Copy the data from storage. """Return the value of implementation.
Returns Returns
------- -------
numpy.ndarray numpy.ndarray
The deep copied value. The deep-copied value.
""" """
return workspace.get_workspace().fetch_tensor(self) return workspace.get_workspace().fetch_tensor(self)
def gt(self, other): def gt(self, other):
r"""Compute element-wise greater comparison. """Compute element-wise greater comparison.
.. math:: \text{out} = (\text{self} > \text{other})
Parameters Parameters
---------- ----------
...@@ -191,18 +163,12 @@ def gt(self, other): ...@@ -191,18 +163,12 @@ def gt(self, other):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.greater(...)`_ : Compute element-wise greater comparison.
""" """
return _binary_op(self, other, 'Greater') return _binary_op(self, other, 'Greater')
def le(self, other): def le(self, other):
r"""Compute element-wise less-equal comparison. """Compute element-wise less-equal comparison.
.. math:: \text{out} = (\text{self} \leq \text{other})
Parameters Parameters
---------- ----------
...@@ -214,18 +180,12 @@ def le(self, other): ...@@ -214,18 +180,12 @@ def le(self, other):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.less_equal(...)`_ : Compute element-wise less-equal comparison.
""" """
return _binary_op(self, other, 'LessEqual') return _binary_op(self, other, 'LessEqual')
def lt(self, other): def lt(self, other):
r"""Compute element-wise less comparison. """Compute element-wise less comparison.
.. math:: \text{out} = (\text{self} < \text{other})
Parameters Parameters
---------- ----------
...@@ -237,18 +197,12 @@ def lt(self, other): ...@@ -237,18 +197,12 @@ def lt(self, other):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.less(...)`_ : Compute element-wise less comparison.
""" """
return _binary_op(self, other, 'Less') return _binary_op(self, other, 'Less')
def mul(self, other): def mul(self, other):
r"""Compute the element-wise multiplication. """Compute the element-wise multiplication.
.. math:: \text{out} = \text{self} \times \text{other}
Parameters Parameters
---------- ----------
...@@ -260,36 +214,24 @@ def mul(self, other): ...@@ -260,36 +214,24 @@ def mul(self, other):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.mul(...)`_ : Compute the element-wise multiplication.
""" """
return _binary_op(self, other, 'Mul') return _binary_op(self, other, 'Mul')
def neg(self): def neg(self):
r"""Compute the element-wise negative. """Compute the element-wise negative.
.. math:: y = -x
Returns Returns
------- -------
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.negative(...)`_ : Compute the element-wise negative.
""" """
return _unary_op(self, 'Neg') return _unary_op(self, 'Neg')
def radd(self, other): def radd(self, other):
r"""Compute the element-wise addition. """Compute the element-wise addition.
.. math:: \text{out} = \text{other} + \text{self}
Parameters Parameters
---------- ----------
...@@ -301,18 +243,12 @@ def radd(self, other): ...@@ -301,18 +243,12 @@ def radd(self, other):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.add(...)`_ : Compute the element-wise addition.
""" """
return _binary_op(other, self, 'Add') return _binary_op(other, self, 'Add')
def rdiv(self, other): def rdiv(self, other):
r"""Compute the element-wise division. """Compute the element-wise division.
.. math:: \text{out} = \text{other} \div \text{self}
Parameters Parameters
---------- ----------
...@@ -324,10 +260,6 @@ def rdiv(self, other): ...@@ -324,10 +260,6 @@ def rdiv(self, other):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.div(...)`_ : Compute the element-wise division.
""" """
return _binary_op(other, self, 'Div') return _binary_op(other, self, 'Div')
...@@ -355,9 +287,7 @@ def reshape(self, shape): ...@@ -355,9 +287,7 @@ def reshape(self, shape):
def rmul(self, other): def rmul(self, other):
r"""Compute the element-wise multiplication. """Compute the element-wise multiplication.
.. math:: \text{out} = \text{other} \times \text{self}
Parameters Parameters
---------- ----------
...@@ -369,18 +299,12 @@ def rmul(self, other): ...@@ -369,18 +299,12 @@ def rmul(self, other):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.mul(...)`_ : Compute the element-wise multiplication.
""" """
return _binary_op(other, self, 'Mul') return _binary_op(other, self, 'Mul')
def rsub(self, other): def rsub(self, other):
r"""Compute the element-wise subtraction. """Compute the element-wise subtraction.
.. math:: \text{out} = \text{other} - \text{self}
Parameters Parameters
---------- ----------
...@@ -392,31 +316,19 @@ def rsub(self, other): ...@@ -392,31 +316,19 @@ def rsub(self, other):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.sub(...)`_ : Compute the element-wise subtraction.
""" """
return _binary_op(other, self, 'Sub') return _binary_op(other, self, 'Sub')
def setitem(self, key, value): def setitem(self, key, value):
"""Set the value at the specific indices. """Set elements at the specific index.
Parameters Parameters
---------- ----------
key : Union[int, slice, dragon.Tensor] key : Union[int, slice, dragon.Tensor]
The indices. The index.
value : number or dragon.Tensor value : Union[dragon.Tensor, number]
The value. The value to set.
See Also
--------
`dragon.assign(...)`_ : Assign the value to ref.
See Also
--------
`dragon.masked_assign(...)`_ : Assign the value to ref where mask is 1.
""" """
if isinstance(key, Tensor): if isinstance(key, Tensor):
...@@ -427,12 +339,12 @@ def setitem(self, key, value): ...@@ -427,12 +339,12 @@ def setitem(self, key, value):
def set_value(self, value): def set_value(self, value):
"""Feed the const value to storage. """Set value to the implementation.
Parameters Parameters
---------- ----------
value : array_like value : array_like
The const value. The value to set.
Returns Returns
------- -------
...@@ -445,9 +357,7 @@ def set_value(self, value): ...@@ -445,9 +357,7 @@ def set_value(self, value):
def sub(self, other): def sub(self, other):
r"""Compute the element-wise subtraction. """Compute the element-wise subtraction.
.. math:: \text{out} = \text{self} - \text{value}
Parameters Parameters
---------- ----------
...@@ -459,10 +369,6 @@ def sub(self, other): ...@@ -459,10 +369,6 @@ def sub(self, other):
dragon.Tensor dragon.Tensor
The output tensor. The output tensor.
See Also
--------
`dragon.math.sub(...)`_ : Compute the element-wise subtraction.
""" """
return _binary_op(self, other, 'Sub') return _binary_op(self, other, 'Sub')
...@@ -547,7 +453,6 @@ Tensor.get_value = get_value ...@@ -547,7 +453,6 @@ Tensor.get_value = get_value
Tensor.reshape = reshape Tensor.reshape = reshape
Tensor.set_value = set_value Tensor.set_value = set_value
Tensor.__add__ = add Tensor.__add__ = add
Tensor.__div__ = div
Tensor.__ge__ = ge Tensor.__ge__ = ge
Tensor.__getitem__ = getitem Tensor.__getitem__ = getitem
Tensor.__gt__ = gt Tensor.__gt__ = gt
...@@ -556,7 +461,6 @@ Tensor.__lt__ = lt ...@@ -556,7 +461,6 @@ Tensor.__lt__ = lt
Tensor.__mul__ = mul Tensor.__mul__ = mul
Tensor.__neg__ = neg Tensor.__neg__ = neg
Tensor.__radd__ = radd Tensor.__radd__ = radd
Tensor.__rdiv__ = rdiv
Tensor.__rmul__ = rmul Tensor.__rmul__ = rmul
Tensor.__rtruediv__ = rdiv Tensor.__rtruediv__ = rdiv
Tensor.__rsub__ = rsub Tensor.__rsub__ = rsub
......
...@@ -23,7 +23,7 @@ from dragon.core.framework import workspace ...@@ -23,7 +23,7 @@ from dragon.core.framework import workspace
def constant(value, dtype=None, shape=None, name='Const'): def constant(value, dtype=None, shape=None, name='Const'):
r"""Return a tensor taking the value content. """Return a tensor initialized from the value.
Examples: Examples:
...@@ -47,7 +47,7 @@ def constant(value, dtype=None, shape=None, name='Const'): ...@@ -47,7 +47,7 @@ def constant(value, dtype=None, shape=None, name='Const'):
Returns Returns
------- -------
dragon.Tensor dragon.Tensor
The constant tensor. The output tensor.
""" """
if dtype is not None: if dtype is not None:
...@@ -82,7 +82,7 @@ def constant(value, dtype=None, shape=None, name='Const'): ...@@ -82,7 +82,7 @@ def constant(value, dtype=None, shape=None, name='Const'):
else: else:
return TensorRef( return TensorRef(
name=workspace.get_workspace().unique_name( name=workspace.get_workspace().unique_name(
name, ':0', 'dragon.Tensor'), name, ':0', 'Tensor'),
shape=list(value.shape), shape=list(value.shape),
dtype=str(value.dtype), dtype=str(value.dtype),
).set_value(value) ).set_value(value)
...@@ -47,7 +47,7 @@ def elu(x, alpha=1., **kwargs): ...@@ -47,7 +47,7 @@ def elu(x, alpha=1., **kwargs):
x : dragon.Tensor x : dragon.Tensor
The tensor :math:`x`. The tensor :math:`x`.
alpha : float, optional, default=1. alpha : float, optional, default=1.
The value of :math:`\alpha`. The value to :math:`\alpha`.
Returns Returns
------- -------
...@@ -141,7 +141,7 @@ def relu(x, alpha=0, max_value=None, **kwargs): ...@@ -141,7 +141,7 @@ def relu(x, alpha=0, max_value=None, **kwargs):
alpha : number, optional, default=0 alpha : number, optional, default=0
The valve of :math:`\alpha`. The valve of :math:`\alpha`.
max_value : number, optional max_value : number, optional
The value of :math:`v_{max}`. The value to :math:`v_{max}`.
""" """
if max_value is not None: if max_value is not None:
......
...@@ -43,7 +43,7 @@ def Input( ...@@ -43,7 +43,7 @@ def Input(
x = tf.keras.Input(shape=(8,), batch_size=8, dtype='float32') x = tf.keras.Input(shape=(8,), batch_size=8, dtype='float32')
# Create a placeholder aliasing an existing tensor # Create a placeholder aliasing an existing tensor
x = dragon.Tensor('x', shape=(8,), dtype='float32').variable() x = dragon.Tensor('x', shape=(8,), dtype='float32').constant()
xx = tf.keras.Input(tensor=x) xx = tf.keras.Input(tensor=x)
``` ```
......
...@@ -49,7 +49,7 @@ class ELU(Layer): ...@@ -49,7 +49,7 @@ class ELU(Layer):
Parameters Parameters
---------- ----------
alpha : float, optional, default=0.3 alpha : float, optional, default=0.3
The value of :math:`\alpha`. The value to :math:`\alpha`.
""" """
super(ELU, self).__init__(**kwargs) super(ELU, self).__init__(**kwargs)
...@@ -92,7 +92,7 @@ class LeakyReLU(Layer): ...@@ -92,7 +92,7 @@ class LeakyReLU(Layer):
Parameters Parameters
---------- ----------
alpha : float, optional, default=0.3 alpha : float, optional, default=0.3
The value of :math:`\alpha`. The value to :math:`\alpha`.
""" """
super(LeakyReLU, self).__init__(**kwargs) super(LeakyReLU, self).__init__(**kwargs)
...@@ -135,9 +135,9 @@ class ReLU(Layer): ...@@ -135,9 +135,9 @@ class ReLU(Layer):
Parameters Parameters
---------- ----------
max_value : number, optional max_value : number, optional
The value of :math:`v_{max}`. The value to :math:`v_{max}`.
negative_slope : float, optional, default=0. negative_slope : float, optional, default=0.
The value of :math:`\alpha`. The value to :math:`\alpha`.
""" """
super(ReLU, self).__init__(**kwargs) super(ReLU, self).__init__(**kwargs)
......
...@@ -26,6 +26,16 @@ class Loss(object): ...@@ -26,6 +26,16 @@ class Loss(object):
"""The base class for loss criterion.""" """The base class for loss criterion."""
def __init__(self, reduction=losses_utils.Reduction.MEAN, name=None): def __init__(self, reduction=losses_utils.Reduction.MEAN, name=None):
"""Create a ``Loss`` criterion.
Parameters
----------
reduction : {'none', 'sum', 'mean', 'valid'}, optional
The reduction method.
name : str, optional
A optional name for the operation.
"""
losses_utils.Reduction.validate(reduction) losses_utils.Reduction.validate(reduction)
self.reduction = reduction self.reduction = reduction
self.name = name self.name = name
......
...@@ -21,7 +21,7 @@ from dragon.vm.tensorflow.core.keras.optimizer import optimizer ...@@ -21,7 +21,7 @@ from dragon.vm.tensorflow.core.keras.optimizer import optimizer
class Adam(optimizer.Optimizer): class Adam(optimizer.Optimizer):
r"""The optimizer which implements Adam algorithm. r"""The optimizer to apply Adam algorithm.
`[Kingma & Ba, 2014] <https://arxiv.org/abs/1412.6980>`_. `[Kingma & Ba, 2014] <https://arxiv.org/abs/1412.6980>`_.
The **Adam** update is defined as: The **Adam** update is defined as:
......
...@@ -21,7 +21,7 @@ from dragon.vm.tensorflow.core.keras.optimizer import optimizer ...@@ -21,7 +21,7 @@ from dragon.vm.tensorflow.core.keras.optimizer import optimizer
class SGD(optimizer.Optimizer): class SGD(optimizer.Optimizer):
r"""The optimizer which implements SGD algorithm. r"""The optimizer to apply SGD algorithm.
Following SGD algorithms are supported: Following SGD algorithms are supported:
......
...@@ -17,7 +17,7 @@ from dragon.vm.tensorflow.core.keras.optimizer import optimizer ...@@ -17,7 +17,7 @@ from dragon.vm.tensorflow.core.keras.optimizer import optimizer
class RMSprop(optimizer.Optimizer): class RMSprop(optimizer.Optimizer):
r"""The optimizer which implements RMSprop algorithm. r"""The optimizer to apply RMSprop algorithm.
`[Hinton et.al, 2013] <http://www.cs.utoronto.ca/~bonner/courses/2016s/csc321/lectures/lec6.pdf>`_. `[Hinton et.al, 2013] <http://www.cs.utoronto.ca/~bonner/courses/2016s/csc321/lectures/lec6.pdf>`_.
The **RMSprop** update is defined as: The **RMSprop** update is defined as:
......
...@@ -56,9 +56,9 @@ class L1L2(Regularizer): ...@@ -56,9 +56,9 @@ class L1L2(Regularizer):
Parameters Parameters
---------- ----------
l1 : float, optional, default=0.01 l1 : float, optional, default=0.01
The value of :math:`\alpha`. The value to :math:`\alpha`.
l2 : float, optional, default=0.01 l2 : float, optional, default=0.01
The value of :math:`\beta`. The value to :math:`\beta`.
""" """
if l1 <= 0. or l2 <= 0.: if l1 <= 0. or l2 <= 0.:
...@@ -90,7 +90,7 @@ def l1(l=0.01): ...@@ -90,7 +90,7 @@ def l1(l=0.01):
Parameters Parameters
---------- ----------
l : float, optional, default=0.01 l : float, optional, default=0.01
The value of :math:`\alpha`. The value to :math:`\alpha`.
Returns Returns
------- -------
...@@ -111,9 +111,9 @@ def l1_l2(l1=0.01, l2=0.01): ...@@ -111,9 +111,9 @@ def l1_l2(l1=0.01, l2=0.01):
Parameters Parameters
---------- ----------
l1 : float, optional, default=0.01 l1 : float, optional, default=0.01
The value of :math:`\alpha`. The value to :math:`\alpha`.
l2 : float, optional, default=0.01 l2 : float, optional, default=0.01
The value of :math:`\beta`. The value to :math:`\beta`.
Returns Returns
------- -------
...@@ -134,7 +134,7 @@ def l2(l=0.01): ...@@ -134,7 +134,7 @@ def l2(l=0.01):
Parameters Parameters
---------- ----------
l : float, optional, default=0.01 l : float, optional, default=0.01
The value of :math:`\beta`. The value to :math:`\beta`.
Returns Returns
------- -------
......
...@@ -19,5 +19,4 @@ from __future__ import print_function ...@@ -19,5 +19,4 @@ from __future__ import print_function
from dragon.vm.tensorflow.core.ops.losses import loss_reduction from dragon.vm.tensorflow.core.ops.losses import loss_reduction
Reduction = loss_reduction.Reduction Reduction = loss_reduction.Reduction
...@@ -183,9 +183,9 @@ def expand_dims(input, axis, name=None): ...@@ -183,9 +183,9 @@ def expand_dims(input, axis, name=None):
def fill(dims, value=0, dtype=None, name=None): def fill(dims, value=0, dtype=None, name=None):
r"""Return a tensor filled with the specific value. r"""Return a tensor filled with the scalar value.
.. math:: y \leftarrow \text{Constant} .. math:: \text{out} \leftarrow \text{value}
Examples: Examples:
...@@ -298,7 +298,7 @@ def identity(input, name=None): ...@@ -298,7 +298,7 @@ def identity(input, name=None):
def ones(shape, dtype='float32', name=None): def ones(shape, dtype='float32', name=None):
r"""Return a tensor filled with ones. r"""Return a tensor filled with ones.
.. math:: y \leftarrow 1 .. math:: \text{out} \leftarrow 1
```python ```python
x = tf.ones(shape=(2, 3), dtype=tf.float32) x = tf.ones(shape=(2, 3), dtype=tf.float32)
...@@ -318,9 +318,9 @@ def ones(shape, dtype='float32', name=None): ...@@ -318,9 +318,9 @@ def ones(shape, dtype='float32', name=None):
def ones_like(input, dtype='float32', name=None): def ones_like(input, dtype='float32', name=None):
r"""Return a tensor shaping like another filled with ones. r"""Return a tensor of ones with shape as the other.
.. math:: y \leftarrow 1 .. math:: \text{out} \leftarrow 1
Examples: Examples:
...@@ -349,14 +349,14 @@ def one_hot( ...@@ -349,14 +349,14 @@ def one_hot(
off_value=0, off_value=0,
name=None, name=None,
): ):
r"""Return the one-hot representation from indices. r"""Return the one-hot representation for input.
.. math:: .. math::
y[i][j] = \text{out}[i][j] =
\begin{cases} \begin{cases}
\text{Val}_{off}, & \text{ if } \text{indices}[i] \neq j \\ \text{off\_value}, & \text{ if } \text{input}[i] \neq j \\
\text{Val}_{on}, & \text{ otherwise } \text{on\_value}, & \text{ otherwise }
\end{cases} \end{cases}
The max value of indices, i.e., the ``depth`` should be specified: The max value of indices, i.e., the ``depth`` should be specified:
...@@ -374,7 +374,7 @@ def one_hot( ...@@ -374,7 +374,7 @@ def one_hot(
Parameters Parameters
---------- ----------
indices : dragon.Tensor indices : dragon.Tensor
The tensor ``indices``. The input tensor.
depth : int depth : int
The depth of representation. The depth of representation.
on_value : int, optional, default=1 on_value : int, optional, default=1
...@@ -390,7 +390,13 @@ def one_hot( ...@@ -390,7 +390,13 @@ def one_hot(
The output tensor. The output tensor.
""" """
return array_ops.one_hot(indices, depth, on_value, off_value, name=name) return array_ops.one_hot(
indices,
depth=depth,
on_value=on_value,
off_value=off_value,
name=name,
)
def pad( def pad(
...@@ -483,7 +489,7 @@ def placeholder(dtype=None, shape=None, name=None): ...@@ -483,7 +489,7 @@ def placeholder(dtype=None, shape=None, name=None):
suffix=':0', namespace='Tensor'), suffix=':0', namespace='Tensor'),
dtype=str(dtype) if dtype else dtype, dtype=str(dtype) if dtype else dtype,
shape=shape, shape=shape,
).placeholder() ).constant()
def reshape(tensor, shape, name=None): def reshape(tensor, shape, name=None):
...@@ -752,7 +758,7 @@ def transpose(a, perm=None, name=None): ...@@ -752,7 +758,7 @@ def transpose(a, perm=None, name=None):
def zeros(shape, dtype='float32', name=None): def zeros(shape, dtype='float32', name=None):
r"""Return a tensor filled with zeros. r"""Return a tensor filled with zeros.
.. math:: y \leftarrow 0 .. math:: \text{out} \leftarrow 0
```python ```python
x = tf.zeros(shape=(2, 3), dtype=tf.float32) x = tf.zeros(shape=(2, 3), dtype=tf.float32)
...@@ -772,9 +778,9 @@ def zeros(shape, dtype='float32', name=None): ...@@ -772,9 +778,9 @@ def zeros(shape, dtype='float32', name=None):
def zeros_like(input, dtype='float32', name=None): def zeros_like(input, dtype='float32', name=None):
r"""Return a tensor shaping like another filled with zeros. r"""Return a tensor of zeros with shape as the other.
.. math:: y \leftarrow 0 .. math:: \text{out} \leftarrow 0
Examples: Examples:
......
...@@ -42,9 +42,9 @@ def clip_by_value( ...@@ -42,9 +42,9 @@ def clip_by_value(
t : dragon.Tensor t : dragon.Tensor
The tensor :math:`x`. The tensor :math:`x`.
clip_value_min : number, optional clip_value_min : number, optional
The value of :math:`\text{low}`. The value to :math:`\text{low}`.
clip_value_max : number, optional clip_value_max : number, optional
The value of :math:`\text{high}`. The value to :math:`\text{high}`.
name : str, optional name : str, optional
A optional name for the operation. A optional name for the operation.
......
...@@ -43,7 +43,11 @@ class Initializer(object): ...@@ -43,7 +43,11 @@ class Initializer(object):
class Constant(Initializer): class Constant(Initializer):
"""Fill tensors with a scalar.""" r"""Fill tensor with a scalar value.
.. math:: \text{tensor} \leftarrow \text{value}
"""
def __init__(self, value=0, dtype='float32'): def __init__(self, value=0, dtype='float32'):
"""Create a ``Constant`` initializer. """Create a ``Constant`` initializer.
...@@ -78,19 +82,27 @@ class Constant(Initializer): ...@@ -78,19 +82,27 @@ class Constant(Initializer):
return init_ops.fill(shape, value=self.value, dtype=dtype) return init_ops.fill(shape, value=self.value, dtype=dtype)
class Ones(Initializer): class RandomNormal(Initializer):
"""Fill tensors with ones.""" r"""Fill tensor from a normal distribution.
def __init__(self, dtype='float32'): .. math:: \text{tensor} \sim \mathcal{N}(\mu, \sigma)
"""Create a ``Ones`` initializer.
"""
def __init__(self, mean=0, stddev=1, dtype='float32'):
r"""Create a ``RandomNormal`` initializer.
Parameters Parameters
---------- ----------
mean : number, optional, default=0
The value to :math:`\mu`.
stddev : number, optional, default=1
The value to :math:`\sigma`.
dtype : str, optional, default='float32' dtype : str, optional, default='float32'
The data type to set as default. The data type to set as default.
""" """
self.dtype = dtype self.mean, self.stddev, self.dtype = mean, stddev, dtype
def __call__(self, shape, dtype=None, **kwargs): def __call__(self, shape, dtype=None, **kwargs):
"""Return a tensor initialized from the initializer. """Return a tensor initialized from the initializer.
...@@ -108,22 +120,30 @@ class Ones(Initializer): ...@@ -108,22 +120,30 @@ class Ones(Initializer):
The output tensor. The output tensor.
""" """
dtype = str(self.dtype) if dtype is None else str(dtype) return init_ops.random_normal(
return init_ops.fill(shape, value=1, dtype=str(dtype)) shape=shape,
mean=self.mean,
std=self.stddev,
dtype=str(self.dtype) if dtype is None else str(dtype),
)
class RandomUniform(Initializer): class RandomUniform(Initializer):
"""Fill tensors according to a uniform distribution.""" r"""Fill tensor from an uniform distribution.
.. math:: \text{tensor} \sim \mathcal{U}(\alpha, \beta)
"""
def __init__(self, minval=0, maxval=1, dtype='float32'): def __init__(self, minval=0, maxval=1, dtype='float32'):
"""Create a ``RandomUniform`` initializer. r"""Create a ``RandomUniform`` initializer.
Parameters Parameters
---------- ----------
minval : number, optional, default=0 minval : number, optional, default=0
The lower bound of distribution. The value to :math:`\alpha`.
maxval : number, optional, default=1 maxval : number, optional, default=1
The higher bound of distribution. The value to :math:`\beta`.
dtype : str, optional, default='float32' dtype : str, optional, default='float32'
The data type to set as default. The data type to set as default.
...@@ -155,67 +175,22 @@ class RandomUniform(Initializer): ...@@ -155,67 +175,22 @@ class RandomUniform(Initializer):
) )
class RandomNormal(Initializer):
"""Fill tensors according to a normal distribution."""
def __init__(self, mean=0, stddev=1, dtype='float32'):
"""Create a ``RandomNormal`` initializer.
Parameters
----------
mean : number, optional, default=0
The mean of distribution.
stddev : number, optional, default=1
The stddev of distribution.
dtype : str, optional, default='float32'
The data type to set as default.
"""
self.mean, self.stddev, self.dtype = mean, stddev, dtype
def __call__(self, shape, dtype=None, **kwargs):
"""Return a tensor initialized from the initializer.
Parameters
----------
shape : Sequence[int]
The tensor shape.
dtype : str, optional
The optional data type.
Returns
-------
dragon.Tensor
The output tensor.
"""
return init_ops.random_normal(
shape=shape,
mean=self.mean,
std=self.stddev,
dtype=str(self.dtype) if dtype is None else str(dtype),
)
class TruncatedNormal(Initializer): class TruncatedNormal(Initializer):
r"""Fill tensors according to a truncated normal distribution. r"""Fill tensor from a truncated normal distribution.
The **TruncatedNormal** distribution is defined as:
.. math:: .. math:: \text{tensor} \sim \mathcal{TN}(\mu, \sigma, \mu - 2\sigma, \mu + 2\sigma)
X \sim TN(\mu, \sigma, \mu - 2\sigma, \mu + 2\sigma)
""" """
def __init__(self, mean=0, stddev=1, dtype='float32'): def __init__(self, mean=0, stddev=1, dtype='float32'):
"""Create a ``TruncatedNormal`` initializer. r"""Create a ``TruncatedNormal`` initializer.
Parameters Parameters
---------- ----------
mean : number, optional, default=0 mean : number, optional, default=0
The mean of distribution. The value to :math:`\mu`.
stddev : number, optional, default=1 stddev : number, optional, default=1
The stddev of distribution. The value to :math:`\sigma`.
dtype : str, optional, default='float32' dtype : str, optional, default='float32'
The data type to set as default. The data type to set as default.
...@@ -247,11 +222,11 @@ class TruncatedNormal(Initializer): ...@@ -247,11 +222,11 @@ class TruncatedNormal(Initializer):
class VarianceScaling(Initializer): class VarianceScaling(Initializer):
"""Fill tensors with the random values adapting to shape.""" """Fill tensor from a scaled random distribution."""
def __init__( def __init__(
self, self,
scale=1., scale=1.0,
mode='fan_in', mode='fan_in',
distribution='normal', distribution='normal',
dtype='float32', dtype='float32',
...@@ -260,8 +235,8 @@ class VarianceScaling(Initializer): ...@@ -260,8 +235,8 @@ class VarianceScaling(Initializer):
Parameters Parameters
---------- ----------
scale : float, optional, default=1. scale : float, optional, default=1
The scale factor applied to distribution. The scale factor to distribution.
mode : {'fan_in', 'fan_out', 'fan_avg'}, optional mode : {'fan_in', 'fan_out', 'fan_avg'}, optional
The mode for adapting to shape. The mode for adapting to shape.
distribution : {'normal', 'uniform'}, optional distribution : {'normal', 'uniform'}, optional
...@@ -271,15 +246,13 @@ class VarianceScaling(Initializer): ...@@ -271,15 +246,13 @@ class VarianceScaling(Initializer):
""" """
if scale <= 0.: if scale <= 0.:
raise ValueError("`scale` must be positive float.") raise ValueError('<scale> must be positive float.')
mode = mode.lower()
if mode not in {"fan_in", "fan_out", "fan_avg"}: if mode not in {'fan_in', 'fan_out', 'fan_avg'}:
raise ValueError("Invalid `mode` argument:", mode) raise ValueError('Invalid <mode> argument:', mode)
distribution = distribution.lower() distribution = distribution.lower()
if distribution not in {"normal", "uniform"}: if distribution not in {'normal', 'uniform'}:
raise ValueError("Invalid `distribution` argument:", distribution) raise ValueError("Invalid `distribution` argument:", distribution)
self.scale = scale self.scale = scale
self.mode = mode self.mode = mode
self.distribution = distribution self.distribution = distribution
...@@ -304,21 +277,25 @@ class VarianceScaling(Initializer): ...@@ -304,21 +277,25 @@ class VarianceScaling(Initializer):
if self.distribution == 'normal': if self.distribution == 'normal':
return init_ops.glorot_normal( return init_ops.glorot_normal(
shape=shape, shape=shape,
scale=self.scale * 2.,
mode=self.mode, mode=self.mode,
scale=self.scale * 2.0,
dtype=str(self.dtype) if dtype is None else str(dtype) dtype=str(self.dtype) if dtype is None else str(dtype)
) )
else: else:
return init_ops.glorot_uniform( return init_ops.glorot_uniform(
shape=shape, shape=shape,
scale=self.scale * 3.,
mode=self.mode, mode=self.mode,
scale=self.scale * 3.0,
dtype=str(self.dtype) if dtype is None else str(dtype) dtype=str(self.dtype) if dtype is None else str(dtype)
) )
class GlorotNormal(VarianceScaling): class GlorotNormal(VarianceScaling):
"""Fill tensors according to a glorot normal distribution.""" r"""Fill tensor from a glorot normal distribution.
.. math:: \text{tensor} \sim \mathcal{N}(0, \sqrt{\frac{2}{\text{fan\_avg}}})
"""
def __init__(self, dtype='float32'): def __init__(self, dtype='float32'):
"""Create a ``GlorotNormal`` initializer. """Create a ``GlorotNormal`` initializer.
...@@ -330,7 +307,7 @@ class GlorotNormal(VarianceScaling): ...@@ -330,7 +307,7 @@ class GlorotNormal(VarianceScaling):
""" """
super(GlorotNormal, self).__init__( super(GlorotNormal, self).__init__(
scale=1., scale=1.0,
mode='fan_avg', mode='fan_avg',
distribution='normal', distribution='normal',
dtype=dtype, dtype=dtype,
...@@ -338,7 +315,12 @@ class GlorotNormal(VarianceScaling): ...@@ -338,7 +315,12 @@ class GlorotNormal(VarianceScaling):
class GlorotUniform(VarianceScaling): class GlorotUniform(VarianceScaling):
"""Fill tensors according to a glorot uniform distribution.""" r"""Fill tensor from a glorot uniform distribution.
.. math:: \text{tensor} \sim \mathcal{U}(-\sqrt{\frac{3}{\text{fan\_avg}}},
\sqrt{\frac{3}{\text{fan\_avg}}})
"""
def __init__(self, dtype='float32'): def __init__(self, dtype='float32'):
"""Create a ``GlorotUniform`` initializer. """Create a ``GlorotUniform`` initializer.
...@@ -357,8 +339,50 @@ class GlorotUniform(VarianceScaling): ...@@ -357,8 +339,50 @@ class GlorotUniform(VarianceScaling):
) )
class Ones(Initializer):
r"""Fill tensor with ones.
.. math:: \text{tensor} \leftarrow 1
"""
def __init__(self, dtype='float32'):
"""Create a ``Ones`` initializer.
Parameters
----------
dtype : str, optional, default='float32'
The data type to set as default.
"""
self.dtype = dtype
def __call__(self, shape, dtype=None, **kwargs):
"""Return a tensor initialized from the initializer.
Parameters
----------
shape : Sequence[int]
The tensor shape.
dtype : str, optional
The optional data type.
Returns
-------
dragon.Tensor
The output tensor.
"""
dtype = str(self.dtype) if dtype is None else str(dtype)
return init_ops.fill(shape, value=1, dtype=str(dtype))
class Zeros(Initializer): class Zeros(Initializer):
"""Fill tensors with zeros.""" r"""Fill tensor with zeros.
.. math:: \text{tensor} \leftarrow 0
"""
def __init__(self, dtype='float32'): def __init__(self, dtype='float32'):
"""Create a ``Zeros`` initializer. """Create a ``Zeros`` initializer.
......
...@@ -846,7 +846,7 @@ def pow(x, y, name=None): ...@@ -846,7 +846,7 @@ def pow(x, y, name=None):
def range(start, limit=None, delta=1, dtype='int64', name=None): def range(start, limit=None, delta=1, dtype='int64', name=None):
r"""Return a tensor with evenly spaced values within a interval. r"""Return a tensor of evenly spaced values within a interval.
Specify ``start`` and ``limit`` to determine an interval: Specify ``start`` and ``limit`` to determine an interval:
......
...@@ -116,7 +116,7 @@ def l2_normalize(x, axis=None, epsilon=1e-12, name=None): ...@@ -116,7 +116,7 @@ def l2_normalize(x, axis=None, epsilon=1e-12, name=None):
axis : Union[int, Sequence[int]], optional axis : Union[int, Sequence[int]], optional
The axis to compute norm. The axis to compute norm.
epsilon : float, optional, default=1e-5 epsilon : float, optional, default=1e-5
The value of :math:`\epsilon`. The value to :math:`\epsilon`.
name : str, optional name : str, optional
A optional name for the operation. A optional name for the operation.
......
...@@ -493,7 +493,7 @@ def elu(features, alpha=1., name=None, **kwargs): ...@@ -493,7 +493,7 @@ def elu(features, alpha=1., name=None, **kwargs):
features : dragon.Tensor features : dragon.Tensor
The tensor :math:`x`. The tensor :math:`x`.
alpha : float, optional, default=1. alpha : float, optional, default=1.
The value of :math:`\alpha`. The value to :math:`\alpha`.
name : str, optional name : str, optional
A optional name for the operation. A optional name for the operation.
...@@ -527,7 +527,7 @@ def leaky_relu(features, alpha=0.2, name=None, **kwargs): ...@@ -527,7 +527,7 @@ def leaky_relu(features, alpha=0.2, name=None, **kwargs):
features : dragon.Tensor features : dragon.Tensor
The input tensor. The input tensor.
alpha : number, optional, default=0.2 alpha : number, optional, default=0.2
The value of :math:`\alpha`. The value to :math:`\alpha`.
name : str, optional name : str, optional
A optional name for the operation. A optional name for the operation.
......
...@@ -35,9 +35,9 @@ def random_normal( ...@@ -35,9 +35,9 @@ def random_normal(
shape : Sequence[Union[int, dragon.Tensor]] shape : Sequence[Union[int, dragon.Tensor]]
The shape of the tensor. The shape of the tensor.
mean : number, optional, default=0 mean : number, optional, default=0
The value of :math:`\mu`. The value to :math:`\mu`.
stddev : number, optional, default=1 stddev : number, optional, default=1
The value of :math:`\sigma`. The value to :math:`\sigma`.
dtype : str, optional dtype : str, optional
The optional data type. The optional data type.
seed : int, optional seed : int, optional
...@@ -74,9 +74,9 @@ def random_uniform( ...@@ -74,9 +74,9 @@ def random_uniform(
shape : Sequence[Union[int, dragon.Tensor]] shape : Sequence[Union[int, dragon.Tensor]]
The shape of the tensor. The shape of the tensor.
minval : number, optional, default=0 minval : number, optional, default=0
The value of :math:`\alpha`. The value to :math:`\alpha`.
maxval : number, optional, default=1 maxval : number, optional, default=1
The value of :math:`\beta`. The value to :math:`\beta`.
dtype : str, optional dtype : str, optional
The optional data type. The optional data type.
seed : int, optional seed : int, optional
...@@ -114,9 +114,9 @@ def truncated_normal( ...@@ -114,9 +114,9 @@ def truncated_normal(
shape : Sequence[Union[int, dragon.Tensor]] shape : Sequence[Union[int, dragon.Tensor]]
The shape of the tensor. The shape of the tensor.
mean : number, optional, default=0 mean : number, optional, default=0
The value of :math:`\mu`. The value to :math:`\mu`.
stddev : number, optional, default=1 stddev : number, optional, default=1
The value of :math:`\sigma`. The value to :math:`\sigma`.
dtype : str, optional dtype : str, optional
The optional data type. The optional data type.
seed : int, optional seed : int, optional
......
...@@ -46,7 +46,7 @@ def leaky_relu(x, alpha=0.2, name="leaky_relu", **kwargs): ...@@ -46,7 +46,7 @@ def leaky_relu(x, alpha=0.2, name="leaky_relu", **kwargs):
x : dragon.Tensor x : dragon.Tensor
The input tensor. The input tensor.
alpha : float, optional, default=0.2 alpha : float, optional, default=0.2
The value of :math:`\alpha`. The value to :math:`\alpha`.
name : str, optional name : str, optional
The optional operator name. The optional operator name.
......
...@@ -50,9 +50,9 @@ class Initializer(object): ...@@ -50,9 +50,9 @@ class Initializer(object):
class Constant(Initializer): class Constant(Initializer):
r"""Fill tensors with a scalar value. r"""Fill tensor with a scalar value.
.. math:: y \leftarrow \text{Value} .. math:: \text{tensor} \leftarrow \text{value}
""" """
...@@ -78,32 +78,30 @@ class Constant(Initializer): ...@@ -78,32 +78,30 @@ class Constant(Initializer):
class GlorotNormal(Initializer): class GlorotNormal(Initializer):
r"""Fill tensors according to a glorot normal distribution. r"""Fill tensor from a glorot normal distribution.
The **GlorotNormal** distribution is defined as: .. math:: \text{tensor} \sim \mathcal{N}(0, \sqrt{\frac{\text{scale}}{\text{fan}}})
.. math:: X \sim N(0, \sqrt{\frac{\text{scale}}{\text{FAN}}})
""" """
def __init__(self, scale=2., mode='FAN_IN'): def __init__(self, mode='fan_in', scale=2.0):
"""Create a ``GlorotNormal`` initializer. """Create a ``GlorotNormal`` initializer.
Parameters Parameters
---------- ----------
scale : float, optional, default=2. mode : {'fan_in', 'fan_out', 'fan_avg'}, optional
The scale factor of distribution. The mode to compute the fans.
mode : {'FAN_IN', 'FAN_OUT', 'FAN_AVG'}, optional scale : float, optional, default=2.0
The mode to compute the normalizer. The scale factor to distribution.
""" """
self.scale, self.mode = scale, mode self.mode, self.scale = mode, scale
def __call__(self, shape, dtype='float32', **kwargs): def __call__(self, shape, dtype='float32', **kwargs):
return self._getter( return self._getter(
init_ops.glorot_normal, init_ops.glorot_normal,
scale=self.scale,
mode=self.mode, mode=self.mode,
scale=self.scale,
shape=shape, shape=shape,
dtype=dtype, dtype=dtype,
**kwargs **kwargs
...@@ -111,37 +109,32 @@ class GlorotNormal(Initializer): ...@@ -111,37 +109,32 @@ class GlorotNormal(Initializer):
class GlorotUniform(Initializer): class GlorotUniform(Initializer):
r"""Fill tensors according to a glorot uniform distribution. r"""Fill tensor from a glorot uniform distribution.
The **GlorotUniform** distribution is defined as: .. math:: \text{tensor} \sim \mathcal{U}(-\sqrt{\frac{\text{scale}}{\text{fan}}},
\sqrt{\frac{\text{scale}}{\text{fan}}})
.. math::
X \sim U(
-\sqrt{\frac{\text{scale}}{\text{FAN}}},
\sqrt{\frac{\text{scale}}{\text{FAN}}}
)
""" """
def __init__(self, scale=3., mode='FAN_IN'): def __init__(self, mode='fan_in', scale=3.0):
"""Create a ``GlorotUniform`` initializer. """Create a ``GlorotUniform`` initializer.
Parameters Parameters
---------- ----------
scale : float, optional, default=3. mode : {'fan_in', 'fan_out', 'fan_avg'}, optional
The scale factor of distribution. The mode to compute the fans.
mode : {'FAN_IN', 'FAN_OUT', 'FAN_AVG'}, optional scale : float, optional, default=3.0
The mode to compute the normalizer. The scale factor to distribution.
""" """
super(GlorotUniform, self).__init__() super(GlorotUniform, self).__init__()
self.scale, self.mode = scale, mode self.mode, self.scale = mode, scale
def __call__(self, shape, dtype='float32', **kwargs): def __call__(self, shape, dtype='float32', **kwargs):
return self._getter( return self._getter(
init_ops.glorot_uniform, init_ops.glorot_uniform,
scale=self.scale,
mode=self.mode, mode=self.mode,
scale=self.scale,
shape=shape, shape=shape,
dtype=dtype, dtype=dtype,
**kwargs **kwargs
...@@ -149,14 +142,14 @@ class GlorotUniform(Initializer): ...@@ -149,14 +142,14 @@ class GlorotUniform(Initializer):
class Ones(Initializer): class Ones(Initializer):
r"""Fill tensors with ones. r"""Fill tensor with ones.
.. math:: y \leftarrow 0 .. math:: \text{tensor} \leftarrow 1
""" """
def __init__(self): def __init__(self):
"""Create a ``Zeros`` initializer.""" """Create a ``Ones`` initializer."""
super(Ones, self).__init__() super(Ones, self).__init__()
def __call__(self, shape, dtype='float32', **kwargs): def __call__(self, shape, dtype='float32', **kwargs):
...@@ -170,11 +163,9 @@ class Ones(Initializer): ...@@ -170,11 +163,9 @@ class Ones(Initializer):
class RandomNormal(Initializer): class RandomNormal(Initializer):
r"""Fill tensors according to a random normal distribution. r"""Fill tensor from a normal distribution.
The **RandomNormal** distribution is defined as:
.. math:: X \sim N(\mu, \sigma) .. math:: \text{tensor} \sim \mathcal{N}(\mu, \sigma)
""" """
...@@ -184,9 +175,9 @@ class RandomNormal(Initializer): ...@@ -184,9 +175,9 @@ class RandomNormal(Initializer):
Parameters Parameters
---------- ----------
mean : number, optional, default=0. mean : number, optional, default=0.
The value of :math:`\mu`. The value to :math:`\mu`.
stddev : number, optional, default=0.05 stddev : number, optional, default=0.05
The value of :math:`\sigma`. The value to :math:`\sigma`.
""" """
self.mean, self.stddev = mean, stddev self.mean, self.stddev = mean, stddev
...@@ -203,11 +194,9 @@ class RandomNormal(Initializer): ...@@ -203,11 +194,9 @@ class RandomNormal(Initializer):
class RandomUniform(Initializer): class RandomUniform(Initializer):
r"""Fill tensors according to a random uniform distribution. r"""Fill tensors from an uniform distribution.
The **RandomUniform** distribution is defined as: .. math:: \text{tensor} \sim \mathcal{U}(\alpha, \beta)
.. math:: X \sim U(\alpha, \beta)
""" """
...@@ -217,9 +206,9 @@ class RandomUniform(Initializer): ...@@ -217,9 +206,9 @@ class RandomUniform(Initializer):
Parameters Parameters
---------- ----------
minval : number, optional, default=-0.05 minval : number, optional, default=-0.05
The value of :math:`\alpha`. The value to :math:`\alpha`.
maxval : number, optional, default=0.05 maxval : number, optional, default=0.05
The value of :math:`\beta`. The value to :math:`\beta`.
""" """
self.minval, self.maxval = minval, maxval self.minval, self.maxval = minval, maxval
...@@ -236,12 +225,9 @@ class RandomUniform(Initializer): ...@@ -236,12 +225,9 @@ class RandomUniform(Initializer):
class TruncatedNormal(Initializer): class TruncatedNormal(Initializer):
r"""Fill tensors according to a truncated normal distribution. r"""Fill tensor from a truncated normal distribution.
The **TruncatedNormal** distribution is defined as:
.. math:: .. math:: \text{tensor} \sim \mathcal{TN}(\mu, \sigma, \mu - 2\sigma, \mu + 2\sigma)
X \sim TN(\mu, \sigma, \mu - 2\sigma, \mu + 2\sigma)
""" """
...@@ -251,9 +237,9 @@ class TruncatedNormal(Initializer): ...@@ -251,9 +237,9 @@ class TruncatedNormal(Initializer):
Parameters Parameters
---------- ----------
mean : number, optional, default=0. mean : number, optional, default=0.
The value of :math:`\mu`. The value to :math:`\mu`.
stddev : number, optional, default=0.05 stddev : number, optional, default=0.05
The value of :math:`\sigma`. The value to :math:`\sigma`.
""" """
self.mean, self.stddev = mean, stddev self.mean, self.stddev = mean, stddev
...@@ -270,9 +256,9 @@ class TruncatedNormal(Initializer): ...@@ -270,9 +256,9 @@ class TruncatedNormal(Initializer):
class Zeros(Initializer): class Zeros(Initializer):
r"""Fill tensors with zeros. r"""Fill tensor with zeros.
.. math:: y \leftarrow 1 .. math:: \text{tensor} \leftarrow 0
""" """
......
...@@ -46,7 +46,7 @@ class TestTensor(unittest.TestCase): ...@@ -46,7 +46,7 @@ class TestTensor(unittest.TestCase):
self.assertNotEqual(a.__hash__(), b.__hash__()) self.assertNotEqual(a.__hash__(), b.__hash__())
self.assertNotEqual(a.__repr__(), b.__repr__()) self.assertNotEqual(a.__repr__(), b.__repr__())
self.assertNotEqual(b.__repr__(), dragon.EagerTensor([2]).__repr__()) self.assertNotEqual(b.__repr__(), dragon.EagerTensor([2]).__repr__())
self.assertEqual(int(a.variable().placeholder().set_value(1)), 1) self.assertEqual(int(a.constant().set_value(1)), 1)
self.assertEqual(float(dragon.Tensor.convert_to(1)), 1.) self.assertEqual(float(dragon.Tensor.convert_to(1)), 1.)
self.assertEqual(int(b.set_value(1)), 1) self.assertEqual(int(b.set_value(1)), 1)
self.assertEqual(float(b), 1.) self.assertEqual(float(b), 1.)
...@@ -73,6 +73,8 @@ class TestTensor(unittest.TestCase): ...@@ -73,6 +73,8 @@ class TestTensor(unittest.TestCase):
with dragon.name_scope(''): with dragon.name_scope(''):
b.name = 'b' b.name = 'b'
self.assertEqual(b.name, 'b') self.assertEqual(b.name, 'b')
b.requires_grad = True
self.assertEqual(b.requires_grad, True)
def test_dlpack_converter(self): def test_dlpack_converter(self):
data = np.array([0., 1., 2.], 'float32') data = np.array([0., 1., 2.], 'float32')
...@@ -101,14 +103,56 @@ class TestTensor(unittest.TestCase): ...@@ -101,14 +103,56 @@ class TestTensor(unittest.TestCase):
class TestWorkspace(unittest.TestCase): class TestWorkspace(unittest.TestCase):
"""Test the workspace class.""" """Test the workspace class."""
def test_clear(self):
w = dragon.Workspace()
with w.as_default():
x = dragon.EagerTensor(1)
self.assertEqual(x.size, 1)
w.clear()
self.assertEqual(x.size, 0)
def test_feed_tensor(self):
w = dragon.Workspace()
with w.as_default():
v1, v2 = dragon.EagerTensor(1), np.array(2)
x = dragon.Tensor('test_feed_tensor/x')
w.feed_tensor(x, v1)
self.assertEqual(int(x), 1)
w.feed_tensor(x, v2)
self.assertEqual(int(x), 2)
def test_merge_form(self): def test_merge_form(self):
w1, w2 = dragon.Workspace(), dragon.Workspace() w1, w2 = dragon.Workspace(), dragon.Workspace()
with w1.as_default(): with w1.as_default():
x = dragon.Tensor(str(id(w1))).set_value(0) x = dragon.Tensor('test_merge_from/x').set_value(0)
w2.merge_from(w1) w2.merge_from(w1)
with w2.as_default(): with w2.as_default():
self.assertEqual(int(x), 0) self.assertEqual(int(x), 0)
def test_register_alias(self):
w = dragon.Workspace()
with w.as_default():
x = dragon.EagerTensor(1)
w.register_alias(x.id, 'test_register_alias/y')
self.assertEqual(int(w.fetch_tensor('test_register_alias/y')), 1)
def test_reset_tensor(self):
w = dragon.Workspace()
with w.as_default():
x = dragon.EagerTensor(1)
self.assertEqual(x.size, 1)
w.reset_tensor(x)
self.assertEqual(x.size, 0)
def test_reset_workspace(self):
w = dragon.Workspace()
with w.as_default():
try:
dragon.reset_workspace()
except AssertionError:
pass
dragon.reset_workspace()
if __name__ == '__main__': if __name__ == '__main__':
run_tests() run_tests()
...@@ -2706,7 +2706,7 @@ class TestTensorOps(OpTestCase): ...@@ -2706,7 +2706,7 @@ class TestTensorOps(OpTestCase):
for a_shape, b_shape in self.binary_test_shapes: for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = arange(a_shape, 1), arange(b_shape) data1, data2 = arange(a_shape, 1), arange(b_shape)
a, b = new_tensor(data1), new_tensor(data2) a, b = new_tensor(data1), new_tensor(data2)
self.assertEqual(a.__rdiv__(b), data2 / data1) self.assertEqual(a.__rtruediv__(b), data2 / data1)
def test_reshape(self): def test_reshape(self):
entries = [(0, 0), (0, -1)] entries = [(0, 0), (0, -1)]
......
...@@ -43,7 +43,6 @@ from dragon.vm.torch.ops import tensorbind as _ ...@@ -43,7 +43,6 @@ from dragon.vm.torch.ops import tensorbind as _
from dragon.vm.torch.ops.array.functional import argmax from dragon.vm.torch.ops.array.functional import argmax
from dragon.vm.torch.ops.array.functional import argmin from dragon.vm.torch.ops.array.functional import argmin
from dragon.vm.torch.ops.array.functional import assign from dragon.vm.torch.ops.array.functional import assign
from dragon.vm.torch.ops.array.functional import cast
from dragon.vm.torch.ops.array.functional import cat from dragon.vm.torch.ops.array.functional import cat
from dragon.vm.torch.ops.array.functional import channel_normalize from dragon.vm.torch.ops.array.functional import channel_normalize
from dragon.vm.torch.ops.array.functional import channel_shuffle from dragon.vm.torch.ops.array.functional import channel_shuffle
...@@ -72,12 +71,10 @@ from dragon.vm.torch.ops.array.functional import unsqueeze ...@@ -72,12 +71,10 @@ from dragon.vm.torch.ops.array.functional import unsqueeze
from dragon.vm.torch.ops.array.functional import where from dragon.vm.torch.ops.array.functional import where
from dragon.vm.torch.ops.init.functional import arange from dragon.vm.torch.ops.init.functional import arange
from dragon.vm.torch.ops.init.functional import eye from dragon.vm.torch.ops.init.functional import eye
from dragon.vm.torch.ops.init.functional import normal
from dragon.vm.torch.ops.init.functional import ones from dragon.vm.torch.ops.init.functional import ones
from dragon.vm.torch.ops.init.functional import ones_like from dragon.vm.torch.ops.init.functional import ones_like
from dragon.vm.torch.ops.init.functional import rand from dragon.vm.torch.ops.init.functional import rand
from dragon.vm.torch.ops.init.functional import randn from dragon.vm.torch.ops.init.functional import randn
from dragon.vm.torch.ops.init.functional import uniform
from dragon.vm.torch.ops.init.functional import zeros from dragon.vm.torch.ops.init.functional import zeros
from dragon.vm.torch.ops.init.functional import zeros_like from dragon.vm.torch.ops.init.functional import zeros_like
from dragon.vm.torch.ops.math.functional import abs from dragon.vm.torch.ops.math.functional import abs
......
...@@ -77,7 +77,7 @@ class Tape(object): ...@@ -77,7 +77,7 @@ class Tape(object):
self._sources.add(tensor_id) self._sources.add(tensor_id)
def merge_from(self, other): def merge_from(self, other):
"""Merge operations from another.""" """Merge operations from the other."""
if other is not None: if other is not None:
self._operations = {**self._operations, **other._operations} self._operations = {**self._operations, **other._operations}
self._sources = self._sources.union(other._sources) self._sources = self._sources.union(other._sources)
......
...@@ -1043,7 +1043,7 @@ def normalize(input, p=2, dim=1, eps=1e-12, out=None): ...@@ -1043,7 +1043,7 @@ def normalize(input, p=2, dim=1, eps=1e-12, out=None):
dim : int, optional, default=1 dim : int, optional, default=1
The dimension to reduce. The dimension to reduce.
eps : float, optional, default=1e-12 eps : float, optional, default=1e-12
The value of :math:`\epsilon`. The value to :math:`\epsilon`.
out : dragon.vm.torch.Tensor, optional out : dragon.vm.torch.Tensor, optional
The optional output tensor. The optional output tensor.
......
...@@ -53,7 +53,7 @@ class ELU(Module): ...@@ -53,7 +53,7 @@ class ELU(Module):
Parameters Parameters
---------- ----------
alpha : float, optional, default=1. alpha : float, optional, default=1.
The value of :math:`\alpha`. The value to :math:`\alpha`.
inplace : bool, optional, default=False inplace : bool, optional, default=False
Whether to do the operation in-place. Whether to do the operation in-place.
......
...@@ -106,7 +106,7 @@ def assign(out, starts, sizes, input): ...@@ -106,7 +106,7 @@ def assign(out, starts, sizes, input):
def cast(input, dtype='float32', inplace=False): def cast(input, dtype='float32', inplace=False):
"""Cast the data type of input to another. """Cast the data type of input.
Parameters Parameters
---------- ----------
......
...@@ -27,7 +27,7 @@ def arange( ...@@ -27,7 +27,7 @@ def arange(
device=None, device=None,
requires_grad=False, requires_grad=False,
): ):
r"""Return a tensor with evenly spaced values within a interval. """Return a tensor of evenly spaced values within a interval.
Specify ``start`` and ``end`` to determine an interval: Specify ``start`` and ``end`` to determine an interval:
...@@ -79,7 +79,7 @@ def arange( ...@@ -79,7 +79,7 @@ def arange(
device if device else cpp.device(), device if device else cpp.device(),
num_args=len(slice_args), num_args=len(slice_args),
dtype=dtype if dtype else 'int64', dtype=dtype if dtype else 'int64',
).apply(slice_args) ).apply(slice_args, out)
out.requires_grad = requires_grad out.requires_grad = requires_grad
return out return out
...@@ -145,51 +145,21 @@ def fill_like(out, shape_like, value): ...@@ -145,51 +145,21 @@ def fill_like(out, shape_like, value):
.apply(out, [], shape_like) .apply(out, [], shape_like)
def normal(*size, **kwargs): def normal_fill(input, mean=0, std=1):
"""Return a tensor with a normal distribution. """Fill input from the normal distribution."""
shape = input.shape
Parameters
----------
size : int...
The size(s) indicating the out shape.
mean : number, optional, default=0
The mean of distribution.
std : number, optional, default=1
The stddev of distribution.
out : dragon.vm.torch.Tensor, optional
The optional output tensor.
dtype : str, optional, default='float32'
The optional data type.
device : dragon.vm.torch.device, optional
The optional device of returned tensor.
requires_grad : bool, optional, default=False
**True** to record gradient for returned tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
out = kwargs.get('out', utils.new_leaf(size, kwargs))
return normal_fill(out, kwargs.get('mean', 0), kwargs.get('std', 1))
def normal_fill(out, mean=0, std=1):
"""Fill a tensor with a normal distribution."""
shape = out.shape
return _functions.RandomNormal \ return _functions.RandomNormal \
.instantiate( .instantiate(
out.device, input.device,
ndim=len(shape), ndim=len(shape),
mean=float(mean), mean=float(mean),
std=float(std), std=float(std),
dtype=out.dtype, dtype=input.dtype,
).apply(out, shape) ).apply(input, shape)
def ones(*size, **kwargs): def ones(*size, **kwargs):
r"""Return a tensor with value **1** filled. r"""Return a tensor filled with ones.
.. math:: \text{out} \leftarrow 1 .. math:: \text{out} \leftarrow 1
...@@ -217,7 +187,7 @@ def ones(*size, **kwargs): ...@@ -217,7 +187,7 @@ def ones(*size, **kwargs):
def ones_like(input, **kwargs): def ones_like(input, **kwargs):
r"""Return a tensor with value **1** filled, shape as input. r"""Return a tensor of ones with shape as the other.
.. math:: \text{out} \leftarrow 1 .. math:: \text{out} \leftarrow 1
...@@ -245,7 +215,7 @@ def ones_like(input, **kwargs): ...@@ -245,7 +215,7 @@ def ones_like(input, **kwargs):
def rand(*size, **kwargs): def rand(*size, **kwargs):
"""Return a float tensor with a uniform distribution of U(0, 1). """Return a tensor from the uniform distribution of U(0, 1).
Parameters Parameters
---------- ----------
...@@ -271,7 +241,7 @@ def rand(*size, **kwargs): ...@@ -271,7 +241,7 @@ def rand(*size, **kwargs):
def randn(*size, **kwargs): def randn(*size, **kwargs):
"""Return a float tensor with a normal distribution of N(0, 1). """Return a tensor from the normal distribution of N(0, 1).
Parameters Parameters
---------- ----------
...@@ -296,51 +266,21 @@ def randn(*size, **kwargs): ...@@ -296,51 +266,21 @@ def randn(*size, **kwargs):
return normal_fill(out, 0, 1) return normal_fill(out, 0, 1)
def uniform(*size, **kwargs): def uniform_fill(input, low=0, high=1):
"""Return a tensor with a normal distribution. """Fill input from the uniform distribution."""
shape = input.shape
Parameters
----------
size : int...
The size(s) indicating the out shape.
low : number, optional, default=0
The low bound of distribution.
high : number, optional, default=1
The high bound of distribution.
out : dragon.vm.torch.Tensor, optional
The optional output tensor.
dtype : str, optional, default='float32'
The optional data type.
device : dragon.vm.torch.device, optional
The optional device of returned tensor.
requires_grad : bool, optional, default=False
**True** to record gradient for returned tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
out = kwargs.get('out', utils.new_leaf(size, kwargs))
return uniform_fill(out, kwargs.get('low', 0), kwargs.get('high', 1))
def uniform_fill(out, low=0, high=1):
"""Fill a tensor with a uniform distribution."""
shape = out.shape
return _functions.RandomUniform \ return _functions.RandomUniform \
.instantiate( .instantiate(
out.device, input.device,
ndim=len(shape), ndim=len(shape),
low=float(low), low=float(low),
high=float(high), high=float(high),
dtype=out.dtype, dtype=input.dtype,
).apply(out, shape) ).apply(input, shape)
def zeros(*size, **kwargs): def zeros(*size, **kwargs):
r"""Return a tensor with value **0** filled. r"""Return a tensor filled with zeros.
.. math:: \text{out} \leftarrow 0 .. math:: \text{out} \leftarrow 0
...@@ -368,7 +308,7 @@ def zeros(*size, **kwargs): ...@@ -368,7 +308,7 @@ def zeros(*size, **kwargs):
def zeros_like(input, **kwargs): def zeros_like(input, **kwargs):
r"""Return a tensor with value **0** filled, shape as input. r"""Return a tensor of zeros with shape as the other.
.. math:: \text{out} \leftarrow 0 .. math:: \text{out} \leftarrow 0
......
...@@ -54,9 +54,9 @@ def axpby(input, alpha=1., beta=1., out=None): ...@@ -54,9 +54,9 @@ def axpby(input, alpha=1., beta=1., out=None):
input : dragon.vm.torch.Tensor input : dragon.vm.torch.Tensor
The input tensor. The input tensor.
alpha : float, optional, default=1. alpha : float, optional, default=1.
The value of :math:`\alpha`. The value to :math:`\alpha`.
beta : float, optional, default=1. beta : float, optional, default=1.
The value of :math:`\beta`. The value to :math:`\beta`.
out : dragon.vm.torch.Tensor, optional out : dragon.vm.torch.Tensor, optional
The optional output tensor. The optional output tensor.
......
...@@ -40,14 +40,14 @@ def abs(self): ...@@ -40,14 +40,14 @@ def abs(self):
return math_funcs.abs(self) return math_funcs.abs(self)
def add(self, value): def add(self, other):
r"""Compute the element-wise addition. r"""Compute the element-wise addition.
.. math:: \text{out} = \text{self} + \text{value} .. math:: \text{out} = \text{self} + \text{other}
Parameters Parameters
---------- ----------
value : Union[dragon.vm.torch.Tensor, number] other : Union[dragon.vm.torch.Tensor, number]
The value to add. The value to add.
Returns Returns
...@@ -60,17 +60,17 @@ def add(self, value): ...@@ -60,17 +60,17 @@ def add(self, value):
`torch.add(...)`_ : Compute the element-wise addition. `torch.add(...)`_ : Compute the element-wise addition.
""" """
return math_funcs.add(self, value) return math_funcs.add(self, other)
def add_(self, value): def add_(self, other):
r"""Compute the element-wise addition. r"""Compute the element-wise addition.
.. math:: \text{self} \mathrel{+}= \text{value} .. math:: \text{self} \mathrel{+}= \text{other}
Parameters Parameters
---------- ----------
value : Union[dragon.vm.torch.Tensor, number] other : Union[dragon.vm.torch.Tensor, number]
The value to add. The value to add.
Returns Returns
...@@ -83,7 +83,7 @@ def add_(self, value): ...@@ -83,7 +83,7 @@ def add_(self, value):
`torch.add(...)`_ : Compute the element-wise addition. `torch.add(...)`_ : Compute the element-wise addition.
""" """
return math_funcs.add(self, value, self) return math_funcs.add(self, other, self)
def backward(self, gradient=None, retain_graph=False): def backward(self, gradient=None, retain_graph=False):
...@@ -398,14 +398,14 @@ def cumsum(self, dim): ...@@ -398,14 +398,14 @@ def cumsum(self, dim):
return array_funcs.cumsum(self, dim) return array_funcs.cumsum(self, dim)
def div(self, value): def div(self, other):
r"""Compute the element-wise division. r"""Compute the element-wise division.
.. math:: \text{out} = \text{self} \div \text{value} .. math:: \text{out} = \text{self} \div \text{other}
Parameters Parameters
---------- ----------
value : Union[dragon.vm.torch.Tensor, number] other : Union[dragon.vm.torch.Tensor, number]
The value to divide. The value to divide.
Returns Returns
...@@ -418,17 +418,17 @@ def div(self, value): ...@@ -418,17 +418,17 @@ def div(self, value):
`torch.div(...)`_ : Compute the element-wise division. `torch.div(...)`_ : Compute the element-wise division.
""" """
return math_funcs.div(self, value) return math_funcs.div(self, other)
def div_(self, value): def div_(self, other):
r"""Compute the element-wise division. r"""Compute the element-wise division.
.. math:: \text{self} \mathrel{\div}= \text{value} .. math:: \text{self} \mathrel{\div}= \text{other}
Parameters Parameters
---------- ----------
value : Union[dragon.vm.torch.Tensor, number] other : Union[dragon.vm.torch.Tensor, number]
The value to be divided. The value to be divided.
Returns Returns
...@@ -441,7 +441,7 @@ def div_(self, value): ...@@ -441,7 +441,7 @@ def div_(self, value):
`torch.div(...)`_ : Compute the element-wise division. `torch.div(...)`_ : Compute the element-wise division.
""" """
return math_funcs.div(self, value, self) return math_funcs.div(self, other, self)
def double(self): def double(self):
...@@ -531,14 +531,14 @@ def expand(self, *sizes): ...@@ -531,14 +531,14 @@ def expand(self, *sizes):
def fill_(self, value): def fill_(self, value):
r"""Fill with the given constant value. r"""Fill self with a scalar value.
.. math:: \text{self} \leftarrow \text{value} .. math:: \text{self} \leftarrow \text{value}
Parameters Parameters
---------- ----------
value : number value : number
The constant value. The value to fill.
Returns Returns
------- -------
...@@ -633,6 +633,19 @@ def ge(self, other): ...@@ -633,6 +633,19 @@ def ge(self, other):
def getitem(self, item): def getitem(self, item):
"""Select elements at the specified index.
Parameters
----------
item : Union[int, slice, dragon.vm.torch.Tensor]
The index.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
if isinstance(item, Tensor): if isinstance(item, Tensor):
return self.masked_select(item) return self.masked_select(item)
else: else:
...@@ -935,14 +948,14 @@ def min(self, dim=None, keepdim=False): ...@@ -935,14 +948,14 @@ def min(self, dim=None, keepdim=False):
return array_funcs.min(self, dim, keepdim) return array_funcs.min(self, dim, keepdim)
def mul(self, value): def mul(self, other):
r"""Compute the element-wise multiplication. r"""Compute the element-wise multiplication.
.. math:: \text{out} = \text{self} \times \text{value} .. math:: \text{out} = \text{self} \times \text{other}
Parameters Parameters
---------- ----------
value : Union[dragon.vm.torch.Tensor, number] other : Union[dragon.vm.torch.Tensor, number]
The value to multiply. The value to multiply.
Returns Returns
...@@ -955,17 +968,17 @@ def mul(self, value): ...@@ -955,17 +968,17 @@ def mul(self, value):
`torch.mul(...)`_ : Compute the element-wise multiplication. `torch.mul(...)`_ : Compute the element-wise multiplication.
""" """
return math_funcs.mul(self, value) return math_funcs.mul(self, other)
def mul_(self, value): def mul_(self, other):
r"""Compute the element-wise multiplication. r"""Compute the element-wise multiplication.
.. math:: \text{self} \mathrel{\times}= \text{value} .. math:: \text{self} \mathrel{\times}= \text{other}
Parameters Parameters
---------- ----------
value : Union[dragon.vm.torch.Tensor, number] other : Union[dragon.vm.torch.Tensor, number]
The value to multiply. The value to multiply.
Returns Returns
...@@ -978,7 +991,7 @@ def mul_(self, value): ...@@ -978,7 +991,7 @@ def mul_(self, value):
`torch.mul(...)`_ : Compute the element-wise multiplication. `torch.mul(...)`_ : Compute the element-wise multiplication.
""" """
return math_funcs.mul(self, value, self) return math_funcs.mul(self, other, self)
def multinomial(self, num_samples, eps=0.): def multinomial(self, num_samples, eps=0.):
...@@ -1076,16 +1089,16 @@ def nonzero(self): ...@@ -1076,16 +1089,16 @@ def nonzero(self):
def normal_(self, mean=0, std=1): def normal_(self, mean=0, std=1):
r"""Fill self with a normal distribution. r"""Fill self from a normal distribution.
.. math:: \text{self} \leftarrow N(\mu, \sigma) .. math:: \text{self} \sim \mathcal{N}(\mu, \sigma)
Parameters Parameters
---------- ----------
mean : number, optional, default=0 mean : number, optional, default=0
The value of :math:`\mu`. The value to :math:`\mu`.
std : number, optional, default=1 std : number, optional, default=1
The value of :math:`\sigma`. The value to :math:`\sigma`.
Returns Returns
------- -------
...@@ -1302,6 +1315,16 @@ def rsqrt_(self): ...@@ -1302,6 +1315,16 @@ def rsqrt_(self):
def setitem(self, key, value): def setitem(self, key, value):
"""Set elements at the specified index.
Parameters
----------
key : Union[int, slice, dragon.vm.torch.Tensor]
The index.
value : Union[dragon.vm.torch.Tensor, number]
The value to set.
"""
if isinstance(key, Tensor): if isinstance(key, Tensor):
return self.masked_fill_(key, value) return self.masked_fill_(key, value)
else: else:
...@@ -1464,14 +1487,14 @@ def sum(self, dim=None, keepdim=False): ...@@ -1464,14 +1487,14 @@ def sum(self, dim=None, keepdim=False):
return array_funcs.sum(self, dim, keepdim) return array_funcs.sum(self, dim, keepdim)
def sub(self, value): def sub(self, other):
r"""Compute the element-wise subtraction. r"""Compute the element-wise subtraction.
.. math:: \text{out} = \text{self} - \text{value} .. math:: \text{out} = \text{self} - \text{other}
Parameters Parameters
---------- ----------
value : Union[dragon.vm.torch.Tensor, number] other : Union[dragon.vm.torch.Tensor, number]
The value to subtract. The value to subtract.
Returns Returns
...@@ -1484,17 +1507,17 @@ def sub(self, value): ...@@ -1484,17 +1507,17 @@ def sub(self, value):
`torch.sub(...)`_ : Compute the element-wise subtraction. `torch.sub(...)`_ : Compute the element-wise subtraction.
""" """
return math_funcs.sub(self, value) return math_funcs.sub(self, other)
def sub_(self, value): def sub_(self, other):
r"""Compute the element-wise subtraction. r"""Compute the element-wise subtraction.
.. math:: \text{self} \mathrel{-}= \text{value} .. math:: \text{self} \mathrel{-}= \text{other}
Parameters Parameters
---------- ----------
value : Union[dragon.vm.torch.Tensor, number] other : Union[dragon.vm.torch.Tensor, number]
The value to be subtracted. The value to be subtracted.
Returns Returns
...@@ -1507,7 +1530,7 @@ def sub_(self, value): ...@@ -1507,7 +1530,7 @@ def sub_(self, value):
`torch.sub(...)`_ : Compute the element-wise subtraction. `torch.sub(...)`_ : Compute the element-wise subtraction.
""" """
return math_funcs.sub(self, value, self) return math_funcs.sub(self, other, self)
def type(self, dtype=None): def type(self, dtype=None):
...@@ -1532,16 +1555,16 @@ def type(self, dtype=None): ...@@ -1532,16 +1555,16 @@ def type(self, dtype=None):
def uniform_(self, low=0, high=1): def uniform_(self, low=0, high=1):
r"""Fill self with a uniform distribution. r"""Fill self from a uniform distribution.
.. math:: \text{self} \leftarrow U(\alpha, \beta) .. math:: \text{self} \sim \mathcal{U}(\alpha, \beta)
Parameters Parameters
---------- ----------
low : number, optional, default=0 low : number, optional, default=0
The value of :math:`\alpha`. The value to :math:`\alpha`.
high : number, optional, default=1 high : number, optional, default=1
The value of :math:`\beta`. The value to :math:`\beta`.
Returns Returns
------- -------
...@@ -1741,7 +1764,6 @@ Tensor.unsqueeze_ = unsqueeze_ ...@@ -1741,7 +1764,6 @@ Tensor.unsqueeze_ = unsqueeze_
Tensor.where = where Tensor.where = where
Tensor.__getitem__ = getitem Tensor.__getitem__ = getitem
Tensor.__radd__ = lambda self, value: math_funcs._binary_func(value, self, 'Add') Tensor.__radd__ = lambda self, value: math_funcs._binary_func(value, self, 'Add')
Tensor.__rdiv__ = lambda self, value: math_funcs._binary_func(value, self, 'Div')
Tensor.__rmul__ = lambda self, value: math_funcs._binary_func(value, self, 'Mul') Tensor.__rmul__ = lambda self, value: math_funcs._binary_func(value, self, 'Mul')
Tensor.__rsub__ = lambda self, value: math_funcs._binary_func(value, self, 'Sub') Tensor.__rsub__ = lambda self, value: math_funcs._binary_func(value, self, 'Sub')
Tensor.__rtruediv__ = lambda self, value: math_funcs._binary_func(value, self, 'Div') Tensor.__rtruediv__ = lambda self, value: math_funcs._binary_func(value, self, 'Div')
......
...@@ -21,7 +21,7 @@ from dragon.vm.torch.optim.optimizer import Optimizer ...@@ -21,7 +21,7 @@ from dragon.vm.torch.optim.optimizer import Optimizer
class Adam(Optimizer): class Adam(Optimizer):
r"""The optimizer which implements Adam algorithm. r"""The optimizer to apply Adam algorithm.
`[Kingma & Ba, 2014] <https://arxiv.org/abs/1412.6980>`_. `[Kingma & Ba, 2014] <https://arxiv.org/abs/1412.6980>`_.
The **Adam** update is defined as: The **Adam** update is defined as:
......
...@@ -21,7 +21,7 @@ from dragon.vm.torch.optim.optimizer import Optimizer ...@@ -21,7 +21,7 @@ from dragon.vm.torch.optim.optimizer import Optimizer
class RMSprop(Optimizer): class RMSprop(Optimizer):
r"""The optimizer which implements RMSprop algorithm. r"""The optimizer to apply RMSprop algorithm.
`[Hinton et.al, 2013] <http://www.cs.utoronto.ca/~bonner/courses/2016s/csc321/lectures/lec6.pdf>`_. `[Hinton et.al, 2013] <http://www.cs.utoronto.ca/~bonner/courses/2016s/csc321/lectures/lec6.pdf>`_.
The **RMSprop** update is defined as: The **RMSprop** update is defined as:
......
...@@ -22,7 +22,7 @@ from dragon.vm.torch.optim.optimizer import required ...@@ -22,7 +22,7 @@ from dragon.vm.torch.optim.optimizer import required
class SGD(Optimizer): class SGD(Optimizer):
r"""The optimizer which implements SGD algorithm. r"""The optimizer to apply SGD algorithm.
Following SGD algorithms are supported: Following SGD algorithms are supported:
......
...@@ -171,7 +171,7 @@ class Tensor(object): ...@@ -171,7 +171,7 @@ class Tensor(object):
@property @property
def requires_grad(self): def requires_grad(self):
"""Return a bool report whether the grad is required. """Return whether the grad is required.
Returns Returns
------- -------
...@@ -199,6 +199,14 @@ class Tensor(object): ...@@ -199,6 +199,14 @@ class Tensor(object):
@property @property
def volatile(self): def volatile(self):
"""Return whether this tensor is volatile.
Returns
-------
bool
**True** if volatile, otherwise **False**.
"""
warnings.warn('Attribute ``volatile`` was removed (always False).', stacklevel=2) warnings.warn('Attribute ``volatile`` was removed (always False).', stacklevel=2)
return False return False
...@@ -217,16 +225,15 @@ class Tensor(object): ...@@ -217,16 +225,15 @@ class Tensor(object):
`torch.abs(...)`_ : Compute the absolute value of input. `torch.abs(...)`_ : Compute the absolute value of input.
""" """
pass
def add(self, value): def add(self, other):
r"""Compute the element-wise addition. r"""Compute the element-wise addition.
.. math:: \text{out} = \text{self} + \text{value} .. math:: \text{out} = \text{self} + \text{other}
Parameters Parameters
---------- ----------
value : Union[dragon.vm.torch.Tensor, number] other : Union[dragon.vm.torch.Tensor, number]
The value to add. The value to add.
Returns Returns
...@@ -239,16 +246,15 @@ class Tensor(object): ...@@ -239,16 +246,15 @@ class Tensor(object):
`torch.add(...)`_ : Compute the element-wise addition. `torch.add(...)`_ : Compute the element-wise addition.
""" """
pass
def add_(self, value): def add_(self, other):
r"""Compute the element-wise addition. r"""Compute the element-wise addition.
.. math:: \text{self} \mathrel{+}= \text{value} .. math:: \text{self} \mathrel{+}= \text{other}
Parameters Parameters
---------- ----------
value : Union[dragon.vm.torch.Tensor, number] other : Union[dragon.vm.torch.Tensor, number]
The value to add. The value to add.
Returns Returns
...@@ -261,7 +267,6 @@ class Tensor(object): ...@@ -261,7 +267,6 @@ class Tensor(object):
`torch.add(...)`_ : Compute the element-wise addition. `torch.add(...)`_ : Compute the element-wise addition.
""" """
pass
def backward(self, gradient=None, retain_graph=False): def backward(self, gradient=None, retain_graph=False):
"""Compute the derivatives of this tensor w.r.t. graph leaves. """Compute the derivatives of this tensor w.r.t. graph leaves.
...@@ -290,7 +295,6 @@ class Tensor(object): ...@@ -290,7 +295,6 @@ class Tensor(object):
`torch.bitwise_not(...)`_ : Compute the element-wise NOT bitwise operation. `torch.bitwise_not(...)`_ : Compute the element-wise NOT bitwise operation.
""" """
pass
def bitwise_not_(self): def bitwise_not_(self):
r"""Compute the element-wise NOT bitwise operation. r"""Compute the element-wise NOT bitwise operation.
...@@ -307,7 +311,6 @@ class Tensor(object): ...@@ -307,7 +311,6 @@ class Tensor(object):
`torch.bitwise_not(...)`_ : Compute the element-wise NOT bitwise operation. `torch.bitwise_not(...)`_ : Compute the element-wise NOT bitwise operation.
""" """
pass
def bitwise_xor(self, other): def bitwise_xor(self, other):
r"""Compute the element-wise XOR bitwise operation. r"""Compute the element-wise XOR bitwise operation.
...@@ -329,7 +332,6 @@ class Tensor(object): ...@@ -329,7 +332,6 @@ class Tensor(object):
`torch.bitwise_xor(...)`_ : Compute the element-wise XOR bitwise operation. `torch.bitwise_xor(...)`_ : Compute the element-wise XOR bitwise operation.
""" """
pass
def bitwise_xor_(self, other): def bitwise_xor_(self, other):
r"""Compute the element-wise XOR bitwise operation. r"""Compute the element-wise XOR bitwise operation.
...@@ -351,7 +353,6 @@ class Tensor(object): ...@@ -351,7 +353,6 @@ class Tensor(object):
`torch.bitwise_xor(...)`_ : Compute the element-wise XOR bitwise operation. `torch.bitwise_xor(...)`_ : Compute the element-wise XOR bitwise operation.
""" """
pass
def bool(self): def bool(self):
"""Return a bool tensor with the same data. """Return a bool tensor with the same data.
...@@ -362,7 +363,6 @@ class Tensor(object): ...@@ -362,7 +363,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def bool_(self): def bool_(self):
"""Cast to a bool tensor. """Cast to a bool tensor.
...@@ -373,7 +373,6 @@ class Tensor(object): ...@@ -373,7 +373,6 @@ class Tensor(object):
The self. The self.
""" """
pass
def byte(self): def byte(self):
"""Return an uint8 tensor with the same data. """Return an uint8 tensor with the same data.
...@@ -384,7 +383,6 @@ class Tensor(object): ...@@ -384,7 +383,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def byte_(self): def byte_(self):
"""Cast to an uint8 tensor. """Cast to an uint8 tensor.
...@@ -395,7 +393,6 @@ class Tensor(object): ...@@ -395,7 +393,6 @@ class Tensor(object):
The self. The self.
""" """
pass
def ceil(self): def ceil(self):
r"""Return a tensor taken the ceil of elements. r"""Return a tensor taken the ceil of elements.
...@@ -412,7 +409,6 @@ class Tensor(object): ...@@ -412,7 +409,6 @@ class Tensor(object):
`torch.ceil(...)`_ : Compute the smallest integer not less than input. `torch.ceil(...)`_ : Compute the smallest integer not less than input.
""" """
pass
def ceil_(self): def ceil_(self):
r"""Set to the ceil of elements. r"""Set to the ceil of elements.
...@@ -429,7 +425,6 @@ class Tensor(object): ...@@ -429,7 +425,6 @@ class Tensor(object):
`torch.ceil(...)`_ : Compute the smallest integer not less than input. `torch.ceil(...)`_ : Compute the smallest integer not less than input.
""" """
pass
def char(self): def char(self):
"""Return an int8 tensor with the same data. """Return an int8 tensor with the same data.
...@@ -440,7 +435,6 @@ class Tensor(object): ...@@ -440,7 +435,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def char_(self): def char_(self):
"""Cast to an int8 tensor. """Cast to an int8 tensor.
...@@ -451,7 +445,6 @@ class Tensor(object): ...@@ -451,7 +445,6 @@ class Tensor(object):
The self. The self.
""" """
pass
def chunk(self, chunks, dim=0): def chunk(self, chunks, dim=0):
"""Split self into several parts along the given dim. """Split self into several parts along the given dim.
...@@ -469,7 +462,6 @@ class Tensor(object): ...@@ -469,7 +462,6 @@ class Tensor(object):
The output chunks. The output chunks.
""" """
pass
def clamp(self, min=None, max=None): def clamp(self, min=None, max=None):
"""Return a tensor with elements clamped into a range. """Return a tensor with elements clamped into a range.
...@@ -491,7 +483,6 @@ class Tensor(object): ...@@ -491,7 +483,6 @@ class Tensor(object):
`torch.clamp(...)`_ : Compute the clipped input according to the given bounds. `torch.clamp(...)`_ : Compute the clipped input according to the given bounds.
""" """
pass
def clamp_(self, min=None, max=None): def clamp_(self, min=None, max=None):
"""Clamp elements into the a range. """Clamp elements into the a range.
...@@ -513,7 +504,6 @@ class Tensor(object): ...@@ -513,7 +504,6 @@ class Tensor(object):
`torch.clamp(...)`_ : Compute the clipped input according to the given bounds. `torch.clamp(...)`_ : Compute the clipped input according to the given bounds.
""" """
pass
def copy_(self, src): def copy_(self, src):
"""Copy the elements into this tensor. """Copy the elements into this tensor.
...@@ -557,7 +547,6 @@ class Tensor(object): ...@@ -557,7 +547,6 @@ class Tensor(object):
`torch.cos(...)`_ : Compute the cos of input. `torch.cos(...)`_ : Compute the cos of input.
""" """
pass
def cpu(self): def cpu(self):
"""Switch the internal storage on cpu memory. """Switch the internal storage on cpu memory.
...@@ -611,7 +600,6 @@ class Tensor(object): ...@@ -611,7 +600,6 @@ class Tensor(object):
`torch.cumsum(...)`_ : Compute the cumulative sum of elements along the given axis. `torch.cumsum(...)`_ : Compute the cumulative sum of elements along the given axis.
""" """
pass
def detach(self): def detach(self):
"""Return a data reference detaching the grad. """Return a data reference detaching the grad.
...@@ -635,14 +623,14 @@ class Tensor(object): ...@@ -635,14 +623,14 @@ class Tensor(object):
""" """
return self._impl.ndim return self._impl.ndim
def div(self, value): def div(self, other):
r"""Compute the element-wise division. r"""Compute the element-wise division.
.. math:: \text{out} = \text{self} \div \text{value} .. math:: \text{out} = \text{self} \div \text{other}
Parameters Parameters
---------- ----------
value : Union[dragon.vm.torch.Tensor, number] other : Union[dragon.vm.torch.Tensor, number]
The value to divide. The value to divide.
Returns Returns
...@@ -655,16 +643,15 @@ class Tensor(object): ...@@ -655,16 +643,15 @@ class Tensor(object):
`torch.div(...)`_ : Compute the element-wise division. `torch.div(...)`_ : Compute the element-wise division.
""" """
pass
def div_(self, value): def div_(self, other):
r"""Compute the element-wise division. r"""Compute the element-wise division.
.. math:: \text{self} \mathrel{\div}= \text{value} .. math:: \text{self} \mathrel{\div}= \text{other}
Parameters Parameters
---------- ----------
value : Union[dragon.vm.torch.Tensor, number] other : Union[dragon.vm.torch.Tensor, number]
The value to be divided. The value to be divided.
Returns Returns
...@@ -677,7 +664,6 @@ class Tensor(object): ...@@ -677,7 +664,6 @@ class Tensor(object):
`torch.div(...)`_ : Compute the element-wise division. `torch.div(...)`_ : Compute the element-wise division.
""" """
pass
def double(self): def double(self):
"""Return a float64 tensor with the same data. """Return a float64 tensor with the same data.
...@@ -688,7 +674,6 @@ class Tensor(object): ...@@ -688,7 +674,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def double_(self): def double_(self):
"""Cast to a float64 tensor. """Cast to a float64 tensor.
...@@ -699,7 +684,6 @@ class Tensor(object): ...@@ -699,7 +684,6 @@ class Tensor(object):
The self. The self.
""" """
pass
def eq(self, other): def eq(self, other):
r"""Compute the element-wise equal comparison. r"""Compute the element-wise equal comparison.
...@@ -721,7 +705,6 @@ class Tensor(object): ...@@ -721,7 +705,6 @@ class Tensor(object):
`torch.eq(...)`_ : Compute the element-wise equal comparison. `torch.eq(...)`_ : Compute the element-wise equal comparison.
""" """
pass
def exp(self): def exp(self):
r"""Compute the exponential. r"""Compute the exponential.
...@@ -738,7 +721,6 @@ class Tensor(object): ...@@ -738,7 +721,6 @@ class Tensor(object):
`torch.exp(...)`_ : Compute the exponential of input. `torch.exp(...)`_ : Compute the exponential of input.
""" """
pass
def expand(self, *sizes): def expand(self, *sizes):
"""Return a tensor with elements broadcast. """Return a tensor with elements broadcast.
...@@ -758,10 +740,9 @@ class Tensor(object): ...@@ -758,10 +740,9 @@ class Tensor(object):
`torch.expand(...)`_ : Broadcast input according to given sizes. `torch.expand(...)`_ : Broadcast input according to given sizes.
""" """
pass
def expand_as(self, other): def expand_as(self, other):
"""Return a tensor with elements broadcast like another. """Return a tensor with elements broadcast like the other.
Parameters Parameters
---------- ----------
...@@ -781,14 +762,14 @@ class Tensor(object): ...@@ -781,14 +762,14 @@ class Tensor(object):
return self.expand(*other.size()) return self.expand(*other.size())
def fill_(self, value): def fill_(self, value):
r"""Fill with the given constant value. r"""Fill self with a scalar value.
.. math:: \text{self} \leftarrow \text{value} .. math:: \text{self} \leftarrow \text{value}
Parameters Parameters
---------- ----------
value : number value : number
The constant value. The value to fill.
Returns Returns
------- -------
...@@ -796,7 +777,6 @@ class Tensor(object): ...@@ -796,7 +777,6 @@ class Tensor(object):
The self. The self.
""" """
pass
def float(self): def float(self):
"""Return a float32 tensor with the same data. """Return a float32 tensor with the same data.
...@@ -807,7 +787,6 @@ class Tensor(object): ...@@ -807,7 +787,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def float_(self): def float_(self):
"""Cast to a float32 tensor. """Cast to a float32 tensor.
...@@ -818,7 +797,6 @@ class Tensor(object): ...@@ -818,7 +797,6 @@ class Tensor(object):
The self. The self.
""" """
pass
def floor(self): def floor(self):
r"""Return a tensor taken the floor of elements. r"""Return a tensor taken the floor of elements.
...@@ -835,7 +813,6 @@ class Tensor(object): ...@@ -835,7 +813,6 @@ class Tensor(object):
`torch.floor(...)`_ : Compute the largest integer not greater than input. `torch.floor(...)`_ : Compute the largest integer not greater than input.
""" """
pass
def floor_(self): def floor_(self):
r"""Set to the floor of elements. r"""Set to the floor of elements.
...@@ -852,7 +829,6 @@ class Tensor(object): ...@@ -852,7 +829,6 @@ class Tensor(object):
`torch.floor(...)`_ : Compute the largest integer not greater than input. `torch.floor(...)`_ : Compute the largest integer not greater than input.
""" """
pass
def ge(self, other): def ge(self, other):
r"""Compute the element-wise greater-equal comparison. r"""Compute the element-wise greater-equal comparison.
...@@ -874,7 +850,6 @@ class Tensor(object): ...@@ -874,7 +850,6 @@ class Tensor(object):
`torch.ge(...)`_ : Compute the element-wise greater-equal comparison. `torch.ge(...)`_ : Compute the element-wise greater-equal comparison.
""" """
pass
def gt(self, other): def gt(self, other):
r"""Compute the element-wise greater comparison. r"""Compute the element-wise greater comparison.
...@@ -896,7 +871,6 @@ class Tensor(object): ...@@ -896,7 +871,6 @@ class Tensor(object):
`torch.gt(...)`_ : Compute the element-wise greater comparison. `torch.gt(...)`_ : Compute the element-wise greater comparison.
""" """
pass
def half(self): def half(self):
"""Return a float16 tensor with the same data. """Return a float16 tensor with the same data.
...@@ -907,7 +881,6 @@ class Tensor(object): ...@@ -907,7 +881,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def half_(self): def half_(self):
"""Cast to a float16 tensor. """Cast to a float16 tensor.
...@@ -918,7 +891,6 @@ class Tensor(object): ...@@ -918,7 +891,6 @@ class Tensor(object):
The self. The self.
""" """
pass
def index_select(self, dim, index): def index_select(self, dim, index):
"""Select the elements along the given dim using index. """Select the elements along the given dim using index.
...@@ -936,7 +908,6 @@ class Tensor(object): ...@@ -936,7 +908,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def int(self): def int(self):
"""Return an int32 tensor with the same data. """Return an int32 tensor with the same data.
...@@ -947,7 +918,6 @@ class Tensor(object): ...@@ -947,7 +918,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def int_(self): def int_(self):
"""Cast to an int32 tensor. """Cast to an int32 tensor.
...@@ -958,10 +928,9 @@ class Tensor(object): ...@@ -958,10 +928,9 @@ class Tensor(object):
The self. The self.
""" """
pass
def is_floating_point(self): def is_floating_point(self):
"""Whether the data type is floating. """Return whether the data type is floating.
Floating types contains: (*float16*, *float32*, *float64*) Floating types contains: (*float16*, *float32*, *float64*)
...@@ -993,7 +962,6 @@ class Tensor(object): ...@@ -993,7 +962,6 @@ class Tensor(object):
`torch.le(...)`_ : Compute the element-wise less-equal comparison. `torch.le(...)`_ : Compute the element-wise less-equal comparison.
""" """
pass
def log(self): def log(self):
r"""Compute the natural logarithm. r"""Compute the natural logarithm.
...@@ -1006,7 +974,6 @@ class Tensor(object): ...@@ -1006,7 +974,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def logsumexp(self, dim, keepdim=False): def logsumexp(self, dim, keepdim=False):
r"""Apply the composite of log, sum, and exp. r"""Apply the composite of log, sum, and exp.
...@@ -1026,7 +993,6 @@ class Tensor(object): ...@@ -1026,7 +993,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def long(self): def long(self):
"""Return an int64 tensor with the same data. """Return an int64 tensor with the same data.
...@@ -1037,7 +1003,6 @@ class Tensor(object): ...@@ -1037,7 +1003,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def long_(self): def long_(self):
"""Cast to an int64 tensor. """Cast to an int64 tensor.
...@@ -1048,7 +1013,6 @@ class Tensor(object): ...@@ -1048,7 +1013,6 @@ class Tensor(object):
The self. The self.
""" """
pass
def lt(self, other): def lt(self, other):
r"""Compute the element-wise less comparison. r"""Compute the element-wise less comparison.
...@@ -1070,7 +1034,6 @@ class Tensor(object): ...@@ -1070,7 +1034,6 @@ class Tensor(object):
`torch.lt(...)`_ : Compute the element-wise less comparison. `torch.lt(...)`_ : Compute the element-wise less comparison.
""" """
pass
def masked_fill_(self, mask, value): def masked_fill_(self, mask, value):
r"""Fill self with the given value where ``mask`` is **1**. r"""Fill self with the given value where ``mask`` is **1**.
...@@ -1095,7 +1058,6 @@ class Tensor(object): ...@@ -1095,7 +1058,6 @@ class Tensor(object):
The self. The self.
""" """
pass
def max(self, dim=None, keepdim=False): def max(self, dim=None, keepdim=False):
"""Compute the max value of elements along the given axis. """Compute the max value of elements along the given axis.
...@@ -1113,7 +1075,6 @@ class Tensor(object): ...@@ -1113,7 +1075,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def masked_select(self, mask): def masked_select(self, mask):
"""Select the elements where mask is **1**. """Select the elements where mask is **1**.
...@@ -1129,7 +1090,6 @@ class Tensor(object): ...@@ -1129,7 +1090,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def mean(self, dim=None, keepdim=False): def mean(self, dim=None, keepdim=False):
"""Compute the mean value of elements along the given axis. """Compute the mean value of elements along the given axis.
...@@ -1147,7 +1107,6 @@ class Tensor(object): ...@@ -1147,7 +1107,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def min(self, dim=None, keepdim=False): def min(self, dim=None, keepdim=False):
"""Compute the min value of elements along the given axis. """Compute the min value of elements along the given axis.
...@@ -1165,16 +1124,15 @@ class Tensor(object): ...@@ -1165,16 +1124,15 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def mul(self, value): def mul(self, other):
r"""Compute the element-wise multiplication. r"""Compute the element-wise multiplication.
.. math:: \text{out} = \text{self} \times \text{value} .. math:: \text{out} = \text{self} \times \text{other}
Parameters Parameters
---------- ----------
value : Union[dragon.vm.torch.Tensor, number] other : Union[dragon.vm.torch.Tensor, number]
The value to multiply. The value to multiply.
Returns Returns
...@@ -1187,16 +1145,15 @@ class Tensor(object): ...@@ -1187,16 +1145,15 @@ class Tensor(object):
`torch.mul(...)`_ : Compute the element-wise multiplication. `torch.mul(...)`_ : Compute the element-wise multiplication.
""" """
pass
def mul_(self, value): def mul_(self, other):
r"""Compute the element-wise multiplication. r"""Compute the element-wise multiplication.
.. math:: \text{self} \mathrel{\times}= \text{value} .. math:: \text{self} \mathrel{\times}= \text{other}
Parameters Parameters
---------- ----------
value : Union[dragon.vm.torch.Tensor, number] other : Union[dragon.vm.torch.Tensor, number]
The value to multiply. The value to multiply.
Returns Returns
...@@ -1209,7 +1166,6 @@ class Tensor(object): ...@@ -1209,7 +1166,6 @@ class Tensor(object):
`torch.mul(...)`_ : Compute the element-wise multiplication. `torch.mul(...)`_ : Compute the element-wise multiplication.
""" """
pass
def multinomial(self, num_samples, eps=0.): def multinomial(self, num_samples, eps=0.):
"""Return a tensor where each row contains ``num_samples``, """Return a tensor where each row contains ``num_samples``,
...@@ -1228,7 +1184,6 @@ class Tensor(object): ...@@ -1228,7 +1184,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def narrow(self, dimension, start, length): def narrow(self, dimension, start, length):
"""Return a new tensor that is a narrowed version of input tensor. """Return a new tensor that is a narrowed version of input tensor.
...@@ -1248,7 +1203,6 @@ class Tensor(object): ...@@ -1248,7 +1203,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def ndimension(self): def ndimension(self):
"""Alias for ``Tensor.dim()``. """Alias for ``Tensor.dim()``.
...@@ -1281,7 +1235,6 @@ class Tensor(object): ...@@ -1281,7 +1235,6 @@ class Tensor(object):
`torch.ne(...)`_ : Compute the element-wise not-equal comparison. `torch.ne(...)`_ : Compute the element-wise not-equal comparison.
""" """
pass
def neg(self): def neg(self):
r"""Compute the element-wise negative. r"""Compute the element-wise negative.
...@@ -1298,7 +1251,6 @@ class Tensor(object): ...@@ -1298,7 +1251,6 @@ class Tensor(object):
`torch.neg(...)`_ : Compute the element-wise negative. `torch.neg(...)`_ : Compute the element-wise negative.
""" """
pass
def nonzero(self): def nonzero(self):
"""Return the indices of non-zero elements. """Return the indices of non-zero elements.
...@@ -1309,19 +1261,18 @@ class Tensor(object): ...@@ -1309,19 +1261,18 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def normal_(self, mean=0, std=1): def normal_(self, mean=0, std=1):
r"""Fill self with a normal distribution. r"""Fill self from a normal distribution.
.. math:: \text{self} \leftarrow N(\mu, \sigma) .. math:: \text{self} \sim \mathcal{N}(\mu, \sigma)
Parameters Parameters
---------- ----------
mean : number, optional, default=0 mean : number, optional, default=0
The value of :math:`\mu`. The value to :math:`\mu`.
std : number, optional, default=1 std : number, optional, default=1
The value of :math:`\sigma`. The value to :math:`\sigma`.
Returns Returns
------- -------
...@@ -1329,7 +1280,6 @@ class Tensor(object): ...@@ -1329,7 +1280,6 @@ class Tensor(object):
The self. The self.
""" """
pass
def numel(self): def numel(self):
"""Return the total number of elements. """Return the total number of elements.
...@@ -1359,7 +1309,7 @@ class Tensor(object): ...@@ -1359,7 +1309,7 @@ class Tensor(object):
return self._impl.ToNumpy(readonly) return self._impl.ToNumpy(readonly)
def one_(self): def one_(self):
r"""Fill with constant 1. r"""Fill self with ones.
.. math:: \text{self} \leftarrow 1 .. math:: \text{self} \leftarrow 1
...@@ -1369,7 +1319,7 @@ class Tensor(object): ...@@ -1369,7 +1319,7 @@ class Tensor(object):
The self. The self.
""" """
self.fill_(1) return self.fill_(1)
def permute(self, *dims): def permute(self, *dims):
"""Return a new tensor with the specific order of dimensions. """Return a new tensor with the specific order of dimensions.
...@@ -1385,7 +1335,6 @@ class Tensor(object): ...@@ -1385,7 +1335,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def pow(self, exponent): def pow(self, exponent):
"""Compute the power. """Compute the power.
...@@ -1405,7 +1354,6 @@ class Tensor(object): ...@@ -1405,7 +1354,6 @@ class Tensor(object):
`torch.pow(...)`_ : Compute the power of input. `torch.pow(...)`_ : Compute the power of input.
""" """
pass
def reciprocal(self): def reciprocal(self):
r"""Compute the reciprocal. r"""Compute the reciprocal.
...@@ -1422,7 +1370,6 @@ class Tensor(object): ...@@ -1422,7 +1370,6 @@ class Tensor(object):
`torch.reciprocal(...)`_ : Compute the reciprocal of input. `torch.reciprocal(...)`_ : Compute the reciprocal of input.
""" """
pass
def reciprocal_(self): def reciprocal_(self):
r"""Compute the reciprocal. r"""Compute the reciprocal.
...@@ -1439,7 +1386,6 @@ class Tensor(object): ...@@ -1439,7 +1386,6 @@ class Tensor(object):
`torch.reciprocal(...)`_ : Compute the reciprocal of input. `torch.reciprocal(...)`_ : Compute the reciprocal of input.
""" """
pass
def repeat(self, *sizes): def repeat(self, *sizes):
"""Repeat elements along the specified dimensions. """Repeat elements along the specified dimensions.
...@@ -1455,7 +1401,6 @@ class Tensor(object): ...@@ -1455,7 +1401,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def reshape(self, shape): def reshape(self, shape):
"""Return a tensor with the same data but a different shape. """Return a tensor with the same data but a different shape.
...@@ -1475,7 +1420,6 @@ class Tensor(object): ...@@ -1475,7 +1420,6 @@ class Tensor(object):
`torch.reshape(...)`_ : Change the shape of input. `torch.reshape(...)`_ : Change the shape of input.
""" """
pass
def reshape_(self, shape): def reshape_(self, shape):
"""Change into a new shape with the same data. """Change into a new shape with the same data.
...@@ -1495,7 +1439,6 @@ class Tensor(object): ...@@ -1495,7 +1439,6 @@ class Tensor(object):
`torch.reshape(...)`_ : Change the shape of input. `torch.reshape(...)`_ : Change the shape of input.
""" """
pass
def retain_grad(self): def retain_grad(self):
"""Retain grad for the non-leaf tensor.""" """Retain grad for the non-leaf tensor."""
...@@ -1517,7 +1460,6 @@ class Tensor(object): ...@@ -1517,7 +1460,6 @@ class Tensor(object):
`torch.round(...)`_ : Compute the nearest integer of input. `torch.round(...)`_ : Compute the nearest integer of input.
""" """
pass
def round_(self): def round_(self):
r"""Set to the round of elements. r"""Set to the round of elements.
...@@ -1534,7 +1476,6 @@ class Tensor(object): ...@@ -1534,7 +1476,6 @@ class Tensor(object):
`torch.round(...)`_ : Compute the nearest integer of input. `torch.round(...)`_ : Compute the nearest integer of input.
""" """
pass
def rsqrt(self): def rsqrt(self):
r"""Compute the reciprocal square root. r"""Compute the reciprocal square root.
...@@ -1551,7 +1492,6 @@ class Tensor(object): ...@@ -1551,7 +1492,6 @@ class Tensor(object):
`torch.rsqrt(...)`_ : Compute the square root of input. `torch.rsqrt(...)`_ : Compute the square root of input.
""" """
pass
def rsqrt_(self): def rsqrt_(self):
r"""Compute the reciprocal square root. r"""Compute the reciprocal square root.
...@@ -1568,7 +1508,6 @@ class Tensor(object): ...@@ -1568,7 +1508,6 @@ class Tensor(object):
`torch.rsqrt(...)`_ : Compute the square root of input. `torch.rsqrt(...)`_ : Compute the square root of input.
""" """
pass
def sign(self): def sign(self):
r"""Return a tensor taken the sign indication of elements. r"""Return a tensor taken the sign indication of elements.
...@@ -1591,7 +1530,6 @@ class Tensor(object): ...@@ -1591,7 +1530,6 @@ class Tensor(object):
`torch.sign(...)`_ : Compute the sign indication of input. `torch.sign(...)`_ : Compute the sign indication of input.
""" """
pass
def sign_(self): def sign_(self):
r"""Set to the sign indication of elements. r"""Set to the sign indication of elements.
...@@ -1614,7 +1552,6 @@ class Tensor(object): ...@@ -1614,7 +1552,6 @@ class Tensor(object):
`torch.sign(...)`_ : Compute the sign indication of input. `torch.sign(...)`_ : Compute the sign indication of input.
""" """
pass
def sin(self): def sin(self):
r"""Compute the sin. r"""Compute the sin.
...@@ -1631,7 +1568,6 @@ class Tensor(object): ...@@ -1631,7 +1568,6 @@ class Tensor(object):
`torch.sin(...)`_ : Compute the sin of input. `torch.sin(...)`_ : Compute the sin of input.
""" """
pass
def size(self, axis=None): def size(self, axis=None):
"""Return the size of this tensor. """Return the size of this tensor.
...@@ -1665,7 +1601,6 @@ class Tensor(object): ...@@ -1665,7 +1601,6 @@ class Tensor(object):
`torch.sqrt(...)`_ : Compute the square root of input. `torch.sqrt(...)`_ : Compute the square root of input.
""" """
pass
def sqrt_(self): def sqrt_(self):
r"""Compute the square root. r"""Compute the square root.
...@@ -1682,7 +1617,6 @@ class Tensor(object): ...@@ -1682,7 +1617,6 @@ class Tensor(object):
`torch.sqrt(...)`_ : Compute the square root of input. `torch.sqrt(...)`_ : Compute the square root of input.
""" """
pass
def squeeze(self, dim=None): def squeeze(self, dim=None):
"""Return a tensor with dimensions of size 1 removed. """Return a tensor with dimensions of size 1 removed.
...@@ -1698,7 +1632,6 @@ class Tensor(object): ...@@ -1698,7 +1632,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def squeeze_(self, dim=None): def squeeze_(self, dim=None):
"""Inplace version of ``Tensor.squeeze()``. """Inplace version of ``Tensor.squeeze()``.
...@@ -1714,7 +1647,6 @@ class Tensor(object): ...@@ -1714,7 +1647,6 @@ class Tensor(object):
The self. The self.
""" """
pass
def sum(self, dim=None, keepdim=False): def sum(self, dim=None, keepdim=False):
"""Compute the sum value of elements along the given axis. """Compute the sum value of elements along the given axis.
...@@ -1732,16 +1664,15 @@ class Tensor(object): ...@@ -1732,16 +1664,15 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def sub(self, value): def sub(self, other):
r"""Compute the element-wise subtraction. r"""Compute the element-wise subtraction.
.. math:: \text{out} = \text{self} - \text{value} .. math:: \text{out} = \text{self} - \text{other}
Parameters Parameters
---------- ----------
value : Union[dragon.vm.torch.Tensor, number] other : Union[dragon.vm.torch.Tensor, number]
The value to subtract. The value to subtract.
Returns Returns
...@@ -1754,16 +1685,15 @@ class Tensor(object): ...@@ -1754,16 +1685,15 @@ class Tensor(object):
`torch.sub(...)`_ : Compute the element-wise subtraction. `torch.sub(...)`_ : Compute the element-wise subtraction.
""" """
pass
def sub_(self, value): def sub_(self, other):
r"""Compute the element-wise subtraction. r"""Compute the element-wise subtraction.
.. math:: \text{self} \mathrel{-}= \text{value} .. math:: \text{self} \mathrel{-}= \text{other}
Parameters Parameters
---------- ----------
value : Union[dragon.vm.torch.Tensor, number] other : Union[dragon.vm.torch.Tensor, number]
The value to be subtracted. The value to be subtracted.
Returns Returns
...@@ -1776,7 +1706,6 @@ class Tensor(object): ...@@ -1776,7 +1706,6 @@ class Tensor(object):
`torch.sub(...)`_ : Compute the element-wise subtraction. `torch.sub(...)`_ : Compute the element-wise subtraction.
""" """
pass
def type(self, dtype=None): def type(self, dtype=None):
"""Return the data type. """Return the data type.
...@@ -1794,19 +1723,18 @@ class Tensor(object): ...@@ -1794,19 +1723,18 @@ class Tensor(object):
The data type or new tensor. The data type or new tensor.
""" """
pass
def uniform_(self, low=0, high=1): def uniform_(self, low=0, high=1):
r"""Fill self with a uniform distribution. r"""Fill self from a uniform distribution.
.. math:: \text{self} \leftarrow U(\alpha, \beta) .. math:: \text{self} \sim \mathcal{U}(\alpha, \beta)
Parameters Parameters
---------- ----------
low : number, optional, default=0 low : number, optional, default=0
The value of :math:`\alpha`. The value to :math:`\alpha`.
high : number, optional, default=1 high : number, optional, default=1
The value of :math:`\beta`. The value to :math:`\beta`.
Returns Returns
------- -------
...@@ -1814,7 +1742,6 @@ class Tensor(object): ...@@ -1814,7 +1742,6 @@ class Tensor(object):
The self. The self.
""" """
pass
def unsqueeze(self, dim): def unsqueeze(self, dim):
"""Return a tensor with dimensions of size 1 inserted. """Return a tensor with dimensions of size 1 inserted.
...@@ -1830,7 +1757,6 @@ class Tensor(object): ...@@ -1830,7 +1757,6 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def unsqueeze_(self, dim): def unsqueeze_(self, dim):
"""In-place version of ``Tensor.unsqueeze()``. """In-place version of ``Tensor.unsqueeze()``.
...@@ -1846,7 +1772,6 @@ class Tensor(object): ...@@ -1846,7 +1772,6 @@ class Tensor(object):
The self. The self.
""" """
pass
def view(self, *shape): def view(self, *shape):
"""Return a tensor with the same data but a different shape. """Return a tensor with the same data but a different shape.
...@@ -1928,10 +1853,9 @@ class Tensor(object): ...@@ -1928,10 +1853,9 @@ class Tensor(object):
The output tensor. The output tensor.
""" """
pass
def zero_(self): def zero_(self):
r"""Fill self with constant 0. r"""Fill self with zeros.
.. math:: \text{self} \leftarrow 0 .. math:: \text{self} \leftarrow 0
...@@ -1941,7 +1865,7 @@ class Tensor(object): ...@@ -1941,7 +1865,7 @@ class Tensor(object):
The self. The self.
""" """
self.fill_(0) return self.fill_(0)
def _from_numpy(self, array, copy): def _from_numpy(self, array, copy):
"""Create impl from the numpy array.""" """Create impl from the numpy array."""
...@@ -1963,6 +1887,19 @@ class Tensor(object): ...@@ -1963,6 +1887,19 @@ class Tensor(object):
return mapping.TENSOR_TYPE_TO_TORCH_TENSOR[self.dtype] return mapping.TENSOR_TYPE_TO_TORCH_TENSOR[self.dtype]
def __add__(self, other): def __add__(self, other):
"""Compute the element-wise addition.
Parameters
----------
other : Union[dragon.vm.torch.Tensor, number]
The value to add.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return self.add(other) return self.add(other)
def __del__(self): def __del__(self):
...@@ -1971,63 +1908,215 @@ class Tensor(object): ...@@ -1971,63 +1908,215 @@ class Tensor(object):
# PyGC will detect them automatically. # PyGC will detect them automatically.
self._gc.collect(self.id) self._gc.collect(self.id)
def __div__(self, other):
return self.div(other)
def __float__(self): def __float__(self):
"""Return a float python scalar.""" """Return a float python scalar.
if self.numel() == 1:
return float(self.numpy()) Returns
raise TypeError('Only size-1 array can be converted to Python scalars.') -------
float
The float value.
"""
return float(self.numpy())
def __ge__(self, other): def __ge__(self, other):
"""Compute the element-wise greater-equal comparison.
Parameters
----------
other : Union[dragon.vm.torch.Tensor, number]
The value to compare.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return self.ge(other) return self.ge(other)
def __getitem__(self, item): def __getitem__(self, item):
pass """Select elements at the specific index.
Parameters
----------
item : Union[int, slice, dragon.vm.torch.Tensor]
The index.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
def __gt__(self, other): def __gt__(self, other):
"""Compute the element-wise greater comparison.
Parameters
----------
other : Union[dragon.vm.torch.Tensor, number]
The value to compare.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return self.gt(other) return self.gt(other)
def __hash__(self): def __hash__(self):
return id(self) return id(self)
def __iadd__(self, other): def __iadd__(self, other):
return self.add_(other) """Compute the element-wise addition.
def __idiv__(self, other): Parameters
return self.div_(other) ----------
other : Union[dragon.vm.torch.Tensor, number]
The value to add.
Returns
-------
dragon.vm.torch.Tensor
The self.
"""
return self.add_(other)
def __imul__(self, other): def __imul__(self, other):
"""Compute the element-wise multiplication.
Parameters
----------
other : Union[dragon.vm.torch.Tensor, number]
The value to multiply.
Returns
-------
dragon.vm.torch.Tensor
The self.
"""
return self.mul_(other) return self.mul_(other)
def __int__(self): def __int__(self):
"""Return a int python scalar.""" """Return an integer python scalar.
Returns
-------
int
The integer value.
"""
return int(self.__float__()) return int(self.__float__())
def __isub__(self, other): def __isub__(self, other):
"""Compute the element-wise subtraction.
Parameters
----------
other : Union[dragon.vm.torch.Tensor, number]
The value to be subtracted.
Returns
-------
dragon.vm.torch.Tensor
The self.
"""
return self.sub_(other) return self.sub_(other)
def __itruediv__(self, other): def __itruediv__(self, other):
"""Compute the element-wise division.
Parameters
----------
other : Union[dragon.vm.torch.Tensor, number]
The value to be divided.
Returns
-------
dragon.vm.torch.Tensor
The self.
"""
return self.div_(other) return self.div_(other)
def __le__(self, other): def __le__(self, other):
"""Compute the element-wise less-equal comparison.
Parameters
----------
other : Union[dragon.vm.torch.Tensor, number]
The value to compare.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return self.le(other) return self.le(other)
def __lt__(self, other): def __lt__(self, other):
"""Compute the element-wise less comparison.
Parameters
----------
other : Union[dragon.vm.torch.Tensor, number]
The value to compare.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return self.lt(other) return self.lt(other)
def __mul__(self, other): def __mul__(self, other):
"""Compute the element-wise multiplication.
Parameters
----------
other : Union[dragon.vm.torch.Tensor, number]
The value to multiply.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return self.mul(other) return self.mul(other)
def __neg__(self): def __neg__(self):
"""Compute the element-wise negative.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return self.neg() return self.neg()
def __radd__(self, other): def __radd__(self, other):
pass """Compute the element-wise addition.
Parameters
----------
other : Union[dragon.vm.torch.Tensor, number]
The value to add.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
def __rdiv__(self, other): """
pass
def __repr__(self): def __repr__(self):
np_data = self.numpy() np_data = self.numpy()
...@@ -2044,61 +2133,146 @@ class Tensor(object): ...@@ -2044,61 +2133,146 @@ class Tensor(object):
return format_str + meta_info return format_str + meta_info
def __rmul__(self, other): def __rmul__(self, other):
pass """Compute the element-wise multiplication.
Parameters
----------
other : Union[dragon.vm.torch.Tensor, number]
The value to multiply.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
def __rsub__(self, other): def __rsub__(self, other):
pass """Compute the element-wise subtraction.
Parameters
----------
other : Union[dragon.vm.torch.Tensor, number]
The value to be subtracted.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
def __rtruediv__(self, other): def __rtruediv__(self, other):
pass """Compute the element-wise division.
Parameters
----------
other : Union[dragon.vm.torch.Tensor, number]
The value to be divided.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
def __truediv__(self, other): def __truediv__(self, other):
"""Compute the element-wise division.
Parameters
----------
other : Union[dragon.vm.torch.Tensor, number]
The value to divide.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return self.div(other) return self.div(other)
def __setitem__(self, key, value): def __setitem__(self, key, value):
pass """Set elements at the specific index.
Parameters
----------
key : Union[int, slice, dragon.vm.torch.Tensor]
The index.
value : Union[dragon.vm.torch.Tensor, number]
The value to set.
"""
def __sub__(self, other): def __sub__(self, other):
"""Compute the element-wise subtraction.
Parameters
----------
other : Union[dragon.vm.torch.Tensor, number]
The value to subtract.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return self.sub(other) return self.sub(other)
class BoolTensor(object): class BoolTensor(object):
"""The bool tensor."""
def __new__(cls, *args, **kwargs): def __new__(cls, *args, **kwargs):
kwargs['dtype'] = 'bool' kwargs['dtype'] = 'bool'
return Tensor(*args, **kwargs) return Tensor(*args, **kwargs)
class ByteTensor(object): class ByteTensor(object):
"""The uint8 tensor."""
def __new__(cls, *args, **kwargs): def __new__(cls, *args, **kwargs):
kwargs['dtype'] = 'uint8' kwargs['dtype'] = 'uint8'
return Tensor(*args, **kwargs) return Tensor(*args, **kwargs)
class CharTensor(object): class CharTensor(object):
"""The int8 tensor."""
def __new__(cls, *args, **kwargs): def __new__(cls, *args, **kwargs):
kwargs['dtype'] = 'int8' kwargs['dtype'] = 'int8'
return Tensor(*args, **kwargs) return Tensor(*args, **kwargs)
class DoubleTensor(object): class DoubleTensor(object):
"""The float64 tensor."""
def __new__(cls, *args, **kwargs): def __new__(cls, *args, **kwargs):
kwargs['dtype'] = 'float64' kwargs['dtype'] = 'float64'
return Tensor(*args, **kwargs) return Tensor(*args, **kwargs)
class FloatTensor(object): class FloatTensor(object):
"""The float32 tensor."""
def __new__(cls, *args, **kwargs): def __new__(cls, *args, **kwargs):
kwargs['dtype'] = 'float32' kwargs['dtype'] = 'float32'
return Tensor(*args, **kwargs) return Tensor(*args, **kwargs)
class HalfTensor(object): class HalfTensor(object):
"""The float16 tensor."""
def __new__(cls, *args, **kwargs): def __new__(cls, *args, **kwargs):
kwargs['dtype'] = 'float16' kwargs['dtype'] = 'float16'
return Tensor(*args, **kwargs) return Tensor(*args, **kwargs)
class IntTensor(object): class IntTensor(object):
"""The int32 tensor."""
def __new__(cls, *args, **kwargs): def __new__(cls, *args, **kwargs):
kwargs['dtype'] = 'int32' kwargs['dtype'] = 'int32'
return Tensor(*args, **kwargs) return Tensor(*args, **kwargs)
......
Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!