Commit aa2ec8c3 by Ting PAN

Make shape and data type consistent

Summary:
This commit makes shape and data type inherit from the same metaclass,
which ensures consistency between the different API styles.
1 parent 0ab14f30
Showing with 440 additions and 433 deletions
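In practice, the change moves ``name`` to the end of the ``dragon.Tensor`` constructor and normalizes shapes to tuples across the symbolic and eager styles. A minimal migration sketch (the tensor name here is illustrative):

```python
import dragon

# Before this commit: the name led the constructor.
# x = dragon.Tensor('x', shape=(2,), dtype='float32')

# After: shape and data type lead, matching the other styles.
x = dragon.Tensor(shape=(2,), dtype='float32', name='x')
assert x.shape == (2,)  # shapes now read back as tuples
```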
......@@ -15,9 +15,6 @@ dragon
`class Tensor <dragon/Tensor.html>`_
: Tensor abstraction for graph execution.
`class TensorSpec <dragon/TensorSpec.html>`_
: Spec to describe properties of a tensor.
`class Workspace <dragon/Workspace.html>`_
: Sandbox to isolate the resources and computations.
......@@ -213,7 +210,6 @@ dragon
dragon/stack
dragon/stop_gradient
dragon/Tensor
dragon/TensorSpec
dragon/tile
dragon/transpose
dragon/where
......
TensorSpec
==========
.. autoclass:: dragon.TensorSpec
__init__
--------
.. automethod:: dragon.TensorSpec.__init__
Properties
----------
dtype
#####
.. autoattribute:: dragon.TensorSpec.dtype
name
####
.. autoattribute:: dragon.TensorSpec.name
shape
#####
.. autoattribute:: dragon.TensorSpec.shape
Methods
-------
is_compatible_with
##################
.. automethod:: dragon.TensorSpec.is_compatible_with
.. raw:: html
<style>
h1:before {
content: "dragon.";
color: #103d3e;
}
</style>
......@@ -9,6 +9,9 @@ vm.tensorflow
`class GradientTape <tensorflow/GradientTape.html>`_
: Record the operations for auto differentiation.
`class TensorShape <tensorflow/TensorShape.html>`_
: Represent a sequence of dimensions.
`class TensorSpec <tensorflow/TensorSpec.html>`_
: Spec to describe properties of a tensor.
......@@ -124,6 +127,7 @@ vm.tensorflow
tensorflow/slice
tensorflow/split
tensorflow/squeeze
tensorflow/TensorShape
tensorflow/TensorSpec
tensorflow/transpose
tensorflow/zeros
......
TensorShape
===========
.. autoclass:: dragon.vm.tensorflow.TensorShape
__init__
--------
.. automethod:: dragon.vm.tensorflow.TensorShape.__init__
Properties
----------
dims
####
.. autoattribute:: dragon.vm.tensorflow.TensorShape.dims
ndims
#####
.. autoattribute:: dragon.vm.tensorflow.TensorShape.ndims
rank
####
.. autoattribute:: dragon.vm.tensorflow.TensorShape.rank
Methods
-------
as_list
#######
.. automethod:: dragon.vm.tensorflow.TensorShape.as_list
.. raw:: html
<style>
h1:before {
content: "tf.";
color: #103d3e;
}
</style>
......@@ -26,7 +26,7 @@ shape
<style>
h1:before {
content: "dragon.";
content: "tf.";
color: #103d3e;
}
</style>
......@@ -7,6 +7,76 @@ __init__
--------
.. automethod:: dragon.vm.tensorflow.dtypes.DType.__init__
Properties
----------
as_numpy_dtype
##############
.. autoattribute:: dragon.vm.tensorflow.dtypes.DType.as_numpy_dtype
as_datatype_enum
################
.. autoattribute:: dragon.vm.tensorflow.dtypes.DType.as_datatype_enum
base_dtype
##########
.. autoattribute:: dragon.vm.tensorflow.dtypes.DType.base_dtype
is_numpy_compatible
###################
.. autoattribute:: dragon.vm.tensorflow.dtypes.DType.is_numpy_compatible
is_bool
#######
.. autoattribute:: dragon.vm.tensorflow.dtypes.DType.is_bool
is_complex
##########
.. autoattribute:: dragon.vm.tensorflow.dtypes.DType.is_complex
is_floating
###########
.. autoattribute:: dragon.vm.tensorflow.dtypes.DType.is_floating
is_integer
##########
.. autoattribute:: dragon.vm.tensorflow.dtypes.DType.is_integer
is_quantized
############
.. autoattribute:: dragon.vm.tensorflow.dtypes.DType.is_quantized
is_unsigned
###########
.. autoattribute:: dragon.vm.tensorflow.dtypes.DType.is_unsigned
limits
######
.. autoattribute:: dragon.vm.tensorflow.dtypes.DType.limits
max
###
.. autoattribute:: dragon.vm.tensorflow.dtypes.DType.max
min
###
.. autoattribute:: dragon.vm.tensorflow.dtypes.DType.min
name
####
.. autoattribute:: dragon.vm.tensorflow.dtypes.DType.name
real_dtype
##########
.. autoattribute:: dragon.vm.tensorflow.dtypes.DType.real_dtype
Methods
-------
is_compatible_with
##################
.. automethod:: dragon.vm.tensorflow.dtypes.DType.is_compatible_with
.. raw:: html
<style>
......
......@@ -7,7 +7,10 @@ vm.torch
#######
`class device <torch/device.html>`_
: Represent the device where tensor will be allocated.
: Represent the device spec.
`class dtype <torch/dtype.html>`_
: The basic data type.
`class enable_grad <torch/enable_grad.html>`_
: Context-manager to enable gradient calculation.
......@@ -265,6 +268,7 @@ vm.torch
torch/cumsum
torch/device
torch/div
torch/dtype
torch/empty
torch/enable_grad
torch/eq
......
dtype
=====
.. autoclass:: dragon.vm.torch.dtype
__init__
--------
.. automethod:: dragon.vm.torch.dtype.__init__
.. raw:: html
<style>
h1:before {
content: "torch.";
color: #103d3e;
}
</style>
......@@ -21,7 +21,7 @@ void ArangeOp<Context>::DoRunWithType() {
// Determine the generating range
// Values are in a half-open interval: [start, stop)
auto count = (int64_t)std::round((stop - start) / step);
auto count = (int64_t)std::ceil((stop - start) / step);
CHECK_GT(count, 0) << "\nInvalid generating range: "
<< "[" << start << ", " << stop << ") with step = " << step
<< ".";
......
......@@ -39,7 +39,6 @@ from dragon import vm
from dragon.core.autograph.tensor import Tensor
from dragon.core.eager.tensor import EagerTensor
from dragon.core.eager.backprop import GradientTape
from dragon.core.framework.tensor_spec import TensorSpec
from dragon.core.framework.workspace import Workspace
# Function
......
......@@ -254,7 +254,7 @@ class FunctionGuard(object):
)
shape = input_signature[i].shape
dtype = input_signature[i].dtype
inputs.append(Tensor(name, shape, dtype).constant())
inputs.append(Tensor(shape, dtype, name).constant())
with context.name_scope('${%d}' % id(self)), eager_context.graph_mode():
returns = nest.flatten(self._python_function(*inputs))
outputs, dummies = [], []
......@@ -328,8 +328,8 @@ def function(func=None, input_signature=None):
```python
@dragon.function(input_signature=[
dragon.TensorSpec(shape=[], dtype='float32'),
dragon.TensorSpec(shape=[], dtype='float32')
dragon.Tensor(shape=[], dtype='float32'),
dragon.Tensor(shape=[], dtype='float32'),
])
def foo(x, y):
return dragon.math.add([x + y, x])
......@@ -341,8 +341,8 @@ def function(func=None, input_signature=None):
----------
func : callable, optional
The function to be compiled.
input_signature : Sequence[dragon.TensorSpec], optional
The specs to hint the input info.
input_signature : Sequence[dragon.Tensor], optional
The tensors to hint the input info.
Returns
-------
......
......@@ -20,9 +20,10 @@ import os
from dragon.core.autograph import grad_maker
from dragon.core.autograph.op_def import OpDef
from dragon.core.autograph.op_def import OpInfo
from dragon.core.autograph.tensor import Tensor
from dragon.core.autograph.tensor import TensorRef
from dragon.core.framework import config
from dragon.core.framework import proto_util
from dragon.core.framework import types
from dragon.core.framework import workspace
from dragon.core.proto import dragon_pb2
from dragon.core.util import logging
......@@ -107,9 +108,7 @@ def add_update_defs(graph_def, optimizer):
name=OpDef.get_name(),
operation='MEAN',
communication='ALLREDUCE',
**process_group.arguments
)
)
**process_group.arguments))
graph_def.op.extend(update_defs)
......@@ -147,14 +146,11 @@ class Function(object):
if givens is not None:
name_dict = {}
for k, v in givens.items():
if isinstance(v, Tensor):
if types.is_symbolic_tensor(v):
name_dict[k.id] = v.id
op_info.merge_from(v)
else:
raise ValueError(
'Excepted a Tensor, '
'got {}.'.format(type(v).__name__)
)
raise ValueError('Expected a Tensor, got {}.'.format(type(v).__name__))
# Update the original defs.
op_info = copy.deepcopy(op_info)
for k in op_info._defs.keys():
......@@ -257,8 +253,8 @@ class Function(object):
The self.
"""
self.outputs = [Tensor(name) for name in graph_def.output]
self.inputs = [Tensor(name).constant() for name in graph_def.input]
self.outputs = [TensorRef(name) for name in graph_def.output]
self.inputs = [TensorRef(name).constant() for name in graph_def.input]
# Fill with all known graph elements.
add_device_option(graph_def)
......@@ -293,7 +289,7 @@ def create_function(inputs=None, outputs=None, givens=None, optimizer=None):
Tensors that record operators can be used to create a graph:
```python
x = dragon.Tensor('x', dtype='float32').constant()
x = dragon.Tensor(dtype='float32').constant()
y = x * 2
f = dragon.create_function(outputs=y)
```
......@@ -315,20 +311,20 @@ def create_function(inputs=None, outputs=None, givens=None, optimizer=None):
Specify ``givens`` to substitute tensors before creating:
```python
x = dragon.Tensor('x', dtype='float32').constant()
x = dragon.Tensor(dtype='float32').constant()
y = x * 2
foo = dragon.create_function(outputs=y)
# "bar" takes "x2" as input, and also writes to "y"
x2 = dragon.Tensor('x2', dtype='float32').constant()
x2 = dragon.Tensor(dtype='float32').constant()
bar = dragon.create_function(outputs=y, givens={x: x2})
```
Specify ``optimizer`` to make a graph applying parameter updates:
```python
x = dragon.Tensor('x', dtype='float32').set_value(1)
x_grad = dragon.Tensor('x_grad', dtype='float32').set_value(1)
x = dragon.Tensor(dtype='float32').set_value(1)
x_grad = dragon.Tensor(dtype='float32').set_value(1)
optimizer = dragon.optimizers.SGD(base_lr=0.01)
optimizer.apply_gradients(values_and_grads=[(x, x_grad)])
......
......@@ -27,17 +27,17 @@ from dragon.core.util import nest
class Tensor(types.TensorMetaclass):
"""Tensor abstraction for graph executing."""
def __init__(self, name=None, shape=None, dtype=None):
def __init__(self, shape=None, dtype=None, name=None):
"""Create a ``Tensor``.
Parameters
----------
name : str, optional
The optional tensor name.
shape : sequence, optional
shape : Sequence[int], optional
The optional tensor shape.
dtype : str, optional
The optional data type.
name : str, optional
The optional tensor name.
"""
self._op, self._grad = None, None
......@@ -66,7 +66,7 @@ class Tensor(types.TensorMetaclass):
The data type to set.
"""
self._dtype = value
self._dtype = str(value) if value else value
@property
def id(self):
......@@ -131,8 +131,8 @@ class Tensor(types.TensorMetaclass):
Returns
-------
Sequence[int]
The shape.
Tuple[int]
The tensor shape.
"""
return self._shape
......@@ -150,10 +150,9 @@ class Tensor(types.TensorMetaclass):
if value is not None:
if not nest.is_sequence(value):
raise TypeError(
'The <shape> should be a Sequence. '
'Got {}.'.format(type(value))
)
self._shape = nest.flatten(value)
'The <shape> should be a sequence. Got {}.'
.format(type(value).__name__))
self._shape = tuple(nest.flatten(value))
else:
self._shape = value
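With the setter above, any assigned sequence is flattened and stored as a tuple, so ``shape`` reads back in one canonical form. A small sketch:

```python
import dragon

x = dragon.Tensor(shape=[2, 3], dtype='float32')
assert x.shape == (2, 3)  # lists are flattened into a tuple
x.shape = None            # an unknown shape is kept as None
assert x.shape is None
```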
......@@ -475,7 +474,7 @@ class Tensor(types.TensorMetaclass):
Parameters
----------
item : Union[int, slice, dragon.Tensor]
item : Union[slice, int, dragon.Tensor]
The index.
Returns
......@@ -643,7 +642,7 @@ class Tensor(types.TensorMetaclass):
Parameters
----------
key : Union[int, slice, dragon.Tensor]
key : Union[slice, int, dragon.Tensor]
The index.
value : Union[dragon.Tensor, number]
The value to set.
......@@ -685,6 +684,6 @@ class TensorRef(object):
"""Create a reference not involved with name scope."""
def __new__(cls, name, shape=None, dtype=None):
tensor = Tensor('', shape=shape, dtype=dtype)
tensor._name = name
return tensor
tensor_ref = Tensor(shape=shape, dtype=dtype, name='')
tensor_ref._name = name
return tensor_ref
......@@ -144,15 +144,15 @@ class EagerTensor(Tensor):
@property
def shape(self):
"""Return the shape of this tensor.
"""Return tensor shape.
Returns
-------
Sequence[int]
The shape.
Tuple[int]
The tensor shape.
"""
return self._impl.dims
return tuple(self._impl.dims)
@shape.setter
def shape(self, value):
......@@ -451,7 +451,7 @@ class EagerTensor(Tensor):
Parameters
----------
item : Union[int, slice, dragon.EagerTensor]
item : Union[slice, int, dragon.EagerTensor]
The index.
Returns
......@@ -668,7 +668,7 @@ class EagerTensor(Tensor):
Parameters
----------
key : Union[int, slice, dragon.EagerTensor]
key : Union[slice, int, dragon.EagerTensor]
The index.
value : Union[dragon.EagerTensor, number]
The value to set.
......
......@@ -36,7 +36,7 @@ if sys.version_info >= (3, 0):
argument.i = value
elif type(value) is bytes:
argument.s = value
elif type(value) is str:
elif isinstance(value, str):
argument.s = str.encode(value)
elif isinstance(value, Message):
argument.s = value.SerializeToString()
......@@ -63,7 +63,7 @@ else:
argument.f = value
elif type(value) in (bool, int, long, numpy.int64):
argument.i = value
elif type(value) is str:
elif isinstance(value, str):
argument.s = value
elif type(value) is unicode:
argument.s = str(value)
......
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
"""Structure to represent a tensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dragon.core.util import nest
class TensorSpec(object):
"""Spec to describe properties of a tensor."""
def __init__(self, shape, dtype='float32', name=None):
"""Create a TensorSpec.
Parameters
----------
shape : Sequence[int], required
The dimensions.
dtype : str, optional, default='float32'
The optional data type.
name : str, optional
The optional name.
"""
self._shape, self._dtype, self._name = shape, dtype, name
if shape is not None:
self._shape = nest.flatten(shape)
@property
def dtype(self):
"""Return the data type.
Returns
-------
str
The data type.
"""
return self._dtype
@property
def name(self):
"""Return the spec name.
Returns
-------
str
The spec name.
"""
return self._name
@property
def shape(self):
"""Return the dimensions.
Returns
-------
Sequence[int]
The dimensions.
"""
return self._shape
def is_compatible_with(self, spec_or_tensor):
"""Return a bool indicating whether given the spec is compatible.
Returns
-------
bool
**True** if both ``shape`` and ``dtype`` are compatible, otherwise **False**.
"""
def dtype_is_compatible_with(spec_or_tensor):
return self._dtype == spec_or_tensor.dtype
def shape_is_compatible_with(spec_or_tensor):
shape = spec_or_tensor.shape
if self._shape is not None and shape is not None:
if len(self._shape) != len(shape):
return False
for x_dim, y_dim in zip(self._shape, shape):
if x_dim != y_dim:
return False
return True
return \
dtype_is_compatible_with(spec_or_tensor) and \
shape_is_compatible_with(spec_or_tensor)
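A minimal sketch of the compatibility rule above (kept verbatim in the tensorflow spec below), assuming fully specified shapes; a mismatch in dtype, rank, or any dimension fails the check:

```python
a = TensorSpec(shape=[2, 3], dtype='float32')
b = TensorSpec(shape=[2, 3], dtype='float32')
c = TensorSpec(shape=[2, 4], dtype='float32')
d = TensorSpec(shape=[2, 3], dtype='int64')

assert a.is_compatible_with(b)      # same dtype, same dimensions
assert not a.is_compatible_with(c)  # dimension 1 differs
assert not a.is_compatible_with(d)  # dtype differs
```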
......@@ -180,7 +180,7 @@ class Workspace(backend.Workspace):
```python
# Define a named tensor to feed
x = dragon.Tensor('x')
x = dragon.Tensor(name='x')
dragon.get_workspace().feed_tensor(x, 0)
# Feed by specifying a tensor name
......
......@@ -18,7 +18,7 @@ import itertools
import numpy
import warnings
from dragon.core.autograph.tensor import Tensor
from dragon.core.autograph.tensor import TensorRef
from dragon.core.eager import context
from dragon.core.eager.tensor import EagerTensor
from dragon.core.framework import config
......@@ -191,7 +191,7 @@ class RNNModule(object):
self._weights_count += int(numpy.prod(shape))
# Create the flat float32 weights.
self._weights = EagerTensor(shape=[self._weights_count], trainable=True)
self._weights_ref = Tensor(self._weights.name)
self._weights_ref = TensorRef(self._weights.name)
def _uniform_init(self, shape, dtype='float32'):
stdv = 1. / numpy.sqrt(self.hidden_size)
......
......@@ -146,7 +146,7 @@ def getitem(self, item):
Parameters
----------
item : Union[int, slice, dragon.EagerTensor]
item : Union[slice, int, dragon.EagerTensor]
The index.
Returns
......@@ -490,7 +490,7 @@ def setitem(self, key, value):
Parameters
----------
key : Union[int, slice, dragon.EagerTensor]
key : Union[slice, int, dragon.EagerTensor]
The index.
value : Union[dragon.EagerTensor, number]
The value to set.
......
......@@ -121,7 +121,7 @@ def getitem(self, item):
Parameters
----------
item : Union[int, slice, dragon.Tensor]
item : Union[slice, int, dragon.Tensor]
The index.
Returns
......@@ -324,7 +324,7 @@ def setitem(self, key, value):
Parameters
----------
key : Union[int, slice, dragon.Tensor]
key : Union[slice, int, dragon.Tensor]
The index.
value : Union[dragon.Tensor, number]
The value to set.
......
......@@ -58,8 +58,8 @@ from dragon.vm.tensorflow.core.framework.dtypes import variant
from dragon.vm.tensorflow.core.framework.ops import convert_to_tensor
from dragon.vm.tensorflow.core.framework.ops import device
from dragon.vm.tensorflow.core.framework.ops import name_scope
from dragon.vm.tensorflow.core.framework.tensor_spec import TensorSpec
from dragon.vm.tensorflow.core.framework.tensor_shape import TensorShape
from dragon.vm.tensorflow.core.framework.tensor_spec import TensorSpec
from dragon.vm.tensorflow.core.module.module import Module
from dragon.vm.tensorflow.core.ops.array_ops import broadcast_to
from dragon.vm.tensorflow.core.ops.array_ops import concat
......
......@@ -52,9 +52,9 @@ def constant(value, dtype=None, shape=None, name='Const'):
"""
if dtype is not None:
if isinstance(value, numpy.ndarray):
value = value.astype(str(dtype))
value = value.astype(dtype)
else:
value = numpy.array(value, str(dtype))
value = numpy.array(value, dtype)
else:
if not isinstance(value, numpy.ndarray):
value = numpy.array(value)
......
......@@ -18,5 +18,4 @@ from dragon.vm.tensorflow.core.framework.constant_op import *
from dragon.vm.tensorflow.core.framework.dtypes import *
from dragon.vm.tensorflow.core.framework.ops import device
from dragon.vm.tensorflow.core.framework.ops import convert_to_tensor
from dragon.vm.tensorflow.core.framework.tensor_shape import Dimension
from dragon.vm.tensorflow.core.framework.tensor_shape import TensorShape
......@@ -12,127 +12,87 @@
# <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/framework/tensor_shape.py>
#
# ------------------------------------------------------------
"""Tensor shape utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dragon.core.autograph.tensor import Tensor
from dragon.core.eager.tensor import EagerTensor
class TensorShape(tuple):
"""Represent the a sequence of dimensions."""
class Dimension(object):
def __init__(self, value):
if value is None:
self._value = None
else:
self._value = int(value)
if self._value < 0:
raise ValueError("Dimension %d must be >= 0" % self._value)
def __init__(self, dims):
"""Create a ``TensorShape``.
@property
def value(self):
return self._value
Parameters
----------
dims : Sequence[int]
The dimensions.
def __repr__(self):
return "Dimension(%s)" % repr(self._value)
"""
super(TensorShape, self).__init__()
def __str__(self):
value = self._value
return "?" if value is None else str(value)
def __eq__(self, other):
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value == other.value
def __ne__(self, other):
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value != other.value
def __int__(self):
return self._value
def as_dimension(value):
if isinstance(value, Dimension):
return value
else:
return Dimension(value)
@property
def dims(self):
"""Return the list of dimensions.
Returns
-------
List[int]
The dimensions.
class TensorShape(object):
def __init__(self, dims):
if dims is None:
self._dims = None
elif isinstance(dims, TensorShape):
self._dims = dims.dims
else:
try:
dims_iter = iter(dims)
except TypeError:
self._dims = [as_dimension(dims)]
else:
self._dims = [as_dimension(d) for d in dims_iter]
"""
return list(self)
@property
def dims(self):
return self._dims
def ndims(self):
"""Return the number of dimensions.
Deprecated. See ``TensorShape.rank``.
Returns
-------
int
The number of dimensions.
"""
return len(self)
@property
def ndims(self):
if self._dims is None:
return None
else:
return len(self._dims)
def rank(self):
"""Return the rank of shape.
Returns
-------
int
The rank.
"""
return len(self)
def as_list(self):
if self._dims is None:
raise ValueError("as_list() is not defined on an unknown TensorShape.")
return [dim.value for dim in self._dims]
"""Return the list of dimensions.
Returns
-------
List[int]
The dimensions.
"""
return list(self)
def __repr__(self):
return "TensorShape(%r)" % self._dims
return "TensorShape({})".format(list(self))
def __str__(self):
if self.ndims is None:
return "<unknown>"
elif self.ndims == 1:
return "(%s,)" % self._dims[0]
if self.ndims == 1:
return "(%s,)" % self.dims[0]
else:
return "(%s)" % ", ".join(str(d) for d in self._dims)
return "(%s)" % ", ".join(str(d) for d in self.dims)
def __getitem__(self, key):
if self._dims is not None:
if isinstance(key, slice):
return TensorShape(self._dims[key])
return TensorShape(self.dims[key])
else:
return self._dims[key]
else:
return Dimension(None)
def dimension_value(dimension):
"""Return the value of specified dimension."""
if isinstance(dimension, Dimension):
return dimension.value
return dimension
def get_shape(self):
"""Construct the shape descriptor."""
return TensorShape(self.shape)
# The Monkey Patching.
EagerTensor.get_shape = get_shape
Tensor.get_shape = get_shape
return self.dims[key]
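A short sketch of the rewritten class: since ``TensorShape`` now subclasses ``tuple``, an integer index returns a plain dimension while a slice returns another ``TensorShape``:

```python
import dragon.vm.tensorflow as tf

shape = tf.TensorShape([2, 3, 4])
assert shape.dims == [2, 3, 4]        # the list of dimensions
assert shape.ndims == shape.rank == 3
assert shape.as_list() == [2, 3, 4]
assert shape[0] == 2                        # int index -> plain value
assert shape[1:] == tf.TensorShape([3, 4])  # slice -> TensorShape
```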
......@@ -17,16 +17,26 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dragon.core.framework import tensor_spec
from dragon.vm.tensorflow.core.framework import dtypes
from dragon.vm.tensorflow.core.framework import tensor_shape
class TensorSpec(tensor_spec.TensorSpec):
class TensorSpec(object):
"""Spec to describe properties of a tensor."""
def __init__(self, shape, dtype=dtypes.float32, name=None):
"""Create a ``TensorSpec``."""
def __init__(self, shape, dtype='float32', name=None):
"""Create a TensorSpec.
Parameters
----------
shape : Sequence[int], required
The dimensions.
dtype : str, optional, default='float32'
The optional data type.
name : str, optional
The optional name.
"""
self._shape = tensor_shape.TensorShape(shape)
try:
self._shape_tuple = tuple(self._shape.as_list())
......@@ -45,7 +55,7 @@ class TensorSpec(tensor_spec.TensorSpec):
The data type.
"""
return self._dtype.name
return str(self._dtype)
@property
def name(self):
......@@ -70,3 +80,29 @@ class TensorSpec(tensor_spec.TensorSpec):
"""
return self._shape.as_list()
def is_compatible_with(self, spec_or_tensor):
"""Return a bool whether given the spec is compatible.
Returns
-------
bool
**True** if compatible, otherwise **False**.
"""
def dtype_is_compatible_with(spec_or_tensor):
return self.dtype == spec_or_tensor.dtype
def shape_is_compatible_with(spec_or_tensor):
shape = spec_or_tensor.shape
if self._shape is not None and shape is not None:
if len(self.shape) != len(shape):
return False
for x_dim, y_dim in zip(self.shape, shape):
if x_dim != y_dim:
return False
return True
return \
dtype_is_compatible_with(spec_or_tensor) and \
shape_is_compatible_with(spec_or_tensor)
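A minimal sketch of the rewritten spec, assuming a string data type is passed: ``shape`` is backed by a ``TensorShape`` but exposed as a list, and ``dtype`` now reads back as a plain string:

```python
import dragon.vm.tensorflow as tf

spec = tf.TensorSpec(shape=[2, 3], dtype='float32')
assert spec.shape == [2, 3]     # via TensorShape.as_list()
assert spec.dtype == 'float32'  # str(self._dtype)
```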
......@@ -43,8 +43,8 @@ def Input(
x = tf.keras.Input(shape=(8,), batch_size=8, dtype='float32')
# Create a placeholder aliasing an existing tensor
x = dragon.Tensor('x', shape=(8,), dtype='float32').constant()
xx = tf.keras.Input(tensor=x)
x = dragon.Tensor(shape=(8,), dtype='float32').constant()
y = tf.keras.Input(tensor=x)
```
Parameters
......@@ -69,38 +69,29 @@ def Input(
if 'batch_shape' in kwargs:
batch_shape = kwargs.pop('batch_shape')
if shape and batch_shape:
raise ValueError(
'Specify <shape> or '
'<batch_shape>, not both.'
)
raise ValueError('Specify <shape> or <batch_shape>, not both.')
shape = batch_shape
else:
if shape is not None:
shape = (batch_size,) + tuple(shape)
if kwargs:
raise ValueError('Unrecognized keyword arguments:', kwargs.keys())
if dtype is None:
if tensor is not None:
dtype = tensor.dtype
else:
dtype = 'float32'
if shape is None:
if tensor is None:
raise ValueError('Specify either <shape> or <tensor>.')
else:
shape = tensor.shape
if isinstance(shape, tensor_shape.TensorShape):
shape = tuple(shape.as_list())
elif isinstance(shape, six.integer_types):
shape = (shape,)
placeholder = array_ops.placeholder(
dtype=dtype, shape=shape, name=name if name else 'input')
if tensor is not None:
workspace.get_workspace().register_alias(tensor, placeholder.id)
return placeholder
......@@ -78,8 +78,7 @@ def assert_input_compatibility(input_spec, inputs, layer_name):
raise ValueError(
'Layer ' + layer_name + ' expects ' +
str(len(input_spec)) + ' inputs, '
'but it received ' + str(len(inputs)) + ' input tensors.'
)
'but it received ' + str(len(inputs)) + ' input tensors.')
# For each pair of input and spec.
for input_index, (x, spec) in enumerate(zip(inputs, input_spec)):
if spec is None:
......
......@@ -72,13 +72,11 @@ class Conv(Layer):
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
if input_shape.dims[channel_axis] is None:
raise ValueError(
'The channel dimension of the input '
'should be determined, got None.'
)
'should be determined, got None.')
input_dim = int(input_shape[channel_axis])
# Assume that kernel is packed into NCHW format
# for computing the fans correctly
if self.filters > 0:
......@@ -86,7 +84,6 @@ class Conv(Layer):
else:
self.filters = input_dim
kernel_shape = (input_dim, 1) + self.kernel_size
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
......@@ -106,7 +103,6 @@ class Conv(Layer):
)
else:
self.bias = None
self.built = True
def call(self, inputs):
......@@ -280,17 +276,15 @@ class Conv2DTranspose(Conv2D):
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
if input_shape.dims[channel_axis] is None:
raise ValueError(
'The channel dimension of the inputs '
'should be determined, got None.'
)
input_dim = int(input_shape[channel_axis])
# Assume that kernel is packed into NCHW format,
# for computing the fans correctly.
kernel_shape = (input_dim, self.filters) + self.kernel_size
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
......@@ -310,7 +304,6 @@ class Conv2DTranspose(Conv2D):
)
else:
self.bias = None
self.built = True
def call(self, inputs):
......@@ -320,11 +313,9 @@ class Conv2DTranspose(Conv2D):
h_axis, w_axis = 2, 3
else:
h_axis, w_axis = 1, 2
height, width = inputs_shape[h_axis], inputs_shape[w_axis]
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
......@@ -349,7 +340,6 @@ class Conv2DTranspose(Conv2D):
output_shape = (batch_size, self.filters, out_height, out_width)
else:
output_shape = (batch_size, out_height, out_width, self.filters)
outputs = nn_ops.conv_transpose(
input=inputs,
filters=self.kernel,
......
......@@ -83,20 +83,17 @@ class Dense(Layer):
if not (dtype.is_floating or dtype.is_complex):
raise TypeError(
'Unable to build `Dense` layer with non-floating point '
'dtype %s' % (dtype,)
)
'dtype %s' % (dtype,))
if self.input_dim is None:
input_shape = tensor_shape.TensorShape(input_shape)
if tensor_shape.dimension_value(input_shape[-1]) is None:
if input_shape[-1] is None:
raise ValueError(
'The last dimension of the inputs should be defined.\n'
'Or you should specify <input_dim> in the constructor.'
)
last_dim = tensor_shape.dimension_value(input_shape[-1])
'Or you should specify <input_dim> in the constructor.')
last_dim = input_shape[-1]
else:
last_dim = self.input_dim
self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
self.kernel = self.add_weight(
'kernel',
shape=[last_dim, self.units],
......
......@@ -97,7 +97,7 @@ class BatchNormalization(Layer):
input_shape = tensor_shape.TensorShape(input_shape)
if not input_shape.ndims:
raise ValueError('Input has undefined rank:', input_shape)
param_shape = [input_shape.dims[self.axis].value]
param_shape = [input_shape.dims[self.axis]]
self.input_spec = InputSpec(
# Each layer should adapt to the:
# 1) The number of dimensions.
......
......@@ -217,12 +217,7 @@ def fill(dims, value=0, dtype=None, name=None):
dtype = 'int32'
elif dtype == numpy.float64:
dtype = 'float32'
return init_ops.fill(
shape=dims,
value=value,
dtype=str(dtype),
name=name,
)
return init_ops.fill(shape=dims, value=value, dtype=dtype, name=name)
def gather(params, indices, axis=0, name=None):
......@@ -315,7 +310,7 @@ def ones(shape, dtype='float32', name=None):
An optional name for the operation.
"""
return init_ops.fill(shape, value=1, dtype=str(dtype), name=name)
return init_ops.fill(shape, value=1, dtype=dtype, name=name)
def ones_like(input, dtype='float32', name=None):
......@@ -340,7 +335,7 @@ def ones_like(input, dtype='float32', name=None):
An optional name for the operation.
"""
return init_ops.ones_like(input, dtype=str(dtype), name=name)
return init_ops.ones_like(input, dtype=dtype, name=name)
def one_hot(
......@@ -488,13 +483,13 @@ def placeholder(dtype=None, shape=None, name=None):
workspace.get_workspace().unique_name(
context.get_name_scope() + name if name else 'Placeholder',
suffix=':0', namespace='Tensor'),
dtype=str(dtype) if dtype else dtype,
dtype=dtype,
shape=shape,
).constant()
def reshape(tensor, shape, name=None):
r"""Change the dimensions of input.
"""Change the dimensions of input.
Examples:
......@@ -527,7 +522,7 @@ def reshape(tensor, shape, name=None):
def shape(input, name=None):
r"""Return the shape of input.
"""Return the shape of input.
Examples:
......@@ -641,7 +636,7 @@ def split(
axis=0,
name=None,
):
r"""Split input into chunks along the given axis.
"""Split input into chunks along the given axis.
Either number or size of splits will be accepted:
......@@ -775,7 +770,7 @@ def zeros(shape, dtype='float32', name=None):
An optional name for the operation.
"""
return init_ops.fill(shape, value=0., dtype=str(dtype), name=name)
return init_ops.fill(shape, value=0., dtype=dtype, name=name)
def zeros_like(input, dtype='float32', name=None):
......@@ -800,4 +795,4 @@ def zeros_like(input, dtype='float32', name=None):
An optional name for the operation.
"""
return init_ops.zeros_like(input, dtype=str(dtype), name=name)
return init_ops.zeros_like(input, dtype=dtype, name=name)
......@@ -79,7 +79,7 @@ class Constant(Initializer):
The output tensor.
"""
dtype = str(self.dtype) if dtype is None else str(dtype)
dtype = str(self.dtype) if dtype is None else dtype
return init_ops.fill(shape, value=self.value, dtype=dtype)
......@@ -125,7 +125,7 @@ class RandomNormal(Initializer):
shape=shape,
mean=self.mean,
std=self.stddev,
dtype=str(self.dtype) if dtype is None else str(dtype),
dtype=str(self.dtype) if dtype is None else dtype,
)
......@@ -167,12 +167,12 @@ class RandomUniform(Initializer):
The output tensor.
"""
dtype = str(self.dtype) if dtype is None else str(dtype)
dtype = str(self.dtype) if dtype is None else dtype
return init_ops.random_uniform(
shape=shape,
low=self.minval,
high=self.maxval,
dtype=str(self.dtype) if dtype is None else str(dtype),
dtype=str(self.dtype) if dtype is None else dtype,
)
......@@ -218,7 +218,7 @@ class TruncatedNormal(Initializer):
shape=shape,
mean=self.mean,
std=self.stddev,
dtype=str(self.dtype) if dtype is None else str(dtype),
dtype=str(self.dtype) if dtype is None else dtype,
)
......@@ -280,14 +280,14 @@ class VarianceScaling(Initializer):
shape=shape,
mode=self.mode,
scale=self.scale * 2.0,
dtype=str(self.dtype) if dtype is None else str(dtype)
dtype=str(self.dtype) if dtype is None else dtype
)
else:
return init_ops.glorot_uniform(
shape=shape,
mode=self.mode,
scale=self.scale * 3.0,
dtype=str(self.dtype) if dtype is None else str(dtype)
dtype=str(self.dtype) if dtype is None else dtype
)
......@@ -374,8 +374,8 @@ class Ones(Initializer):
The output tensor.
"""
dtype = str(self.dtype) if dtype is None else str(dtype)
return init_ops.fill(shape, value=1, dtype=str(dtype))
dtype = str(self.dtype) if dtype is None else dtype
return init_ops.fill(shape, value=1, dtype=dtype)
class Zeros(Initializer):
......@@ -412,7 +412,7 @@ class Zeros(Initializer):
The output tensor.
"""
dtype = str(self.dtype) if dtype is None else str(dtype)
dtype = str(self.dtype) if dtype is None else dtype
return init_ops.fill(shape, value=0, dtype=dtype)
......
......@@ -48,4 +48,4 @@ def eye(num_rows, num_columns=None, dtype='float32', name=None):
The output tensor.
"""
return init_ops.eye(num_rows, num_columns, dtype=str(dtype), name=name)
return init_ops.eye(num_rows, num_columns, dtype=dtype, name=name)
......@@ -209,7 +209,7 @@ def cast(x, dtype, name=None):
The output tensor.
"""
return array_ops.cast(x, dtype=str(dtype), name=name)
return array_ops.cast(x, dtype=dtype, name=name)
def ceil(x, name=None):
......@@ -890,7 +890,7 @@ def range(start, limit=None, delta=1, dtype='int64', name=None):
start=start,
stop=limit,
step=delta,
dtype=str(dtype),
dtype=dtype,
name=name,
)
......
......@@ -56,12 +56,11 @@ def avg_pool(
The output tensor.
"""
num_total_dims = input.get_shape().ndims
if num_total_dims is None:
if input.shape is not None:
num_total_dims = len(input.shape)
else:
num_total_dims = len(ksize)
num_spatial_dims = num_total_dims - 2
# Make default parameters
data_format = data_format if data_format else 'NHWC'
start_axis = 2 if data_format.startswith('NC') else 1
normalize_spatial_args = \
......@@ -74,7 +73,6 @@ def avg_pool(
ksize = normalize_spatial_args('ksize', ksize)
strides = normalize_spatial_args('strides', strides)
padding, pads = normalize_spatial_args('padding', padding)
return getattr(vision_ops, 'pool{}d'.format(num_spatial_dims))(
[input],
kernel_shape=ksize.shape[start_axis:start_axis + num_spatial_dims],
......@@ -173,12 +171,11 @@ def convolution(
The output tensor.
"""
num_total_dims = filters.get_shape().ndims
if num_total_dims is None:
raise ValueError('Rank of `filters` must be determined.')
if filters.shape is not None:
num_total_dims = len(filters.shape)
else:
raise ValueError('Rank of <filters> must be determined.')
num_spatial_dims = num_total_dims - 2
# Make default parameters
data_format = data_format if data_format else 'NHWC'
start_axis = 2 if data_format.startswith('NC') else 1
normalize_spatial_args = \
......@@ -191,7 +188,6 @@ def convolution(
strides = normalize_spatial_args('strides', strides)
dilations = normalize_spatial_args('dilations', dilations)
padding, pads = normalize_spatial_args('padding', padding)
return getattr(vision_ops, '{}{}d'.format(
kwargs.get('conv_type', 'conv'), num_spatial_dims))(
[input, filters],
......@@ -241,14 +237,13 @@ def conv_transpose(
The output tensor.
"""
num_total_dims = filters.get_shape().ndims
if num_total_dims is None:
num_total_dims = input.get_shape().ndims
if num_total_dims is None:
raise ValueError("rank of input or filters must be known.")
if filters.shape is not None:
num_total_dims = len(filters.shape)
elif input.shape is not None:
num_total_dims = len(input.shape)
else:
raise ValueError('Rank of <input> or <filters> must be known.')
num_spatial_dims = num_total_dims - 2
# Make default parameters
data_format = data_format if data_format else 'NHWC'
start_axis = 2 if data_format.startswith('NC') else 1
normalize_spatial_args = \
......@@ -264,7 +259,6 @@ def conv_transpose(
if padding == 'SAME' and output_shape is None:
raise ValueError('Expected <output_shape> for same padding.')
output_shape = normalize_spatial_args('output_shape', output_shape)
return getattr(vision_ops, 'conv{}d_transpose'.format(num_spatial_dims))(
[input, filters],
kernel_shape=filters.shape[2:],
......@@ -608,12 +602,11 @@ def max_pool(
The output tensor.
"""
num_total_dims = input.get_shape().ndims
if num_total_dims is None:
if input.shape is not None:
num_total_dims = len(input.shape)
else:
num_total_dims = len(ksize)
num_spatial_dims = num_total_dims - 2
# Make default parameters
data_format = data_format if data_format else 'NHWC'
start_axis = 2 if data_format.startswith('NC') else 1
normalize_spatial_args = \
......@@ -626,7 +619,6 @@ def max_pool(
ksize = normalize_spatial_args('ksize', ksize)
strides = normalize_spatial_args('strides', strides)
padding, pads = normalize_spatial_args('padding', padding)
return getattr(vision_ops, 'pool{}d'.format(num_spatial_dims))(
[input],
kernel_shape=ksize[start_axis:start_axis + num_spatial_dims],
......
......@@ -50,7 +50,7 @@ def random_normal(
The output tensor.
"""
_, dtype, init_fn = seed, str(dtype), init_ops.random_normal
_, dtype, init_fn = seed, dtype, init_ops.random_normal
return init_fn(shape, mean, stddev, dtype=dtype, name=name)
......@@ -87,7 +87,7 @@ def random_uniform(
The output tensor.
"""
_, dtype, init_fn = seed, str(dtype), init_ops.random_uniform
_, dtype, init_fn = seed, dtype, init_ops.random_uniform
return init_fn(shape, minval, maxval, dtype=dtype, name=name)
......@@ -125,5 +125,5 @@ def truncated_normal(
The output tensor.
"""
_, dtype, init_fn = seed, str(dtype), init_ops.truncated_normal
_, dtype, init_fn = seed, dtype, init_ops.truncated_normal
return init_fn(shape, mean, stddev, dtype=dtype, name=name)
......@@ -43,27 +43,21 @@ class Variable(VariableMetaclass, EagerTensor):
):
"""Create a ``Variable``."""
super(Variable, self).__init__(trainable=trainable)
name = name if name else 'Variable'
self._name = context.get_name_scope() + name + ':0'
# Determine the value.
if isinstance(initial_value, EagerTensor):
initial_value = initial_value.numpy()
elif isinstance(initial_value, Tensor):
initial_value = initial_value.get_value()
# Determine the data type.
if not isinstance(initial_value, numpy.ndarray):
initial_value = numpy.array(
initial_value, str(dtype) if dtype else dtype)
initial_value = numpy.array(initial_value, dtype)
elif dtype is not None:
initial_value = initial_value.astype(str(dtype))
initial_value = initial_value.astype(dtype)
# Determine the tensor shape.
if shape is not None:
initial_value = initial_value.reshape(shape)
self._from_numpy(initial_value, copy=False)
@property
......@@ -96,7 +90,6 @@ def get_default_initializer(name, shape=None, dtype=dtypes.float32):
# Defaults: float32.
if dtype is None:
dtype = dtypes.float32
# Xavier for float16, float32, float64.
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
......
......@@ -67,20 +67,20 @@ class TestTensor(unittest.TestCase):
self.assertEqual(dragon.Tensor().ndim, 0)
self.assertEqual(dragon.Tensor(shape=(2,)).ndim, 1)
self.assertEqual(dragon.Tensor().shape, None)
self.assertEqual(dragon.Tensor(shape=(2,)).shape, [2])
self.assertEqual(dragon.Tensor(shape=(2,)).shape, (2,))
self.assertEqual(dragon.Tensor().size, 0)
self.assertEqual(dragon.Tensor(shape=(2, None)).size, math.inf)
self.assertEqual(dragon.Tensor(shape=(2,)).size, 2)
self.assertEqual(dragon.Tensor().dtype, None)
self.assertEqual(dragon.Tensor(dtype='float32').dtype, 'float32')
self.assertEqual(dragon.EagerTensor(shape=(2,)).ndim, 1)
self.assertEqual(dragon.EagerTensor(shape=(2,)).shape, [2])
self.assertEqual(dragon.EagerTensor(shape=(2,)).shape, (2,))
self.assertEqual(dragon.EagerTensor(shape=(2,)).size, 2)
self.assertEqual(dragon.EagerTensor(shape=(2,), dtype='float32').dtype, 'float32')
self.assertEqual(dragon.EagerTensor().device, dragon.EagerTensor().device)
self.assertNotEqual(a.__hash__(), b.__hash__())
self.assertNotEqual(a.__repr__(), b.__repr__())
self.assertNotEqual(b.__repr__(), dragon.EagerTensor([2]).__repr__())
self.assertNotEqual(b.__repr__(), dragon.EagerTensor((2,)).__repr__())
self.assertEqual(int(a.constant().set_value(1)), 1)
self.assertEqual(float(dragon.Tensor.convert_to(1)), 1.)
self.assertEqual(int(b.set_value(1)), 1)
......@@ -117,7 +117,7 @@ class TestTensor(unittest.TestCase):
x = dragon.EagerTensor(data, copy=True)
x_to_dlpack = dragon.dlpack.to_dlpack(x)
x_from_dlpack = dragon.dlpack.from_dlpack(x_to_dlpack)
self.assertEqual(x_from_dlpack.shape, list(data.shape))
self.assertEqual(x_from_dlpack.shape, data.shape)
self.assertEqual(x_from_dlpack.dtype, str(data.dtype))
self.assertLessEqual(np.abs(x_from_dlpack.numpy() - data).max(), 1e-5)
......@@ -130,7 +130,7 @@ class TestTensor(unittest.TestCase):
x_from_dlpack = dragon.dlpack.from_dlpack(x_to_dlpack)
self.assertEqual(x_from_dlpack.device.type, 'cuda')
self.assertEqual(x_from_dlpack.device.index, 0)
self.assertEqual(x_from_dlpack.shape, list(data.shape))
self.assertEqual(x_from_dlpack.shape, data.shape)
self.assertEqual(x_from_dlpack.dtype, str(data.dtype))
self.assertLessEqual(np.abs(x_from_dlpack.numpy() - data).max(), 1e-5)
......@@ -150,7 +150,7 @@ class TestWorkspace(unittest.TestCase):
w = dragon.Workspace()
with w.as_default():
v1, v2 = dragon.EagerTensor(1), np.array(2)
x = dragon.Tensor('test_feed_tensor/x')
x = dragon.Tensor(name='test_feed_tensor/x')
w.feed_tensor(x, v1)
self.assertEqual(int(x), 1)
w.feed_tensor(x, v2)
......@@ -159,7 +159,7 @@ class TestWorkspace(unittest.TestCase):
def test_merge_form(self):
w1, w2 = dragon.Workspace(), dragon.Workspace()
with w1.as_default():
x = dragon.Tensor('test_merge_from/x').set_value(0)
x = dragon.Tensor(name='test_merge_from/x').set_value(0)
w2.merge_from(w1)
with w2.as_default():
self.assertEqual(int(x), 0)
......
......@@ -67,7 +67,7 @@ class OpTestCase(unittest.TestCase):
dtype = symbols[i][1].dtype
shape = symbols[i][1].shape
super(OpTestCase, self).assertEqual(dtype, str(values[i].dtype))
super(OpTestCase, self).assertEqual(shape, list(shape))
super(OpTestCase, self).assertEqual(shape, values[i].shape)
inputs[symbols[i][0]] = values[i]
first = inputs[:num_first] if num_first > 1 else inputs[0]
second = inputs[num_first:len(inputs)] if num_second > 1 else inputs[num_first]
......@@ -239,7 +239,7 @@ class TestActivationOps(OpTestCase):
for x_shape, w_shape, data_format in entries:
data1 = uniform(x_shape)
data2 = np.ones(w_shape, 'float32') * 0.25
x, w = new_tensor(data1), new_tensor(data2)
x, w = new_tensor(data1), new_tensor(data2.flatten())
with dragon.GradientTape() as tape:
tape.watch([x, w])
y = dragon.nn.prelu([x, w], data_format=data_format)
......@@ -632,7 +632,7 @@ class TestArrayOps(OpTestCase):
tape.watch(x)
y = dragon.masked_select([x, x > 2])
dx = tape.gradient(y, [x], output_gradients=[y])[0]
self.assertEqual([y, dx], [data[data > 2], grad])
self.assertEqual([y, dx], [data[data > 2], grad], test_symbols=False)
@unittest.skipIf(not TEST_CUDA, 'CUDA unavailable')
def test_masked_select_cuda(self):
......@@ -645,7 +645,8 @@ class TestArrayOps(OpTestCase):
data = arange((2, 3))
x = new_tensor(data)
y = dragon.nonzero(x > 2)
self.assertEqual(y, np.stack(np.nonzero(data > 2), axis=1))
self.assertEqual(
y, np.stack(np.nonzero(data > 2), axis=1), test_symbols=False)
@unittest.skipIf(not TEST_CUDA, 'CUDA unavailable')
def test_non_zero_cuda(self):
......@@ -699,7 +700,7 @@ class TestArrayOps(OpTestCase):
tape.watch(x)
y = dragon.repeat(x, axis, repeats)
grad = arange(y.shape)
grad_shape = y.shape[:-1] + [y.shape[-1] // 2, 2]
grad_shape = y.shape[:-1] + (y.shape[-1] // 2, 2)
dy = new_tensor(grad)
dx = tape.gradient(y, [x], output_gradients=[dy])[0]
self.assertEqual(
......@@ -2271,8 +2272,8 @@ class TestNormalizationOps(OpTestCase):
data4, data5 = arange(w_shape) * .1, arange(w_shape, 1) * .1
data6 = uniform(x_shape)
x = new_tensor(data1)
w, b = new_tensor(data2), new_tensor(data3)
rm, rv = new_tensor(data4), new_tensor(data5)
w, b = new_tensor(data2.flatten()), new_tensor(data3.flatten())
rm, rv = new_tensor(data4.flatten()), new_tensor(data5.flatten())
with dragon.GradientTape() as tape:
tape.watch([x, w, b])
y = dragon.nn.batch_norm(
......@@ -2330,7 +2331,7 @@ class TestNormalizationOps(OpTestCase):
data2, data3 = arange(w_shape, 1) * .1, arange(w_shape) * .1
data6 = arange(x_shape) * .1
x = new_tensor(data1)
w, b = new_tensor(data2), new_tensor(data3)
w, b = new_tensor(data2.flatten()), new_tensor(data3.flatten())
with dragon.GradientTape() as tape:
tape.watch([x, w, b])
y = dragon.nn.group_norm(
......@@ -2374,7 +2375,7 @@ class TestNormalizationOps(OpTestCase):
data2, data3 = arange(w_shape, 1) * .1, arange(w_shape) * .1
data6 = arange(x_shape) * 10.
x = new_tensor(data1)
w, b = new_tensor(data2), new_tensor(data3)
w, b = new_tensor(data2.flatten()), new_tensor(data3.flatten())
with dragon.GradientTape() as tape:
tape.watch([x, w, b])
y = dragon.nn.instance_norm([x, w, b], axis=axis, eps=eps)
......@@ -2417,7 +2418,7 @@ class TestNormalizationOps(OpTestCase):
data2, data3 = arange(w_shape, 1) * .1, arange(w_shape) * .1
data6 = arange(x_shape) * 10.
x = new_tensor(data1)
w, b = new_tensor(data2), new_tensor(data3)
w, b = new_tensor(data2.flatten()), new_tensor(data3.flatten())
with dragon.GradientTape() as tape:
tape.watch([x, w, b])
y = dragon.nn.layer_norm([x, w, b], axis=axis, eps=eps)
......@@ -2587,7 +2588,7 @@ class TestTensorOps(OpTestCase):
grad[data > 2] = 1
grad *= data
x = new_tensor(data)
self.assertEqual(x[x > 2], data[data > 2])
self.assertEqual(x[x > 2], data[data > 2], test_symbols=False)
entries = [0,
slice(None, None, None),
slice(0, None, None),
......@@ -2885,13 +2886,13 @@ class TestVisionOps(OpTestCase):
with execution_context().mode(execution):
for x_shape, b_shape, data_format in entries:
data1, data2 = arange(x_shape), arange(b_shape)
x, w = new_tensor(data1), new_tensor(data2)
x, b = new_tensor(data1), new_tensor(data2.flatten())
with dragon.GradientTape() as tape:
tape.watch([x, w])
y = dragon.nn.bias_add([x, w], data_format)
dx, dw = tape.gradient(y, [x, w], output_gradients=[x])
tape.watch([x, b])
y = dragon.nn.bias_add([x, b], data_format)
dx, db = tape.gradient(y, [x, b], output_gradients=[x])
self.assertEqual(
[y, dx, dw],
[y, dx, db],
[data1 + data2, data1, reduce_like(data1, data2).flatten()])
@unittest.skipIf(not TEST_CUDA, 'CUDA unavailable')
......@@ -3422,7 +3423,7 @@ def new_tensor(data):
"""Create a new tensor for current execution."""
if execution_context().executing_eagerly():
return dragon.EagerTensor(data, copy=True)
return dragon.Tensor(None, data.shape, str(data.dtype)).set_value(data)
return dragon.Tensor(data.shape, str(data.dtype)).set_value(data)
def process_indices(item):
......
......@@ -24,6 +24,7 @@ from dragon.vm.torch import vision
# Classes
from dragon.vm.torch.autograd import Variable
from dragon.vm.torch.cpp import device
from dragon.vm.torch.cpp import dtype
from dragon.vm.torch.cpp import Size
from dragon.vm.torch.tensor import ByteTensor
from dragon.vm.torch.tensor import CharTensor
......@@ -116,3 +117,21 @@ from dragon.vm.torch.serialization import load
from dragon.vm.torch.serialization import save
from dragon.vm.torch.tensor import empty
from dragon.vm.torch.tensor import tensor
# Aliases
bool = dtype('bool')
int8 = dtype('int8')
uint8 = dtype('uint8')
int16 = short = dtype('int16')
int32 = int = dtype('int32')
int64 = long = dtype('int64')
qint8 = dtype('qint8')
quint8 = dtype('quint8')
qint32 = dtype('qint32')
bfloat16 = dtype('bfloat16')
float16 = half = dtype('float16')
float32 = float = dtype('float32')
float64 = double = dtype('float64')
complex32 = dtype('complex32')
complex64 = dtype('complex64')
complex128 = dtype('complex128')
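Since ``torch.dtype`` subclasses ``str`` (see the ``cpp.py`` hunk below), these aliases interoperate with plain strings. A quick sketch:

```python
import dragon.vm.torch as torch

assert torch.float32 == 'float32'   # dtype subclasses str
assert isinstance(torch.half, str)  # so it passes anywhere a str is expected
assert torch.int64 is torch.long    # paired aliases bind the same object
```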
......@@ -58,7 +58,7 @@ class Size(tuple):
class device(object):
"""Represent the device where tensor will be allocated."""
"""Represent the device spec."""
def __init__(self, type='cpu', index=0):
"""Create a ``device``.
......@@ -100,6 +100,57 @@ class device(object):
return 'device(type={}, index={})'.format(self.type, self.index)
class dtype(str):
"""The basic data type.
The following data types are defined:
* ``torch.float16`` or ``torch.half``: 16-bit half-precision floating-point.
* ``torch.float32`` or ``torch.float``: 32-bit single-precision floating-point.
* ``torch.float64`` or ``torch.double``: 64-bit double-precision floating-point.
* ``torch.bfloat16``: 16-bit truncated floating-point.
* ``torch.complex32``: 32-bit half-precision complex.
* ``torch.complex64``: 64-bit single-precision complex.
* ``torch.complex128``: 128-bit double-precision complex.
* ``torch.int8``: 8-bit signed integer.
* ``torch.uint8``: 8-bit unsigned integer.
* ``torch.int16`` or ``torch.short``: 16-bit signed integer.
* ``torch.int32`` or ``torch.int``: 32-bit signed integer.
* ``torch.int64`` or ``torch.long``: 64-bit signed integer.
* ``torch.bool``: Boolean.
* ``torch.qint8``: Quantized 8-bit signed integer.
* ``torch.quint8``: Quantized 8-bit unsigned integer.
* ``torch.qint32``: Quantized 32-bit signed integer.
"""
def __init__(self, s):
"""Create a ``dtype``.
Parameters
----------
s : str
The data type descriptor.
"""
super(dtype, self).__init__()
def from_numpy(array):
"""Create a tensor from the given numpy array.
......
......@@ -636,7 +636,7 @@ def getitem(self, item):
Parameters
----------
item : Union[int, slice, dragon.vm.torch.Tensor]
item : Union[slice, int, dragon.vm.torch.Tensor]
The index.
Returns
......@@ -1318,7 +1318,7 @@ def setitem(self, key, value):
Parameters
----------
key : Union[int, slice, dragon.vm.torch.Tensor]
key : Union[slice, int, dragon.vm.torch.Tensor]
The index.
value : Union[dragon.vm.torch.Tensor, number]
The value to set.
......
......@@ -1940,7 +1940,7 @@ class Tensor(object):
Parameters
----------
item : Union[int, slice, dragon.vm.torch.Tensor]
item : Union[slice, int, dragon.vm.torch.Tensor]
The index.
Returns
......@@ -2198,7 +2198,7 @@ class Tensor(object):
Parameters
----------
key : Union[int, slice, dragon.vm.torch.Tensor]
key : Union[slice, int, dragon.vm.torch.Tensor]
The index.
value : Union[dragon.vm.torch.Tensor, number]
The value to set.
......