Commit 31e02b2b by Ting PAN

remove tf documentation

1 parent 990c496b
Showing with 428 additions and 166 deletions
# ---------------- Welcome To Use Dragon ----------------
PROJECT(dragon)
CMAKE_MINIMUM_REQUIRED(VERSION 2.8.0)
...@@ -24,7 +24,7 @@ set(3RDPARTY_DIR ${PROJECT_SOURCE_DIR}/../3rdparty)
set(PYTHON_DIR /usr/include/python2.7) # prefer
#set(PYTHON_DIR /usr/include/python3.x) # optional, set specific version
#set(ANACONDA_DIR /xxx/anaconda) # optional, set specific version below if using py3
set(NUMPY_DIR /xxx/numpy) # required, root folder of numpy package
# set CUDA compiling architecture
set(CUDA_ARCH -gencode arch=compute_20,code=sm_20
......
...@@ -114,7 +114,10 @@ class DataTransformer(Process):
        # handle mean subtraction
        if len(self._mean_value) > 0:
            if self._mean_file:
                if self._crop_size > 0:
                    im = im - self._mean_value[h_off: h_off + self._crop_size,
                                               w_off: w_off + self._crop_size, :]
                else: im = im - self._mean_value[:, :, :]
            else: im = im - self._mean_value
        # handle range scale
......
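The guarded branch above distinguishes a cropped mean image from a broadcast per-channel mean. A minimal NumPy sketch of the same arithmetic (hypothetical shapes and offsets, not the DataTransformer API):

```python
import numpy as np

# Hypothetical: a 256x256 BGR mean image and a 224x224 crop offset.
mean_image = np.zeros((256, 256, 3), dtype=np.float32)
im = np.random.rand(224, 224, 3).astype(np.float32)
h_off, w_off, crop_size = 16, 16, 224

# With a mean *file*, subtract the matching window of the mean image...
im_out = im - mean_image[h_off: h_off + crop_size,
                         w_off: w_off + crop_size, :]

# ...whereas a per-channel mean *value* broadcasts one scalar per channel.
mean_value = np.array([104.0, 117.0, 123.0], dtype=np.float32)
im_out = im - mean_value
```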
...@@ -10,19 +10,5 @@ __all__ = ['flatten']
def flatten(inputs, name=None):
"""
Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: A tensor of size [batch_size, ...].
scope: Optional name for operation.
Returns:
A flattened tensor with shape [batch_size, k].
"""
return ops.Flatten(inputs, axis=1) return ops.Flatten(inputs, axis=1)
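As the removed docstring noted, `flatten` keeps the batch dimension and collapses the rest; a NumPy equivalence sketch (illustrative only):

```python
import numpy as np

x = np.random.rand(32, 3, 8, 8)    # [batch_size, ...]
flat = x.reshape(x.shape[0], -1)   # what Flatten(axis=1) computes
print(flat.shape)                  # (32, 192)
```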
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = [
'expand_dims',
'shape',
'zeros',
'ones',
'concat',
'transpose',
'tile',
'reshape'
]
import dragon.ops as ops
from ..core import dtypes
def expand_dims(input, axis=None, name=None, dim=None):
    if dim is not None:
        if axis is not None:
            raise ValueError("cannot specify both 'axis' and 'dim'.")
        axis = dim
    return ops.ExpandDims(input, axis=axis, name=name)

def shape(input, name=None, out_type=dtypes.float32):
    # out_type is kept for TensorFlow-API compatibility and is ignored here.
    return ops.Shape(input, name=name)

def zeros(shape, dtype=dtypes.float32, name=None):
    return ops.Fill(shape, value=0.0, name=name)

def ones(shape, dtype=dtypes.float32, name=None):
    return ops.Fill(shape, value=1.0, name=name)

def concat(values, axis, name=None):
    return ops.Concat(values, axis=axis, name=name)

def transpose(a, perm=None, name=None):
    return ops.Transpose(a, perm=perm, name=name)

def tile(input, multiples, name=None):
    return ops.Tile(input, multiples=multiples, name=name)

def reshape(tensor, shape, name=None):
    return ops.Reshape(tensor, shape=shape, name=name)
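A usage sketch built only from the wrappers above (symbolic graph construction; the shapes in the comments are the expected results):

```python
# Illustrative graph-building only; shapes are hypothetical.
x = ones(shape=[3, 4])            # a 3x4 tensor of ones
y = expand_dims(x, dim=0)         # dim is the legacy alias for axis -> [1, 3, 4]
y = reshape(y, shape=[1, -1])     # -> [1, 12]
y = tile(y, multiples=[4, 1])     # -> [4, 12]
z = transpose(y, perm=[1, 0])     # -> [12, 4]
w = concat([y, y], axis=0)        # -> [8, 12]
```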
...@@ -10,4 +10,5 @@ import dragon.ops as ops
def equal(a, b, name=None):
    return ops.Equal([a, b])
\ No newline at end of file
...@@ -21,14 +21,12 @@ from ..core import dtypes
class Initializer(object):
    """Initializer base class: all initializers inherit from this class."""
    def __call__(self, shape, dtype=None):
        raise NotImplementedError
class Zeros(Initializer):
    """Initializer that generates tensors initialized to 0."""
    def __init__(self, dtype=dtypes.float32):
        self.dtype = dtype
...@@ -39,7 +37,6 @@ class Zeros(Initializer):
class Ones(Initializer):
    """Initializer that generates tensors initialized to 1."""
    def __init__(self, dtype=dtypes.float32):
        self.dtype = dtype
...@@ -50,7 +47,6 @@ class Ones(Initializer):
class Constant(Initializer):
    """Initializer that generates tensors with constant values."""
    def __init__(self, value=0, dtype=dtypes.float32):
        self.value = value
...@@ -62,16 +58,6 @@ class Constant(Initializer):
class RandomUniform(Initializer):
    """
    Initializer that generates tensors with a uniform distribution.

    Args:
        minval: A python scalar. Lower bound of the range
            of random values to generate.
        maxval: A python scalar. Upper bound of the range
            of random values to generate. Defaults to 1 for float types.
        dtype: The data type.
    """
    def __init__(self, minval=0, maxval=1, dtype=dtypes.float32):
        self.minval = minval
...@@ -84,16 +70,6 @@ class RandomUniform(Initializer):
class RandomNormal(Initializer):
    """
    Initializer that generates tensors with a normal distribution.

    Args:
        mean: a python scalar or a scalar tensor. Mean of the random values
            to generate.
        stddev: a python scalar or a scalar tensor. Standard deviation of the
            random values to generate.
        dtype: The data type. Only floating point types are supported.
    """
    def __init__(self, mean=0.0, stddev=1.0, dtype=dtypes.float32):
        self.mean = mean
...@@ -107,21 +83,6 @@ class RandomNormal(Initializer):
class TruncatedNormal(Initializer):
    """
    Initializer that generates a truncated normal distribution.

    These values are similar to values from a `random_normal_initializer`
    except that values more than two standard deviations from the mean
    are discarded and re-drawn. This is the recommended initializer for
    neural network weights and filters.

    Args:
        mean: a python scalar or a scalar tensor. Mean of the random values
            to generate.
        stddev: a python scalar or a scalar tensor. Standard deviation of the
            random values to generate.
        dtype: The data type. Only floating point types are supported.
    """
    def __init__(self, mean=0.0, stddev=1.0, dtype=dtypes.float32):
        self.mean = mean
...@@ -135,25 +96,6 @@ class TruncatedNormal(Initializer):
class VarianceScaling(Initializer):
    """
    Initializer capable of adapting its scale to the shape of weights tensors.

    With `distribution="normal"`, samples are drawn from a truncated normal
    distribution centered on zero, with `stddev = sqrt(scale / n)`
    where n is:
        - number of input units in the weight tensor, if mode = "fan_in"
        - number of output units, if mode = "fan_out"
        - average of the numbers of input and output units, if mode = "fan_avg"
    With `distribution="uniform"`, samples are drawn from a uniform distribution
    within [-limit, limit], with `limit = sqrt(3 * scale / n)`.

    Arguments:
        scale: Scaling factor (positive float).
        mode: One of "fan_in", "fan_out", "fan_avg".
        distribution: Random distribution to use. One of "normal", "uniform".
        dtype: The data type. Only floating point types are supported.
    """
    def __init__(self, scale=1.0,
                 mode="fan_in",
...@@ -189,22 +131,6 @@ variance_scaling_initializer = VarianceScaling
def glorot_uniform_initializer(dtype=dtypes.float32):
    """
    The Glorot uniform initializer, also called Xavier uniform initializer.

    It draws samples from a uniform distribution within [-limit, limit]
    where `limit` is `sqrt(6 / (fan_in + fan_out))`,
    where `fan_in` is the number of input units in the weight tensor
    and `fan_out` is the number of output units in the weight tensor.

    Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf

    Arguments:
        dtype: The data type. Only floating point types are supported.

    Returns:
        An initializer.
    """
    return variance_scaling_initializer(scale=6.0,
                                        mode='fan_avg',
...@@ -213,22 +139,6 @@ def glorot_uniform_initializer(dtype=dtypes.float32):
def glorot_normal_initializer(dtype=dtypes.float32):
    """
    The Glorot normal initializer, also called Xavier normal initializer.

    It draws samples from a truncated normal distribution centered on 0
    with `stddev = sqrt(2 / (fan_in + fan_out))`,
    where `fan_in` is the number of input units in the weight tensor
    and `fan_out` is the number of output units in the weight tensor.

    Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf

    Arguments:
        dtype: The data type. Only floating point types are supported.

    Returns:
        An initializer.
    """
    return variance_scaling_initializer(scale=2.0,
                                        mode='fan_avg',
......
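As a quick restatement of the formulas in the removed docstrings (pure Python; the function names here are illustrative, not part of the module):

```python
import math

def glorot_uniform_limit(fan_in, fan_out):
    # Per the removed docstring: samples ~ U[-limit, limit]
    # with limit = sqrt(6 / (fan_in + fan_out)).
    return math.sqrt(6.0 / (fan_in + fan_out))

def glorot_normal_stddev(fan_in, fan_out):
    # Per the removed docstring: truncated normal with
    # stddev = sqrt(2 / (fan_in + fan_out)).
    return math.sqrt(2.0 / (fan_in + fan_out))

print(glorot_uniform_limit(256, 128))  # 0.125
```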
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = [
'argmax',
'matmul',
'add',
'subtract',
'multiply',
'divide',
'sub',
'mul',
'div',
'log',
'exp',
'square',
'sqrt',
'reduce_sum',
'reduce_mean',
'sigmoid',
'tanh',
'add_n'
]
from six.moves import range as xrange
import dragon.ops as ops
def argmax(input, axis=None, name=None, dimension=None):
    if dimension is not None:
        if axis is not None:
            raise ValueError("cannot specify both 'axis' and 'dimension'.")
        axis = dimension
    elif axis is None: axis = 0
    return ops.Argmax(input, axis=axis, name=name)

def matmul(a,
           b,
           transpose_a=False,
           transpose_b=False,
           name=None):
    return ops.Matmul([a, b], TransA=transpose_a, TransB=transpose_b, name=name)

def add(x, y, name=None):
    return ops.Add([x, y], name=name)

def subtract(x, y, name=None):
    return ops.Sub([x, y], name=name)

def multiply(x, y, name=None):
    return ops.Mul([x, y], name=name)

def divide(x, y, name=None):
    return ops.Div([x, y], name=name)

def mul(x, y, name=None):
    return multiply(x, y, name)

def sub(x, y, name=None):
    return subtract(x, y, name)

def div(x, y, name=None):
    return divide(x, y, name=name)

def log(x, name=None):
    return ops.Log(x, name=name)

def exp(x, name=None):
    return ops.Exp(x, name=name)

def square(x, name=None):
    return ops.Square(x, name=name)

def sqrt(x, name=None):
    return ops.Pow(x, power=0.5, name=name)

def pow(x, power, name=None):
    return ops.Pow(x, power=power, name=name)
def reduce_sum(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
    if reduction_indices is not None:
        if axis is not None:
            raise ValueError("cannot specify both 'axis' and 'reduction_indices'.")
        axis = reduction_indices
    elif axis is None: axis = -1  # reduce all
    if isinstance(axis, (list, tuple)):  # reduce over several axes in turn
        if len(axis) < 1:
            raise RuntimeError('axis should contain at least one element.')
        if len(axis) == 1:
            return ops.Sum(input_tensor, axis=axis[0], keep_dims=keep_dims)
        else:
            # keep_dims=True on intermediates so later axis indices stay valid
            ret = ops.Sum(input_tensor, axis=axis[0], keep_dims=True)
            for i in xrange(1, len(axis) - 1):
                ret = ops.Sum(ret, axis=axis[i], keep_dims=True)
            return ops.Sum(ret, axis=axis[len(axis) - 1], keep_dims=keep_dims)
    else:
        return ops.Sum(input_tensor, axis=axis, keep_dims=keep_dims)
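The chained form above keeps `keep_dims=True` on every intermediate reduction so the remaining axis indices stay valid. A NumPy sketch of the same idea (illustrative only):

```python
import numpy as np

x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
axes = [0, 2]

# Reduce one axis at a time, keeping dims so `axes` needs no remapping.
ret = x
for ax in axes[:-1]:
    ret = ret.sum(axis=ax, keepdims=True)      # e.g. (1, 3, 4)
ret = ret.sum(axis=axes[-1], keepdims=False)   # final: (1, 3)

assert np.allclose(ret, x.sum(axis=tuple(axes)))
```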
def reduce_mean(input_tensor,
                axis=None,
                keep_dims=False,
                name=None,
                reduction_indices=None):
    if reduction_indices is not None:
        if axis is not None:
            raise ValueError("cannot specify both 'axis' and 'reduction_indices'.")
        axis = reduction_indices
    elif axis is None: axis = -1  # reduce all
    if isinstance(axis, (list, tuple)):  # reduce over several axes in turn
        if len(axis) < 1:
            raise RuntimeError('axis should contain at least one element.')
        if len(axis) == 1:
            return ops.Mean(input_tensor, axis=axis[0], keep_dims=keep_dims)
        else:
            ret = ops.Mean(input_tensor, axis=axis[0], keep_dims=True)
            for i in xrange(1, len(axis) - 1):
                ret = ops.Mean(ret, axis=axis[i], keep_dims=True)
            return ops.Mean(ret, axis=axis[len(axis) - 1], keep_dims=keep_dims)
    else:
        return ops.Mean(input_tensor, axis=axis, keep_dims=keep_dims)
def sigmoid(x, name=None):
    return ops.Sigmoid(x, name=name)

def tanh(x, name=None):
    return ops.Tanh(x, name=name)

def add_n(inputs, name=None):
    return ops.Eltwise(inputs, operation='SUM', name=name)
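Two small NumPy checks of the semantics above: `sqrt` is expressed as a 0.5 power, and `add_n` is an elementwise sum over a list (reference arithmetic, not Dragon code):

```python
import numpy as np

x = np.array([1.0, 4.0, 9.0], dtype=np.float32)

# sqrt above is implemented as Pow(x, power=0.5); same identity in NumPy:
assert np.allclose(np.power(x, 0.5), np.sqrt(x))

# add_n sums a list of same-shaped tensors elementwise (Eltwise SUM):
inputs = [x, x, x]
assert np.allclose(np.sum(inputs, axis=0), x * 3)
```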
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = [
'relu',
'softmax',
'conv2d',
'avg_pool',
'max_pool',
'xw_plus_b',
'bias_add',
'sigmoid_cross_entropy_with_logits',
'softmax_cross_entropy_with_logits',
'sparse_softmax_cross_entropy_with_logits',
'l2_loss'
]
import dragon.ops as ops
def relu(features, name=None):
    return ops.Relu(features, name=name)

def softmax(logits, dim=-1, name=None):
    return ops.Softmax(logits, axis=dim)
def conv2d(input, filter, strides, pads=(0, 0, 0, 0),
           use_cudnn_on_gpu=True, padding=None,
           data_format='NCHW', name=None):
    if filter.shape is None:
        raise ValueError('filter must have a valid shape.')
    else:
        if len(filter.shape) != 4:
            raise ValueError('filter must be a 4D Tensor.')
    if len(strides) != 4:
        raise ValueError('strides must be a list of length 4.')
    if data_format == 'NCHW':
        output = ops.Conv2D([input, filter],
                            num_output=filter.shape[0],
                            kernel_size=filter.shape[2:],
                            stride=strides[2:],
                            pad=pads[2:])
        return output
    else: raise NotImplementedError()
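For the NCHW path, the usual output-size relation is `out = (in + 2 * pad - kernel) // stride + 1`; a small sketch of that standard formula (not Dragon-specific code):

```python
def conv2d_output_hw(in_h, in_w, kernel, stride, pad):
    # Standard convolution arithmetic; assumes symmetric padding
    # and no dilation, matching the arguments exposed above.
    out_h = (in_h + 2 * pad - kernel) // stride + 1
    out_w = (in_w + 2 * pad - kernel) // stride + 1
    return out_h, out_w

print(conv2d_output_hw(224, 224, kernel=7, stride=2, pad=3))  # (112, 112)
```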
def avg_pool(value, ksize, strides, pads=(0, 0, 0, 0),
             padding=None, data_format="NCHW", name=None):
    if len(strides) != 4:
        raise ValueError('strides must be a list of length 4.')
    if len(ksize) != 4:
        raise ValueError('ksize must be a list of length 4.')
    if data_format == 'NCHW':
        if pads is None: pads = 0
        return ops.Pool2D(value,
                          kernel_size=ksize[2:],
                          stride=strides[2:],
                          pad=pads,
                          mode='AVG_POOLING')
    else: raise NotImplementedError()
def max_pool(value, ksize, strides, pads=(0, 0, 0, 0),
             padding=None, data_format="NCHW", name=None):
    if len(strides) != 4:
        raise ValueError('strides must be a list of length 4.')
    if len(ksize) != 4:
        raise ValueError('ksize must be a list of length 4.')
    if data_format == 'NCHW':
        if pads is None: pads = 0
        return ops.Pool2D(value,
                          kernel_size=ksize[2:],
                          stride=strides[2:],
                          pad=pads,
                          mode='MAX_POOLING')
    else: raise NotImplementedError()
def xw_plus_b(x, weights, biases, name=None):
    if weights.shape is None:
        raise ValueError('weights must have a valid shape.')
    else:
        if len(weights.shape) != 2:
            raise ValueError('weights must be a 2D Tensor.')
    if biases.shape is None:
        raise ValueError('biases must have a valid shape.')
    else:
        if len(biases.shape) != 1:
            raise ValueError('biases must be a 1D Tensor.')
    if weights.shape[1] != biases.shape[0]:
        raise ValueError('the shapes of weights and biases are incompatible.')
    return ops.InnerProduct([x, weights, biases], num_output=weights.shape[1], TransW=False)
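The shape checks above enforce `y = xW + b` with `weights` of shape [num_input, num_output] and `biases` of shape [num_output]; in NumPy terms (illustrative):

```python
import numpy as np

x = np.random.rand(8, 32).astype(np.float32)   # [batch, num_input]
W = np.random.rand(32, 10).astype(np.float32)  # [num_input, num_output]
b = np.zeros(10, dtype=np.float32)             # [num_output]

y = np.dot(x, W) + b                           # [batch, num_output] = [8, 10]
```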
def bias_add(value, bias, data_format='NCHW', name=None):
    return ops.BiasAdd([value, bias], data_format=data_format, name=name)

def sigmoid_cross_entropy_with_logits(logits, targets, name=None):
    return ops.SigmoidCrossEntropyLoss([logits, targets], normalization='UNIT', name=name)
def softmax_cross_entropy_with_logits(_sentinel=None,
                                      labels=None, logits=None,
                                      dim=-1, name=None):
    if _sentinel is not None:
        raise ValueError('Only call `softmax_cross_entropy_with_logits` '
                         'with named arguments (labels=..., logits=..., ...)')
    if dim == -1: dim = 1
    return ops.SoftmaxCrossEntropyLoss([logits, labels], axis=dim, normalization='UNIT', name=name)

def sparse_softmax_cross_entropy_with_logits(logits, labels, dim=-1, name=None):
    if dim == -1: dim = 1
    return ops.SoftmaxLoss([logits, labels], axis=dim, normalization='UNIT', name=name)
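Note the `dim == -1: dim = 1` remapping in both functions: with Dragon's NCHW layout the class axis is 1, so TensorFlow's last-axis default is translated to the channel axis. A hypothetical call (shapes assumed, not verified against Dragon's SoftmaxLoss):

```python
# Hypothetical NCHW shapes for dense (per-pixel) classification:
#   logits: [N, C, H, W]   labels: sparse class indices
# dim=-1 (the TF default) is remapped to axis=1, the channel/class axis.
loss = sparse_softmax_cross_entropy_with_logits(logits, labels)
```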
def l2_loss(t, name=None):
    return (ops.Reduce(ops.Square(t), operation='SUM') * 0.5)

def dropout(x, keep_prob, name=None):
    # TensorFlow's API takes a keep probability; Dragon's Dropout evidently
    # takes the drop ratio, hence the 1 - keep_prob conversion.
    return ops.Dropout(x, 1 - keep_prob)
def batch_normalization(x, mean, variance,
                        offset, scale,
                        decay=0.9,
                        variance_epsilon=1e-3,
                        use_global_stats=-1,
                        name=None):
    norm_x = ops.BatchNorm([x, mean, variance], decay, variance_epsilon, use_global_stats, name=name)
    return ops.Scale([norm_x, scale, offset], name=name + '_scale' if name is not None else name)

def batch_norm_with_global_normalization(t, m, v,
                                         beta, gamma,
                                         decay=0.9,
                                         variance_epsilon=1e-3,
                                         scale_after_normalization=True,
                                         use_global_stats=-1,
                                         name=None):
    norm_x = ops.BatchNorm([t, m, v], decay, variance_epsilon, use_global_stats, name=name)
    if scale_after_normalization:
        return ops.Scale([norm_x, gamma, beta], name=name + '_scale' if name is not None else name)
    else: return norm_x
\ No newline at end of file
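Both wrappers compute the standard batch-normalization transform `y = gamma * (x - mean) / sqrt(variance + eps) + beta`; a NumPy reference of that arithmetic (illustrative, not the Dragon kernels):

```python
import numpy as np

def batch_norm_reference(x, mean, var, gamma, beta, eps=1e-3):
    # Normalize, then apply the learned scale (gamma) and shift (beta).
    x_hat = (x - mean) / np.sqrt(var + eps)
    return gamma * x_hat + beta

x = np.random.rand(8, 4).astype(np.float32)
y = batch_norm_reference(x, x.mean(axis=0), x.var(axis=0),
                         gamma=np.ones(4, np.float32),
                         beta=np.zeros(4, np.float32))
print(y.mean(axis=0), y.std(axis=0))  # ~0 and slightly under 1 (due to eps)
```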
...@@ -19,19 +19,6 @@ def random_normal(shape,
                  stddev=1.0,
                  dtype=dtypes.float32,
                  name=None):
"""
Outputs random values from a normal distribution.
Args:
shape: A 1-D integer Python array. The shape of the output tensor.
mean: A 0-D Python value of type `dtype`. The mean of the normal distribution.
stddev: A 0-D Python value of type `dtype`. The standard deviation of the normal distribution.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random normal values.
"""
    return ops.RandomNormal(shape, mean, stddev, name=name)
...@@ -41,23 +28,6 @@ def truncated_normal(shape,
                     stddev=1.0,
                     dtype=dtypes.float32,
                     name=None):
"""
Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Python array. The shape of the output tensor.
mean: A 0-D Python value of type `dtype`. The mean of the truncated normal distribution.
stddev: A 0-D Python value of type `dtype`. The standard deviation f the truncated normal distribution.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
    return ops.TruncatedNormal(shape, mean, stddev, name=name)
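The drop-and-repick behavior described above can be illustrated with a small NumPy rejection loop (a reference sketch, not how Dragon implements it):

```python
import numpy as np

def truncated_normal_reference(shape, mean=0.0, stddev=1.0, rng=np.random):
    x = rng.normal(mean, stddev, size=shape)
    # Re-draw any sample farther than 2 stddev from the mean.
    bad = np.abs(x - mean) > 2.0 * stddev
    while bad.any():
        x[bad] = rng.normal(mean, stddev, size=bad.sum())
        bad = np.abs(x - mean) > 2.0 * stddev
    return x

samples = truncated_normal_reference((1000,), mean=0.0, stddev=0.1)
assert np.abs(samples).max() <= 0.2
```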
...@@ -67,33 +37,5 @@ def random_uniform(shape,
                   maxval=None,
                   dtype=dtypes.float32,
                   name=None):
"""
Outputs random values from a uniform distribution.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range, while
the upper bound `maxval` is excluded.
For floats, the default range is `[0, 1)`. For ints, at least `maxval` must
be specified explicitly.
In the integer case, the random integers are slightly biased unless
`maxval - minval` is an exact power of two. The bias is small for values of
`maxval - minval` significantly smaller than the range of the output (either
`2**32` or `2**64`).
Args:
shape: A 1-D integer Python array. The shape of the output tensor.
minval: A 0-D Python value of type `dtype`. The lower bound on the
range of random values to generate. Defaults to 0.
maxval: A 0-D Python value of type `dtype`. The upper bound on
the range of random values to generate. Defaults to 1 if `dtype` is floating point.
dtype: The type of the output: `float32`, `float64`, `int32`, or `int64`.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random uniform values.
"""
    return ops.RandomUniform(shape, minval, maxval)
\ No newline at end of file
numpy==1.12.1
six==1.10.0
protobuf==3.3.0
lmdb==0.93
opencv-python==3.1.0
Pillow==4.1.1
\ No newline at end of file