Commit 31e02b2b by Ting PAN

remove tf documentation

1 parent 990c496b
Showing with 467 additions and 1197 deletions
# ---------------- Welcome To Use Dragon ----------------
PROJECT(dragon)
CMAKE_MINIMUM_REQUIRED(VERSION 2.8.0)
......@@ -24,7 +24,7 @@ set(3RDPARTY_DIR ${PROJECT_SOURCE_DIR}/../3rdparty)
set(PYTHON_DIR /usr/include/python2.7) # prefer
#set(PYTHON_DIR /usr/include/python3.x) # optional, set specific version
#set(ANACONDA_DIR /xxx/anaconda) # optional, set specific version below if using py3
set(NUMPY_DIR /xxx/numpy) # require, root folder of numpy package
set(NUMPY_DIR /xxx/numpy) # require root folder of numpy package
# set CUDA compiling architecture
set(CUDA_ARCH -gencode arch=compute_20,code=sm_20
......
......@@ -114,7 +114,10 @@ class DataTransformer(Process):
# handle mean subtraction
if len(self._mean_value) > 0:
if self._mean_file:
im = im - self._mean_value[h_off : h_off + self._crop_size, w_off : w_off + self._crop_size, :]
if self._crop_size > 0:
im = im - self._mean_value[h_off: h_off + self._crop_size,
w_off: w_off + self._crop_size, :]
else: im = im - self._mean_value[:, :, :]
else: im = im - self._mean_value
# handle range scale
......
......@@ -10,19 +10,5 @@ __all__ = ['flatten']
def flatten(inputs, name=None):
"""
Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: A tensor of size [batch_size, ...].
name: Optional name for the operation.
Returns:
A flattened tensor with shape [batch_size, k].
"""
return ops.Flatten(inputs, axis=1)
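As a quick reference (a minimal NumPy sketch, not the Dragon API), this is the flattening semantics the wrapper implements: the batch dimension is kept and all remaining axes are collapsed. The array `x` below is hypothetical.
```python
import numpy as np

x = np.random.randn(8, 3, 32, 32)    # hypothetical input of shape [batch_size, ...]
flat = x.reshape(x.shape[0], -1)     # keep batch_size, collapse the rest, as Flatten(axis=1) does
assert flat.shape == (8, 3 * 32 * 32)
```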
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = [
'expand_dims',
'shape',
'zeros',
'ones',
'concat',
'transpose',
'tile',
'reshape'
]
import dragon.ops as ops
from ..core import dtypes
def expand_dims(input, axis=None, name=None, dim=None):
if dim is not None:
if axis is not None:
raise ValueError("cannot specify both 'axis' and 'dim'.")
axis = dim
return ops.ExpandDims(input, axis=axis, name=name)
def shape(input, name=None, out_type=dtypes.float32):
return ops.Shape(input, name=name)
def zeros(shape, dtype=dtypes.float32, name=None):
return ops.Fill(shape, value=0.0, name=name)
def ones(shape, dtype=dtypes.float32, name=None):
return ops.Fill(shape, value=1.0, name=name)
def concat(values, axis, name=None):
return ops.Concat(values, axis=axis, name=name)
def transpose(a, perm=None, name=None):
return ops.Transpose(a, perm=perm, name=name)
def tile(input, multiples, name=None):
return ops.Tile(input, multiples=multiples, name=name)
def reshape(tensor, shape, name=None):
return ops.Reshape(tensor, shape=shape, name=name)
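For readers of the trimmed module above, a hedged NumPy sketch of the expand_dims and concat semantics these wrappers mirror; the arrays are made up for illustration.
```python
import numpy as np

t = np.array([1, 2])                                      # shape [2]
assert np.expand_dims(t, 0).shape == (1, 2)               # expand_dims(t, axis=0)
assert np.expand_dims(t, -1).shape == (2, 1)              # expand_dims(t, axis=-1)

t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
assert np.concatenate([t1, t2], axis=0).shape == (4, 3)   # concat(values, axis=0)
assert np.concatenate([t1, t2], axis=1).shape == (2, 6)   # concat(values, axis=1)
```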
......@@ -10,4 +10,5 @@ import dragon.ops as ops
def equal(a, b, name=None):
return ops.Equal([a, b])
\ No newline at end of file
......@@ -21,14 +21,12 @@ from ..core import dtypes
class Initializer(object):
"""Initializer base class: all initializers inherit from this class."""
def __call__(self, shape, dtype=None):
raise NotImplementedError
class Zeros(Initializer):
"""Initializer that generates tensors initialized to 0."""
def __init__(self, dtype=dtypes.float32):
self.dtype = dtype
......@@ -39,7 +37,6 @@ class Zeros(Initializer):
class Ones(Initializer):
"""Initializer that generates tensors initialized to 1."""
def __init__(self, dtype=dtypes.float32):
self.dtype = dtype
......@@ -50,7 +47,6 @@ class Ones(Initializer):
class Constant(Initializer):
"""Initializer that generates tensors with constant values."""
def __init__(self, value=0, dtype=dtypes.float32):
self.value = value
......@@ -62,16 +58,6 @@ class Constant(Initializer):
class RandomUniform(Initializer):
"""
Initializer that generates tensors with a uniform distribution.
Args:
minval: A python scalar. Lower bound of the range
of random values to generate.
maxval: A python scalar. Upper bound of the range
of random values to generate. Defaults to 1 for float types.
dtype: The data type.
"""
def __init__(self, minval=0, maxval=1, dtype=dtypes.float32):
self.minval = minval
......@@ -84,16 +70,6 @@ class RandomUniform(Initializer):
class RandomNormal(Initializer):
"""
Initializer that generates tensors with a normal distribution.
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
dtype: The data type. Only floating point types are supported.
"""
def __init__(self, mean=0.0, stddev=1.0, dtype=dtypes.float32):
self.mean = mean
......@@ -107,21 +83,6 @@ class RandomNormal(Initializer):
class TruncatedNormal(Initializer):
"""
Initializer that generates a truncated normal distribution.
These values are similar to values from a `random_normal_initializer`
except that values more than two standard deviations from the mean
are discarded and re-drawn. This is the recommended initializer for
neural network weights and filters.
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
dtype: The data type. Only floating point types are supported.
"""
def __init__(self, mean=0.0, stddev=1.0, dtype=dtypes.float32):
self.mean = mean
......@@ -135,25 +96,6 @@ class TruncatedNormal(Initializer):
class VarianceScaling(Initializer):
"""
Initializer capable of adapting its scale to the shape of weights tensors.
With `distribution="normal"`, samples are drawn from a truncated normal
distribution centered on zero, with `stddev = sqrt(scale / n)`
where n is:
- number of input units in the weight tensor, if mode = "fan_in"
- number of output units, if mode = "fan_out"
- average of the numbers of input and output units, if mode = "fan_avg"
With `distribution="uniform"`, samples are drawn from a uniform distribution
within [-limit, limit], with `limit = sqrt(3 * scale / n)`.
Arguments:
scale: Scaling factor (positive float).
mode: One of "fan_in", "fan_out", "fan_avg".
distribution: Random distribution to use. One of "normal", "uniform".
dtype: The data type. Only floating point types are supported.
"""
def __init__(self, scale=1.0,
mode="fan_in",
......@@ -189,22 +131,6 @@ variance_scaling_initializer = VarianceScaling
def glorot_uniform_initializer(dtype=dtypes.float32):
"""
The Glorot uniform initializer, also called Xavier uniform initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(6 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
Arguments:
dtype: The data type. Only floating point types are supported.
Returns:
An initializer.
"""
return variance_scaling_initializer(scale=6.0,
mode='fan_avg',
......@@ -213,22 +139,6 @@ def glorot_uniform_initializer(dtype=dtypes.float32):
def glorot_normal_initializer(dtype=dtypes.float32):
"""
The Glorot normal initializer, also called Xavier normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(2 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
Arguments:
dtype: The data type. Only floating point types are supported.
Returns:
An initializer.
"""
return variance_scaling_initializer(scale=2.0,
mode='fan_avg',
......
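A small NumPy sketch of the Glorot (Xavier) uniform rule the docstrings describe, assuming a hypothetical 2-D weight shape `[fan_in, fan_out]`; this illustrates the formula only and is not the Dragon initializer.
```python
import numpy as np

def glorot_uniform(fan_in, fan_out):
    # samples drawn from U[-limit, limit] with limit = sqrt(6 / (fan_in + fan_out))
    limit = np.sqrt(6.0 / (fan_in + fan_out))
    return np.random.uniform(-limit, limit, size=(fan_in, fan_out))

w = glorot_uniform(256, 128)
assert w.shape == (256, 128) and np.abs(w).max() <= np.sqrt(6.0 / 384)
```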
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = [
'argmax',
'matmul',
'add',
'subtract',
'multiply',
'divide',
'sub',
'mul',
'div',
'log',
'exp',
'square',
'sqrt',
'reduce_sum',
'reduce_mean',
'sigmoid',
'tanh',
'add_n'
]
from six.moves import range as xrange
import dragon.ops as ops
def argmax(input, axis=None, name=None, dimension=None):
if dimension is not None:
if axis is not None:
raise ValueError("cannot specify both 'axis' and 'dimension'.")
axis = dimension
elif axis is None: axis = 0
return ops.Argmax(input, axis=axis, name=name)
def matmul(a,
b,
transpose_a=False,
transpose_b=False,
name=None):
return ops.Matmul([a, b], TransA=transpose_a, TransB=transpose_b, name=name)
def add(x, y, name=None):
return ops.Add([x, y], name=name)
def subtract(x, y, name=None):
return ops.Sub([x, y], name=name)
def multiply(x, y, name=None):
return ops.Mul([x, y], name=name)
def divide(x, y, name=None):
return ops.Div([x, y], name=name)
def mul(x, y, name=None):
return multiply(x, y, name)
def sub(x, y, name=None):
return subtract(x, y, name)
def div(x, y, name=None):
return divide(x, y, name=name)
def log(x, name=None):
return ops.Log(x, name=name)
def exp(x, name=None):
return ops.Exp(x, name=name)
def square(x, name=None):
return ops.Square(x, name=name)
def sqrt(x, name=None):
return ops.Pow(x, power=0.5, name=name)
def pow(x, power, name=None):
return ops.Pow(x, power=power, name=name)
def reduce_sum(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
if reduction_indices is not None:
if axis is not None:
raise ValueError("cannot specify both 'axis' and 'reduction_indices'.")
axis = reduction_indices
elif axis is None: axis = -1 # reduce all
if isinstance(axis, list) or isinstance(axis, tuple): # reduce continuously
if len(axis) < 1:
raise RuntimeError('at least one reduce axis is required.')
if len(axis) == 1:
return ops.Sum(input_tensor, axis=axis[0], keep_dims=keep_dims)
else:
ret = ops.Sum(input_tensor, axis=axis[0], keep_dims=True)
for i in xrange(1, len(axis) - 1):
ret = ops.Sum(ret, axis=axis[i], keep_dims=True)
return ops.Sum(ret, axis=axis[len(axis) - 1], keep_dims=keep_dims)
else:
return ops.Sum(input_tensor, axis=axis, keep_dims=keep_dims)
def reduce_mean(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
if reduction_indices is not None:
if axis is not None:
raise ValueError("cannot specify both 'axis' and 'reduction_indices'.")
axis = reduction_indices
elif axis is None: axis = -1 # reduce all
if isinstance(axis, list) or isinstance(axis, tuple): # reduce continuously
if len(axis) < 1:
raise RuntimeError('at least one reduce axis is required.')
if len(axis) == 1:
return ops.Mean(input_tensor, axis=axis[0], keep_dims=keep_dims)
else:
ret = ops.Mean(input_tensor, axis=axis[0], keep_dims=True)
for i in xrange(1, len(axis) - 1):
ret = ops.Mean(ret, axis=axis[i], keep_dims=True)
return ops.Mean(ret, axis=axis[len(axis) - 1], keep_dims=keep_dims)
else:
return ops.Mean(input_tensor, axis=axis, keep_dims=keep_dims)
def sigmoid(x, name=None):
return ops.Sigmoid(x, name=name)
def tanh(x, name=None):
return ops.Tanh(x, name=name)
def add_n(inputs, name=None):
return ops.Eltwise(inputs, operation='SUM', name=name)
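The reduce_sum and reduce_mean wrappers above handle a list of axes by chaining single-axis reductions with keep_dims=True so later axis indices stay valid. A NumPy sketch (hypothetical data) showing the chained form matches a one-shot multi-axis reduction:
```python
import numpy as np

x = np.random.randn(2, 3, 4)
axes = [0, 1]

ret = np.sum(x, axis=axes[0], keepdims=True)    # first reduction keeps dims
ret = np.sum(ret, axis=axes[1], keepdims=True)  # later axis indices are still valid

assert np.allclose(ret, np.sum(x, axis=(0, 1), keepdims=True))
```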
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = [
'relu',
'softmax',
'conv2d',
'avg_pool',
'max_pool',
'xw_plus_b',
'bias_add',
'sigmoid_cross_entropy_with_logits',
'softmax_cross_entropy_with_logits',
'sparse_softmax_cross_entropy_with_logits',
'l2_loss'
]
import dragon.ops as ops
def relu(features, name=None):
return ops.Relu(features, name=name)
def softmax(logits, dim=-1, name=None):
return ops.Softmax(logits, axis=dim)
def conv2d(input, filter, strides, pads=(0, 0, 0, 0),
use_cudnn_on_gpu=True, padding=None,
data_format='NCHW', name=None):
if filter.shape is None:
raise ValueError('filter must have a valid shape.')
else:
if len(filter.shape) != 4:
raise ValueError('filter must be a 4D Tensor')
if len(strides) != 4:
raise ValueError('strides must be a list of length 4.')
if data_format == 'NCHW':
output = ops.Conv2D([input, filter],
num_output=filter.shape[0],
kernel_size=filter.shape[2:],
stride=strides[2:],
pad=pads[2:])
return output
else: raise NotImplementedError()
def avg_pool(value, ksize, strides, pads=(0, 0, 0, 0),
padding=None, data_format="NCHW", name=None):
if len(strides) != 4:
raise ValueError('strides must be a list of length 4.')
if len(ksize) != 4:
raise ValueError('ksize must be a list of length 4.')
if data_format == 'NCHW':
if pads is None: pads = 0
return ops.Pool2D(value,
kernel_size=ksize[2:],
stride=strides[2:],
pad=pads,
mode='AVG_POOLING')
else: raise NotImplementedError()
def max_pool(value, ksize, strides, pads=(0, 0, 0, 0),
padding=None, data_format="NCHW", name=None):
if len(strides) != 4:
raise ValueError('strides must be a list of length 4.')
if len(ksize) != 4:
raise ValueError('ksize must be a list of length 4.')
if data_format == 'NCHW':
if pads is None: pads = 0
return ops.Pool2D(value,
kernel_size=ksize[2:],
stride=strides[2:],
pad=pads,
mode='MAX_POOLING')
else: raise NotImplementedError()
def xw_plus_b(x, weights, biases, name=None):
if weights.shape is None:
raise ValueError('weights must have a valid shape.')
else:
if len(weights.shape) != 2:
raise ValueError('weights must be a 2D Tensor')
if biases.shape is None:
raise ValueError('biases must have a valid shape.')
else:
if len(biases.shape) != 1:
raise ValueError('biases must be a 1D Tensor')
if weights.shape[1] != biases.shape[0]:
raise ValueError('the shapes of weights and biases are incompatible.')
return ops.InnerProduct([x, weights, biases], num_output=weights.shape[1], TransW=False)
def bias_add(value, bias, data_format='NCHW', name=None):
return ops.BiasAdd([value, bias], data_format=data_format, name=name)
def sigmoid_cross_entropy_with_logits(logits, targets, name=None):
return ops.SigmoidCrossEntropyLoss([logits, targets], normalization='UNIT', name=name)
def softmax_cross_entropy_with_logits(_sentinel=None,
labels=None, logits=None,
dim=-1, name=None):
if _sentinel is not None:
raise ValueError('Only call `softmax_cross_entropy_with_logits` '
'with named arguments (labels=..., logits=..., ...)')
if dim == -1: dim = 1
return ops.SoftmaxCrossEntropyLoss([logits, labels], axis=dim, normalization='UNIT', name=name)
def sparse_softmax_cross_entropy_with_logits(logits, labels, dim=-1, name=None):
if dim == -1: dim = 1
return ops.SoftmaxLoss([logits, labels], axis=dim, normalization='UNIT', name=name)
def l2_loss(t, name=None):
return (ops.Reduce(ops.Square(t), operation='SUM') * 0.5)
def dropout(x, keep_prob, name=None):
return ops.Dropout(x, 1 - keep_prob)
def batch_normalization(x, mean, variance,
offset, scale,
decay=0.9,
variance_epsilon=1e-3,
use_global_stats=-1,
name=None):
norm_x = ops.BatchNorm([x, mean, variance], decay, variance_epsilon, use_global_stats, name=name)
return ops.Scale([norm_x, scale, offset], name=name + '_scale' if name is not None else name)
def batch_norm_with_global_normalization(t, m, v,
beta, gamma,
decay=0.9,
variance_epsilon=1e-3,
scale_after_normalization=True,
use_global_stats=-1,
name=None):
norm_x = ops.BatchNorm([t, m, v], decay, variance_epsilon, use_global_stats, name=name)
if scale_after_normalization:
return ops.Scale([norm_x, gamma, beta], name=name + '_scale' if name is not None else name)
else: return norm_x
\ No newline at end of file
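A rough NumPy sketch of the batch_normalization path above: normalize with the supplied mean/variance, then apply scale and offset. The tensors and `eps` (standing in for variance_epsilon) are hypothetical.
```python
import numpy as np

x = np.random.randn(8, 16)                 # hypothetical activations
mean, var = x.mean(axis=0), x.var(axis=0)  # statistics passed to BatchNorm
gamma, beta = np.ones(16), np.zeros(16)    # scale, offset
eps = 1e-3                                 # stands in for variance_epsilon

norm_x = (x - mean) / np.sqrt(var + eps)   # BatchNorm step
y = gamma * norm_x + beta                  # Scale step
assert y.shape == x.shape
```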
......@@ -19,19 +19,6 @@ def random_normal(shape,
stddev=1.0,
dtype=dtypes.float32,
name=None):
"""
Outputs random values from a normal distribution.
Args:
shape: A 1-D integer Python array. The shape of the output tensor.
mean: A 0-D Python value of type `dtype`. The mean of the normal distribution.
stddev: A 0-D Python value of type `dtype`. The standard deviation of the normal distribution.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random normal values.
"""
return ops.RandomNormal(shape, mean, stddev, name=name)
......@@ -41,23 +28,6 @@ def truncated_normal(shape,
stddev=1.0,
dtype=dtypes.float32,
name=None):
"""
Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Python array. The shape of the output tensor.
mean: A 0-D Python value of type `dtype`. The mean of the truncated normal distribution.
stddev: A 0-D Python value of type `dtype`. The standard deviation of the truncated normal distribution.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
return ops.TruncatedNormal(shape, mean, stddev, name=name)
......@@ -67,33 +37,5 @@ def random_uniform(shape,
maxval=None,
dtype=dtypes.float32,
name=None):
"""
Outputs random values from a uniform distribution.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range, while
the upper bound `maxval` is excluded.
For floats, the default range is `[0, 1)`. For ints, at least `maxval` must
be specified explicitly.
In the integer case, the random integers are slightly biased unless
`maxval - minval` is an exact power of two. The bias is small for values of
`maxval - minval` significantly smaller than the range of the output (either
`2**32` or `2**64`).
Args:
shape: A 1-D integer Python array. The shape of the output tensor.
minval: A 0-D Python value of type `dtype`. The lower bound on the
range of random values to generate. Defaults to 0.
maxval: A 0-D Python value of type `dtype`. The upper bound on
the range of random values to generate. Defaults to 1 if `dtype` is floating point.
dtype: The type of the output: `float32`, `float64`, `int32`, or `int64`.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random uniform values.
"""
return ops.RandomUniform(shape, minval, maxval)
\ No newline at end of file
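The removed truncated_normal docstring describes dropping and re-drawing values more than two standard deviations from the mean; a NumPy sketch of that rule (illustrative only, not the Dragon op):
```python
import numpy as np

def truncated_normal(shape, mean=0.0, stddev=1.0):
    # re-draw any value farther than 2 * stddev from the mean
    x = np.random.normal(mean, stddev, size=shape)
    bad = np.abs(x - mean) > 2 * stddev
    while bad.any():
        x[bad] = np.random.normal(mean, stddev, size=int(bad.sum()))
        bad = np.abs(x - mean) > 2 * stddev
    return x

samples = truncated_normal((1000,))
assert np.all(np.abs(samples) <= 2.0)
```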
numpy==1.12.1
six==1.10.0
protobuf==3.3.0
lmdb==0.93
opencv-python==3.1.0
Pillow==4.1.1
\ No newline at end of file
from distutils.core import setup, Extension
from distutils.core import setup
import os.path, sys
import shutil
......@@ -12,17 +12,15 @@ def find_packages(root_dir):
find_packages(filepath)
else:
if filename == '__init__.py':
packages.append(root_dir.replace('python', 'dragon')
.replace('\\', '.')
.replace('/', '.'))
packages.append(root_dir)
def find_modules():
dragon_c_lib_win32 = 'lib/dragon.dll'
dragon_c_lib_other = 'lib/libdragon.so'
dragon_c_lib_win32 = '../lib/dragon.dll'
dragon_c_lib_other = '../lib/libdragon.so'
if os.path.exists(dragon_c_lib_win32):
shutil.copy(dragon_c_lib_win32, 'python/libdragon.pyd')
shutil.copy(dragon_c_lib_win32, 'dragon/libdragon.pyd')
elif os.path.exists(dragon_c_lib_other):
shutil.copy(dragon_c_lib_other, 'python/libdragon.so')
shutil.copy(dragon_c_lib_other, 'dragon/libdragon.so')
else:
print('ERROR: Unable to find modules. Build Dragon with CMake first.')
sys.exit()
......@@ -34,7 +32,7 @@ def find_resources():
others = []
return c_lib + protos + others
find_packages('python')
find_packages('dragon')
find_modules()
setup(name = 'dragon',
......@@ -44,5 +42,5 @@ setup(name = 'dragon',
author='Ting Pan',
license='BSD 2-Clause',
packages=packages,
package_dir={'dragon': 'python'},
package_data={'dragon': find_resources()})
\ No newline at end of file
package_dir={'dragon': 'dragon'},
package_data={'dragon': find_resources()})
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = [
'expand_dims',
'shape',
'zeros',
'ones',
'concat',
'transpose',
'tile',
'reshape'
]
import dragon.ops as ops
from ..core import dtypes
def expand_dims(input, axis=None, name=None, dim=None):
"""
Inserts a dimension of 1 into a tensor's shape.
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts
at zero; if you specify a negative number for `axis` it is counted backward
from the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Other examples:
```python
# 't' is a tensor of shape [2]
shape(expand_dims(t, 0)) ==> [1, 2]
shape(expand_dims(t, 1)) ==> [2, 1]
shape(expand_dims(t, -1)) ==> [2, 1]
# 't2' is a tensor of shape [2, 3, 5]
shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of
size 1.
Args:
input: A `Tensor`.
axis: 0-D (scalar). Specifies the dimension index at which to
expand the shape of `input`.
name: The name of the output `Tensor`.
dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.
Returns:
A `Tensor` with the same data as `input`, but its shape has an additional
dimension of size 1 added.
"""
if dim is not None:
if axis is not None:
raise ValueError("cannot specify both 'axis' and 'dim'.")
axis = dim
return ops.ExpandDims(input, axis=axis, name=name)
def shape(input, name=None, out_type=dtypes.float32):
"""
Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
```python
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
shape(t) ==> [2, 2, 3]
```
Args:
input: A `Tensor`.
name: A name for the operation (optional).
out_type: (Enforce) The specified output type of the operation.
Now only support tf.float32.
Returns:
A `Tensor` of type `out_type`.
"""
return ops.Shape(input, name=name)
def zeros(shape, dtype=dtypes.float32, name=None):
return ops.Fill(shape, value=0.0, name=name)
def ones(shape, dtype=dtypes.float32, name=None):
return ops.Fill(shape, value=1.0, name=name)
def concat(values, axis, name=None):
"""
Concatenates tensors along one dimension.
Concatenates the list of tensors `values` along dimension `axis`. If
`values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
result has shape
[D0, D1, ... Raxis, ...Dn]
where
Raxis = sum(Daxis(i))
That is, the data from the input tensors is joined along the `axis`
dimension.
The number of dimensions of the input tensors must match, and all dimensions
except `axis` must be equal.
For example:
```python
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 0) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 1) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat([t3, t4], 0)) ==> [4, 3]
tf.shape(tf.concat([t3, t4], 1)) ==> [2, 6]
```
Args:
values: A list of `Tensor` objects or a single `Tensor`.
axis: 0-D `int32` `Tensor`. Dimension along which to concatenate.
name: A name for the operation (optional).
Returns:
A `Tensor` resulting from concatenation of the input tensors.
"""
return ops.Concat(values, axis=axis, name=name)
def transpose(a, perm=None, name=None):
"""
Transposes `a`. Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors.
For example:
```python
# 'x' is [[1 2 3]
# [4 5 6]]
tf.transpose(x) ==> [[1 4]
[2 5]
[3 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) ==> [[1 4]
[2 5]
[3 6]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
# 'x' is [[[1 2 3]
# [4 5 6]]
# [[7 8 9]
# [10 11 12]]]
# Take the transpose of the matrices in dimension-0
tf.transpose(x, perm=[0, 2, 1]) ==> [[[1 4]
[2 5]
[3 6]]
[[7 10]
[8 11]
[9 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
Returns:
A transposed `Tensor`.
"""
return ops.Transpose(a, perm=perm, name=name)
def tile(input, multiples, name=None):
return ops.Tile(input, multiples=multiples, name=name)
def reshape(tensor, shape, name=None):
return ops.Reshape(tensor, shape=shape, name=name)
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = [
'argmax',
'matmul',
'add',
'subtract',
'multiply',
'divide',
'sub',
'mul',
'div',
'log',
'exp',
'square',
'sqrt',
'reduce_sum',
'reduce_mean',
'sigmoid',
'tanh',
'add_n'
]
from six.moves import range as xrange
import dragon.ops as ops
def argmax(input, axis=None, name=None, dimension=None):
if dimension is not None:
if axis is not None:
raise ValueError("cannot specify both 'axis' and 'dimension'.")
axis = dimension
elif axis is None: axis = 0
return ops.Argmax(input, axis=axis, name=name)
def matmul(a,
b,
transpose_a=False,
transpose_b=False,
name=None):
"""
Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
The inputs must be matrices (or tensors of rank > 2, representing batches of
matrices), with matching inner dimensions, possibly after transposition.
Both matrices must be of the same type. The supported types are:
`float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
Either matrix can be transposed or adjointed (conjugated and transposed) on
the fly by setting one of the corresponding flag to `True`. These are `False`
by default.
If one or both of the matrices contain a lot of zeros, a more efficient
multiplication algorithm can be used by setting the corresponding
`a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
This optimization is only available for plain matrices (rank-2 tensors) with
datatypes `bfloat16` or `float32`.
For example:
```python
# 2-D tensor `a`
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]
[4. 5. 6.]]
# 2-D tensor `b`
b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]
[9. 10.]
[11. 12.]]
c = tf.matmul(a, b) => [[58 64]
[139 154]]
Args:
a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
`complex128` and rank > 1.
b: `Tensor` with same type and rank as `a`.
transpose_a: If `True`, `a` is transposed before multiplication.
transpose_b: If `True`, `b` is transposed before multiplication.
name: Name for the operation (optional).
Returns:
A `Tensor` of the same type as `a` and `b` where each inner-most matrix is
the product of the corresponding matrices in `a` and `b`, e.g. if all
transpose are `False`:
`output`[..., i, j] = sum_k (`a`[..., i, k] * `b`[..., k, j]),
for all indices i, j.
Note: This is matrix product, not element-wise product.
"""
return ops.Dot([a, b], TransA=transpose_a, TransB=transpose_b, name=name)
def add(x, y, name=None):
return ops.Add([x, y], name=name)
def subtract(x, y, name=None):
return ops.Sub([x, y], name=name)
def multiply(x, y, name=None):
return ops.Mul([x, y], name=name)
def divide(x, y, name=None):
return ops.Div([x, y], name=name)
def mul(x, y, name=None):
return multiply(x, y, name)
def sub(x, y, name=None):
return subtract(x, y, name)
def div(x, y, name=None):
return divide(x, y, name=name)
def log(x, name=None):
"""
Computes log of x element-wise.
I.e., \\(y = log(x)\\).
Args:
x: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
return ops.Log(x, name=name)
def exp(x, name=None):
"""
Computes exp of x element-wise.
I.e., \\(y = exp(x)\\).
Args:
x: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
return ops.Exp(x, name=name)
def square(x, name=None):
"""
Computes square of x element-wise.
I.e., \\(y = x * x = x^2\\).
Args:
x: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
return ops.Square(x, name=name)
def sqrt(x, name=None):
"""
Computes square root of x element-wise.
I.e., \\(y = \sqrt{x} = x^{1/2}\\).
Args:
x: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
return ops.Pow(x, power=0.5, name=name)
def pow(x, power, name=None):
"""
Computes pow of x element-wise.
I.e., \\(y = x^{power}\\).
Args:
x: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
return ops.Pow(x, power=power, name=name)
def reduce_sum(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
"""
Computes the sum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[1, 1, 1]
# [1, 1, 1]]
tf.reduce_sum(x) ==> 6
tf.reduce_sum(x, 0) ==> [2, 2, 2]
tf.reduce_sum(x, 1) ==> [3, 3]
tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
tf.reduce_sum(x, [0, 1]) ==> 6
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
Returns:
The reduced tensor.
"""
if reduction_indices is not None:
if axis is not None:
raise ValueError("cannot specify both 'axis' and 'reduction_indices'.")
axis = reduction_indices
elif axis is None: axis = -1 # reduce all
if isinstance(axis, list) or isinstance(axis, tuple): # reduce continuously
if len(axis) < 1:
raise RuntimeError('at least one reduce axis is required.')
if len(axis) == 1:
return ops.Sum(input_tensor, axis=axis[0], keep_dims=keep_dims)
else:
ret = ops.Sum(input_tensor, axis=axis[0], keep_dims=True)
for i in xrange(1, len(axis) - 1):
ret = ops.Sum(ret, axis=axis[i], keep_dims=True)
return ops.Sum(ret, axis=axis[len(axis) - 1], keep_dims=keep_dims)
else:
return ops.Sum(input_tensor, axis=axis, keep_dims=keep_dims)
def reduce_mean(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
"""
Computes the mean of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[1., 1.]
# [2., 2.]]
tf.reduce_mean(x) ==> 1.5
tf.reduce_mean(x, 0) ==> [1.5, 1.5]
tf.reduce_mean(x, 1) ==> [1., 2.]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
Returns:
The reduced tensor.
"""
if reduction_indices is not None:
if axis is not None:
raise ValueError("cannot specify both 'axis' and 'reduction_indices'.")
axis = reduction_indices
elif axis is None: axis = -1 # reduce all
if isinstance(axis, list) or isinstance(axis, tuple): # reduce continuously
if len(axis) < 1:
raise RuntimeError('at least one reduce axis is required.')
if len(axis) == 1:
return ops.Mean(input_tensor, axis=axis[0], keep_dims=keep_dims)
else:
ret = ops.Mean(input_tensor, axis=axis[0], keep_dims=True)
for i in xrange(1, len(axis) - 1):
ret = ops.Mean(ret, axis=axis[i], keep_dims=True)
return ops.Mean(ret, axis=axis[len(axis) - 1], keep_dims=keep_dims)
else:
return ops.Mean(input_tensor, axis=axis, keep_dims=keep_dims)
def sigmoid(x, name=None):
"""
Computes sigmoid of `x` element-wise.
Specifically, `y = 1 / (1 + exp(-x))`.
Args:
x: A Tensor.
name: A name for the operation (optional).
Returns:
A Tensor with the same type.
"""
return ops.Sigmoid(x, name=name)
def tanh(x, name=None):
"""
Computes hyperbolic tangent of `x` element-wise.
Args:
x: A Tensor.
name: A name for the operation (optional).
Returns:
A Tensor with the same type.
"""
return ops.Tanh(x, name=name)
def add_n(inputs, name=None):
"""
Adds all input tensors element-wise.
Args:
inputs: A list of `Tensor` objects, each with same shape and type.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
"""
return ops.Eltwise(inputs, operation='SUM', name=name)
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = [
'relu',
'softmax',
'conv2d',
'avg_pool',
'max_pool',
'xw_plus_b',
'bias_add',
'sigmoid_cross_entropy_with_logits',
'softmax_cross_entropy_with_logits',
'sparse_softmax_cross_entropy_with_logits',
'l2_loss'
]
import dragon.ops as ops
def relu(features, name=None):
"""
Computes Rectified Linear: `max(features, 0)`.
Args:
features: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type.
"""
return ops.Relu(features, name=name)
def softmax(logits, dim=-1, name=None):
"""
Computes softmax activations.
For each batch `i` and class `j` we have
softmax = exp(logits) / reduce_sum(exp(logits), dim)
Args:
logits: A non-empty `Tensor`.
dim: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
"""
return ops.Softmax(logits, axis=dim)
def conv2d(input, filter, strides, pads=(0, 0, 0, 0),
use_cudnn_on_gpu=True, padding=None,
data_format='NCHW', name=None):
"""
Computes a 2-D convolution given 4-D input and filter tensors.
Args:
input: A Tensor.
filter: A Tensor.
For 'NCHW', shape as [out_channels, in_channels, filter_height, filter_width].
For 'NHWC', shape as [filter_height, filter_width, in_channels, out_channels].
strides: A list of ints. 1-D of length 4.
The stride of the sliding window for each dimension of input.
pads: A list of ints. 1-D of length 4.
use_cudnn_on_gpu: An optional bool. Defaults to True.
padding: A string from: "SAME", "VALID". (deprecated)
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: A name for the operation (optional).
Returns:
A Tensor. Has the same type as input.
"""
if filter.shape is None:
raise ValueError('filter must have a valid shape.')
else:
if len(filter.shape) != 4:
raise ValueError('filter must be a 4D Tensor')
if len(strides) != 4:
raise ValueError('strides must be a list of length 4.')
if data_format == 'NCHW':
output = ops.Conv2D([input, filter],
num_output=filter.shape[0],
kernel_size=filter.shape[2:],
stride=strides[2:],
pad=pads[2:])
return output
else: raise NotImplementedError()
def avg_pool(value, ksize, strides, pads=(0, 0, 0, 0),
padding=None, data_format="NCHW", name=None):
"""
Performs the avg pooling on the input.
Args:
value: A 4-D `Tensor` with type `tf.float32`.
ksize: A list of ints that has length 4.
strides: A list of ints that has length 4.
pads: A list of ints or a int.
padding: A string, either `'VALID'` or `'SAME'`. (deprecated)
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` with type `tf.float32`. The avg pooled output tensor.
"""
if len(strides) != 4:
raise ValueError('strides must be a list of length 4.')
if len(ksize) != 4:
raise ValueError('ksize must be a list of length 4.')
if data_format == 'NCHW':
if pads is None: pads = 0
return ops.Pool2D(value,
kernel_size=ksize[2:],
stride=strides[2:],
pad=pads,
mode='AVG_POOLING')
else: raise NotImplementedError()
def max_pool(value, ksize, strides, pads=(0, 0, 0, 0),
padding=None, data_format="NCHW", name=None):
"""
Performs the max pooling on the input.
Args:
value: A 4-D `Tensor` with type `tf.float32`.
ksize: A list of ints that has length 4.
strides: A list of ints that has length 4.
pads: A list of ints or a int.
padding: A string, either `'VALID'` or `'SAME'`. (deprecated)
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` with type `tf.float32`. The max pooled output tensor.
"""
if len(strides) != 4:
raise ValueError('strides must be a list of length 4.')
if len(ksize) != 4:
raise ValueError('strides must be a list of length 4.')
if data_format == 'NCHW':
if pads is None: pads = 0
return ops.Pool2D(value,
kernel_size=ksize[2:],
stride=strides[2:],
pad=pads,
mode='MAX_POOLING')
else: raise NotImplementedError()
def xw_plus_b(x, weights, biases, name=None):
"""
Computes matmul(x, weights) + biases.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
if weights.shape is None:
raise ValueError('weights must have a valid shape.')
else:
if len(weights.shape) != 2:
raise ValueError('weights must be a 2D Tensor')
if biases.shape is None:
raise ValueError('biases must have a valid shape.')
else:
if len(biases.shape) != 1:
raise ValueError('biases must be a 1D Tensor')
if weights.shape[1] != biases.shape[0]:
raise ValueError('the shapes of weights and biases are incompatible.')
return ops.InnerProduct([x, weights, biases], num_output=weights.shape[1], TransW=False)
def bias_add(value, bias, data_format='NCHW', name=None):
"""
Adds `bias` to `value`.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
"""
return ops.BiasAdd([value, bias], data_format=data_format, name=name)
def sigmoid_cross_entropy_with_logits(logits, targets, name=None):
"""
Computes sigmoid cross entropy given logits.
Measures the probability error in discrete classification tasks in which
each class is independent and not mutually exclusive.
For instance, one could perform multilabel classification where a picture
can contain both an elephant and a dog at the same time.
Args:
logits: A Tensor of type float32 or float64.
targets: A Tensor of the same type and shape as logits.
name: A name for the operation (optional).
Returns:
A Tensor. Has the same type as logits. Same shape as logits.
"""
return ops.SigmoidCrossEntropyLoss([logits, targets], normalization='UNIT', name=name)
def softmax_cross_entropy_with_logits(_sentinel=None,
labels=None, logits=None,
dim=-1, name=None):
"""
Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
`logits` and `labels` must have the same shape `[batch_size, num_classes]`
and the same dtype (either `float16`, `float32`, or `float64`).
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
_sentinel: Used to prevent positional parameters. Internal, do not use.
labels: Each row `labels[i]` must be a valid probability distribution.
logits: Unscaled log probabilities.
dim: The class dimension. Defaults to -1, which is mapped to axis 1.
name: A name for the operation (optional).
Returns:
A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the
softmax cross entropy loss.
"""
if _sentinel is not None:
raise ValueError('Only call `softmax_cross_entropy_with_logits` '
'with named arguments (labels=..., logits=..., ...)')
if dim == -1: dim = 1
return ops.SoftmaxCrossEntropyLoss([logits, labels], axis=dim, normalization='UNIT', name=name)
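To make the "unscaled logits" warning concrete, a NumPy sketch of the per-row softmax cross entropy the docstring describes; the logits and labels are hypothetical and this is not the Dragon kernel.
```python
import numpy as np

logits = np.array([[2.0, 1.0, 0.1]])   # unscaled logits, shape [batch_size, num_classes]
labels = np.array([[1.0, 0.0, 0.0]])   # each row is a valid probability distribution

p = np.exp(logits - logits.max(axis=1, keepdims=True))   # softmax applied internally
p /= p.sum(axis=1, keepdims=True)
loss = -(labels * np.log(p)).sum(axis=1)                 # 1-D loss of length batch_size
```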
def sparse_softmax_cross_entropy_with_logits(logits, labels, dim=-1, name=None):
"""
Computes sparse softmax cross entropy between `logits` and `labels`.
Args:
logits: Unscaled log probabilities.
labels: A `Tensor` of shape [batchsize,].
Note that it is not a one-hot representation.
dim: The class dimension. Defaults to -1, which is mapped to axis 1.
name: A name for the operation (optional).
Returns:
A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the
sparse softmax cross entropy loss.
"""
if dim == -1: dim = 1
return ops.SoftmaxLoss([logits, labels], axis=dim, normalization='UNIT', name=name)
def l2_loss(t, name=None):
"""
Computes half the L2 norm of a tensor without the sqrt:
output = sum(t ** 2) / 2
Args:
t: A Tensor. Typically 2-D, but may have any dimensions.
name: Optional name for the operation.
Returns:
A Tensor. Has the same type as t. 0-D.
"""
return (ops.Reduce(ops.Square(t), operation='SUM') * 0.5)
def dropout(x, keep_prob, name=None):
"""
Computes dropout.
With probability `keep_prob`, outputs the input element scaled up by
`1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
sum is unchanged.
Args:
x: A tensor.
keep_prob: A float. The probability that each element is kept.
name: A name for this operation (optional).
Returns:
A Tensor of the same shape of `x`.
"""
return ops.Dropout(x, 1 - keep_prob)
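A minimal NumPy sketch of the inverted-dropout scaling the docstring describes: kept units are scaled by 1/keep_prob so the expected sum is unchanged (illustrative only).
```python
import numpy as np

x = np.ones((4, 5))
keep_prob = 0.5

mask = (np.random.rand(*x.shape) < keep_prob).astype(x.dtype)
y = x * mask / keep_prob   # E[y] == x, matching the docstring's scaling rule
```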
def batch_normalization(x, mean, variance,
offset, scale,
decay=0.9,
variance_epsilon=1e-3,
use_global_stats=-1,
name=None):
norm_x = ops.BatchNorm([x, mean, variance], decay, variance_epsilon, use_global_stats, name=name)
return ops.Scale([norm_x, scale, offset], name=name + '_scale' if name is not None else name)
def batch_norm_with_global_normalization(t, m, v,
beta, gamma,
decay=0.9,
variance_epsilon=1e-3,
scale_after_normalization=True,
use_global_stats=-1,
name=None):
norm_x = ops.BatchNorm([t, m, v], decay, variance_epsilon, use_global_stats, name=name)
if scale_after_normalization:
return ops.Scale([norm_x, gamma, beta], name=name + '_scale' if name is not None else name)
else: return norm_x
\ No newline at end of file
......@@ -9,12 +9,6 @@
4. OpenMPI [Optional]
-----
### Runtime Requirements for Python
0. Package: protobuf
1. Package: lmdb
-----
### Installation
1. Clone this repository
......@@ -28,15 +22,22 @@
[*Linux64*](https://pan.baidu.com/s/1qXPEOWG) (OpenMPI)
4. Configure Dragon/CMakeLists.txt
4. Install Python Requirements
```Shell
cd Dragon/python
pip install -r requirements.txt
```
5. Configure Dragon/CMakeLists.txt
- Select optional libraries [PYTHON3 / CUDA / CUDNN / BLAS / SSE / MPI / MPI_CUDA_AWARE / CUDA_FP16]
- Set the 3rdparty path (recommended to keep the default)
- Set Python include path & Numpy root path
- Set CUDA compiling architectures if necessary
- For GCC 4.8+ (below 5.0), add ``-std=c++11`` to ``CUDA_NVCC_FLAGS`` if ``nullptr`` is not found
- We generate *.h and *.cc files under the ``Dragon/src/protos`` with protobuf2.6, run protoc by yourself if higher are required
- We pre-generated the files under ``Dragon/src/protos`` with protobuf-2.6; run protoc yourself if a newer version is required
5. Environment Variables
6. Environment Variables
### Linux(Only for OpenMPI):
- Create dragon.conf
......@@ -57,7 +58,7 @@
- PATH=........;C:\Dragon\3rdparty\bin;
6. Setup MPI [Optional]
7. Setup MPI [Optional]
#### Linux:
- We use OpenMPI, which supports "cuda-aware-mpi"
- See more:
......@@ -77,7 +78,7 @@
- We use Microsoft MPI, which runs well on the latest Windows 10
- Microsoft MPI is integrated into 3rdparty, so no extra setup is needed
7. Compile
8. Compile
#### Linux:
- Install cmake
......@@ -105,32 +106,18 @@
8. Deploy
- Install Dragon
```Shell
cd Dragon
cd Dragon/python
python setup.py install
```
``Hint``: If you do not have permission, try as follows:
```Shell
cd Dragon
cd Dragon/python
python setup.py install --user
```
- Install protobuf
```Shell
pip install protobuf
```
- Install lmdb
```Shell
pip install lmdb
```
----
## Usage
......@@ -143,6 +130,14 @@ import dragon
### Virtual DL Frameworks
**------------------- Attention -------------------**
``tensorflow`` and ``theano`` are still incomplete; prefer not to use them.
Currently, we recommend ``caffe`` and ``tiny-dragon`` (ops + theano.function + theano.tensor.grad + updaters)
**-------------------------------------------------**
```Python
import dragon.vm.theano as theano
import dragon.vm.caffe as caffe
......
......@@ -138,9 +138,9 @@ def make_db(image_path, label_path, database_path):
if __name__ == '__main__':
#untar('data/cifar-10-python.tar.gz')
untar('data/cifar-10-python.tar.gz')
#extract_images()
extract_images()
make_db('data/extract/JPEGImages',
'data/extract/ImageSets/train.txt',
......
......@@ -14,7 +14,8 @@ classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
# init
#caffe.set_mode_gpu()
caffe.set_mode_gpu()
caffe.set_device(0)
# load net
net = caffe.Net("cifar10_quick_deploy.prototxt",
'snapshots/cifar10_quick_iter_5000.caffemodel', caffe.TEST)
......@@ -44,4 +45,4 @@ def run(filename):
if __name__ == '__main__':
run('data/demo/cat.jpg')
\ No newline at end of file
run('data/demo/cat.jpg')
......@@ -12,6 +12,7 @@ if __name__ == '__main__':
# init
caffe.set_mode_gpu()
caffe.set_device(0)
# solve
solver = caffe.SGDSolver('cifar10_full_solver.prototxt')
......
......@@ -12,8 +12,9 @@ if __name__ == '__main__':
# init
caffe.set_mode_gpu()
caffe.set_device(0)
# solve
solver = caffe.SGDSolver('cifar10_quick_solver.prototxt')
solver.step(5000)
solver.snapshot()
\ No newline at end of file
solver.snapshot()