Commit f4e789be by Ting PAN

VM.TensorFlow Preview

1 parent f52f82f6
Showing with 4879 additions and 4 deletions
...@@ -550,7 +550,7 @@ class Net(object):
         The implementation of `Net_outputs(pycaffe.py, L81)`_.
         """
-        return self._net_outputs
+        return list(self._net_outputs)
     def replace(self, A, B):
...@@ -571,8 +571,8 @@ class Net(object):
         --------
         >>> import dragon.ops as ops
         >>> data, label = ops.LMDBData()
-        >>> net.Replace(net.blobs['data'].data, data)
-        >>> net.Replace(net.blobs['label'].data, label)
+        >>> net.replace(net.blobs['data'].data, data)
+        >>> net.replace(net.blobs['label'].data, label)
         """
         self._swap_tensors[A] = B
......
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
# Framework
from dragon.vm.tensorflow.framework.framework_lib import *
# Session
from dragon.vm.tensorflow.client.client_lib import *
# Ops
from dragon.vm.tensorflow.ops.standard_ops import *
# Bring in subpackages.
from dragon.vm.tensorflow.ops import nn
# Import the names from training.py as train.Name.
from dragon.vm.tensorflow.training import training as train
#from .utils.gradients import *
# Export modules and constants
from dragon.vm.tensorflow.layers import layers
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from dragon.vm.tensorflow.client.session import Session
from dragon.vm.tensorflow.client.session import get_default_session
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import warnings
from dragon.core.tensor import Tensor
import dragon.vm.theano as theano
from dragon.vm.tensorflow.protobuf import config_pb2
from dragon.vm.tensorflow.training.optimizer import Optimizer
from dragon.vm.tensorflow.ops.variables import VariablesInitializer
from dragon.vm.tensorflow.framework import ops
_TRANSACTIONS = {}
class Transaction(object):
def __init__(self, functions):
self.functions = functions
def run(self, feed_values=None):
for i, function in enumerate(self.functions):
if i == 0 and feed_values is not None:
function(*feed_values, return_outputs=False)
else: function(return_outputs=False)
_default_session = None
class BaseSession(object):
"""
Construct a BaseSession.
"""
def __init__(self, target='', graph=None, config=None):
if graph is None:
self._graph = ops.get_default_graph()
else:
            raise NotImplementedError('The Session currently supports only the default graph.')
self._opened = False
self._closed = False
if config is not None:
if not isinstance(config, config_pb2.ConfigProto):
raise TypeError('Config should be a tf.ConfigProto, but got {}'.format(type(config)))
self._config = config
self._add_shapes = config.graph_options.infer_shapes
else:
self._config = None
self._add_shapes = False
self._session = None
def list_devices(self):
"""Lists available devices in this session.
```python
devices = sess.list_devices()
for d in devices:
print(d.name)
```
Each element in the list has the following properties:
- `name`: A string with the full name of the device. ex:
`/job:worker/replica:0/task:3/device:CPU:0`
- `device_type`: The type of the device (e.g. `CPU`, `GPU`, `TPU`.)
- `memory_limit`: The maximum amount of memory available on the device.
Note: depending on the device, it is possible the usable memory could
be substantially less.
Raises:
tf.errors.OpError: If it encounters an error (e.g. session is in an
invalid state, or network errors occur).
Returns:
A list of devices in the session.
"""
# with errors.raise_exception_on_not_ok_status() as status:
# if self._created_with_new_api:
# raw_device_list = tf_session.TF_SessionListDevices(
# self._session, status)
# else:
# raw_device_list = tf_session.TF_DeprecatedSessionListDevices(
# self._session, status)
# device_list = []
# size = tf_session.TF_DeviceListCount(raw_device_list)
# for i in range(size):
# name = tf_session.TF_DeviceListName(raw_device_list, i, status)
# device_type = tf_session.TF_DeviceListType(raw_device_list, i, status)
# memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i, status)
# device_list.append(_DeviceAttributes(name, device_type, memory))
# tf_session.TF_DeleteDeviceList(raw_device_list)
# return device_list
def close(self):
pass
@property
def graph(self):
return self._graph
@property
def graph_def(self):
return ''
@property
def sess_str(self):
return ''
def as_default(self):
pass
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
try:
            if options is not None:
                warnings.warn(Warning('The argument <options> is ignored.'))
            if run_metadata is not None:
                warnings.warn(Warning('The argument <run_metadata> is ignored.'))
except Warning:
pass
if not isinstance(fetches, list): fetches = [fetches]
if len(fetches) < 1: return None
return self._run(fetches, feed_dict)
def _run(self, fetches, feed_dict):
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
# unpack opts and tensors
opts = []; tensors = []
for target in fetches:
if isinstance(target, Optimizer): opts.append(target)
elif isinstance(target, VariablesInitializer): tensors.extend(target.var_list)
elif isinstance(target, Tensor): tensors.append(target)
# find minimum solving targets
targets = set()
for t in tensors: targets.add(t)
for opt in opts:
for t in opt.objs: targets.add(t)
targets = list(targets)
        # check whether a transaction has already been cached for these fetches
        global _TRANSACTIONS
        t_key = tuple(fetches + list(feed_dict.keys())) \
            if feed_dict is not None else tuple(fetches)
        transaction = _TRANSACTIONS.get(t_key, None)
# cond.1: run by feeding
if feed_dict is not None:
# checking
for key, value in feed_dict.items():
                if not isinstance(key, Tensor):
                    raise TypeError('Each key of feed_dict should be a Tensor.')
                if key.shape is not None:
                    if len(key.shape) != len(value.shape):
                        raise RuntimeError(
                            'The Tensor({}) expects {} dimensions, '
                            'but the fed value has {} dimensions.'.format(
                                key.name, len(key.shape), len(value.shape)))
for i in xrange(len(key.shape)):
if key.shape[i] is None: continue
                    if key.shape[i] != value.shape[i]:
                        raise RuntimeError(
                            'The Tensor({}) expects shape ({}), '
                            'but the fed value has shape ({}).'.format(
                                key.name,
                                ','.join([str(dim) for dim in key.shape]),
                                ','.join([str(dim) for dim in value.shape])))
# create a new transaction
if transaction is None:
functions = []
functions.append(theano.function(inputs=feed_dict.keys(), outputs=targets))
for opt in opts:
functions.append(theano.function(updater=opt.updater))
_TRANSACTIONS[t_key] = transaction = Transaction(functions)
transaction.run(feed_dict.values())
# cond.2: run without feeding
else:
# create a new transaction
if transaction is None:
functions = []
functions.append(theano.function(outputs=targets))
for opt in opts:
functions.append(theano.function(updater=opt.updater))
_TRANSACTIONS[t_key] = transaction = Transaction(functions)
transaction.run(None)
# fetch after running
returns = []
for target in fetches:
if isinstance(target, Optimizer): returns.append(None)
elif isinstance(target, VariablesInitializer): returns.append(None)
else:
np_target = target.get_value()
# unpack the scalar if necessary
if np_target.size == 1:
returns.append(np_target.flatten()[0])
else:
returns.append(np_target)
# unpack the returns if necessary
if len(returns) == 1: return returns[0]
else: return returns
class Session(BaseSession):
"""
Construct a Session.
"""
def __init__(self, target='', graph=None, config=None):
super(Session, self).__init__(target, graph, config=config)
def __enter__(self):
return self
def __exit__(self, exec_type, exec_value, exec_tb):
pass
@staticmethod
def reset(target, containers=None, config=None):
pass
class InteractiveSession(BaseSession):
"""
    Construct an InteractiveSession.
"""
def __init__(self, target='', graph=None, config=None):
super(InteractiveSession, self).__init__(target, graph, config=config)
def __enter__(self):
pass
def __exit__(self, exec_type, exec_value, exec_tb):
pass
@staticmethod
def reset(target, containers=None, config=None):
pass
def get_default_session():
global _default_session
if _default_session is None:
_default_session = Session()
return _default_session
\ No newline at end of file
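# ------------------------------------------------------------
# A minimal usage sketch of the Session above. Illustrative
# only: it assumes the VM is importable as dragon.vm.tensorflow
# and that `constant` is exported from the framework package.
# ------------------------------------------------------------
if __name__ == '__main__':
    import dragon.vm.tensorflow as tf

    c = tf.constant([[1.0, 2.0], [3.0, 4.0]], name='c')
    with tf.Session() as sess:
        value = sess.run(c)       # a single fetch returns a numpy array
        pair = sess.run([c, c])   # a list of fetches returns a list
        print(value.shape, len(pair))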
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from .layers import *
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from dragon.vm.tensorflow.framework import ops
from dragon.vm.tensorflow.ops import var_scope as variable_scope
def get_variables(scope=None, suffix=None,
collection=ops.GraphKeys.GLOBAL_VARIABLES):
if isinstance(scope, variable_scope.VariableScope):
scope = scope.name
if suffix is not None:
scope = (scope or '') + '.*' + suffix
return ops.get_collection(collection, scope)
\ No newline at end of file
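# A small sketch of get_variables. Illustrative only: the scope name and
# suffix are placeholders, and the collection stays empty until variables
# have actually been created under that scope.
if __name__ == '__main__':
    conv1_vars = get_variables('conv1')                   # names matching 'conv1.*'
    conv1_biases = get_variables('conv1', suffix='bias')  # narrowed to 'conv1.*bias'
    print(len(conv1_vars), len(conv1_biases))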
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from .layers import *
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import math
from dragon.vm.tensorflow.framework import dtypes
from dragon.vm.tensorflow.ops import random_ops
__all__ = ['xavier_initializer',
'xavier_initializer_conv2d',
'variance_scaling_initializer']
def xavier_initializer(uniform=True, seed=None, dtype=dtypes.float32):
return variance_scaling_initializer(factor=1.0, mode='FAN_AVG',
uniform=uniform, seed=seed, dtype=dtype)
xavier_initializer_conv2d = xavier_initializer
def variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False,
seed=None, dtype=dtypes.float32):
if not dtype.is_floating:
raise TypeError('Cannot create initializer for non-floating point type.')
if mode not in ['FAN_IN', 'FAN_OUT', 'FAN_AVG']:
        raise TypeError('Unknown mode %s, expected one of [FAN_IN, FAN_OUT, FAN_AVG].' % mode)
def _initializer(shape, dtype=dtype, partition_info=None):
"""Initializer function."""
if not dtype.is_floating:
raise TypeError('Cannot create initializer for non-floating point type.')
# Estimating fan_in and fan_out is not possible to do perfectly, but we try.
# This is the right thing for matrix multiply and convolutions.
if shape:
fan_in = float(shape[-2]) if len(shape) > 1 else float(shape[-1])
fan_out = float(shape[-1])
else:
fan_in = 1.0
fan_out = 1.0
for dim in shape[:-2]:
fan_in *= float(dim)
fan_out *= float(dim)
if mode == 'FAN_IN':
# Count only number of input connections.
n = fan_in
elif mode == 'FAN_OUT':
# Count only number of output connections.
n = fan_out
elif mode == 'FAN_AVG':
# Average number of inputs and output connections.
n = (fan_in + fan_out) / 2.0
if uniform:
# To get stddev = math.sqrt(factor / n) need to adjust for uniform.
limit = math.sqrt(3.0 * factor / n)
return random_ops.random_uniform(shape, -limit, limit,
dtype, seed=seed)
else:
# To get stddev = math.sqrt(factor / n) need to adjust for truncated.
trunc_stddev = math.sqrt(1.3 * factor / n)
return random_ops.truncated_normal(shape, 0.0, trunc_stddev, dtype,
seed=seed)
return _initializer
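# A small sketch of the initializers above. Illustrative only: the shape
# is an arbitrary 3x3 kernel with 64 input and 128 output channels.
if __name__ == '__main__':
    init_fn = xavier_initializer(uniform=True)
    kernel = init_fn(shape=[3, 3, 64, 128])             # uniform in [-limit, limit]
    msra_fn = variance_scaling_initializer(factor=2.0, mode='FAN_IN')
    kernel2 = msra_fn(shape=[3, 3, 64, 128])            # truncated normal, stddev ~ sqrt(2 / fan_in)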
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from collections import defaultdict
import dragon.ops as op_lib
import dragon.vm.tensorflow.framework.ops as ops
from dragon.vm.tensorflow.contrib.layers import initializers
from dragon.vm.tensorflow.ops import init_ops
from dragon.vm.tensorflow.ops import nn
from dragon.vm.tensorflow.ops import var_scope as vs
from dragon.vm.tensorflow.layers import layers
from dragon.vm.tensorflow.layers import normalization as normalization_layers
__all__ = ['flatten']
_LAYERS_UID_DICT = defaultdict(int)
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
DATA_FORMAT_NCDHW = 'NCDHW'
DATA_FORMAT_NDHWC = 'NDHWC'
def _default_scope(scope, key, indicator):
if scope is None:
return indicator
# global _LAYERS_UID_DICT
# _LAYERS_UID_DICT[key] += 1
# return '{}{}'.format(indicator, _LAYERS_UID_DICT[key])
else:
return scope
def avg_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
return layers.average_pooling2d(inputs=inputs,
pool_size=kernel_size,
strides=stride,
padding=padding,
data_format=df)
def max_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
return layers.max_pooling2d(inputs=inputs,
pool_size=kernel_size,
strides=stride,
padding=padding,
data_format=df)
def convolution(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
scope = _default_scope(scope, 'CONVOLUTION', 'Conv')
if data_format not in [None, 'NHWC', 'NCHW']:
raise ValueError('Invalid data_format: %r' % (data_format,))
data_format = 'channels_first' if data_format == 'NCHW' else 'channels_last'
input_rank = inputs.get_shape().ndims
with vs.variable_scope(scope, reuse=reuse) as sc:
if input_rank == 4:
return layers.conv2d(
inputs=inputs,
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=data_format,
dilation_rate=rate,
activation=activation_fn,
use_bias=True if biases_initializer is not None else False,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
reuse=reuse)
# Simple alias.
convolution2d = convolution
conv2d = convolution2d
def fully_connected(inputs,
num_outputs,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
scope = _default_scope(scope, 'FULLY_CONNECTED', 'fully_connected')
with vs.variable_scope(scope, reuse=reuse) as sc:
return layers.dense(
inputs=inputs,
units=num_outputs,
activation=activation_fn,
use_bias=True if biases_initializer is not None else False,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
reuse=reuse)
def batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
activation_fn=None,
param_initializers=None,
param_regularizers=None,
updates_collections=ops.GraphKeys.UPDATE_OPS,
is_training=True,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
batch_weights=None,
fused=False,
data_format=DATA_FORMAT_NHWC,
zero_debias_moving_mean=False,
scope=None,
renorm=False,
renorm_clipping=None,
renorm_decay=0.99):
scope = _default_scope(scope, 'BATCH_NORM', 'BatchNorm')
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
axis = 1 if data_format == DATA_FORMAT_NCHW else -1
with vs.variable_scope(scope, reuse=reuse) as sc:
if not param_initializers:
param_initializers = {}
beta_initializer = param_initializers.get('beta', init_ops.zeros_initializer())
gamma_initializer = param_initializers.get('gamma', init_ops.ones_initializer())
moving_mean_initializer = param_initializers.get('moving_mean', init_ops.zeros_initializer())
moving_variance_initializer = param_initializers.get('moving_variance', init_ops.ones_initializer())
if not param_regularizers:
param_regularizers = {}
beta_regularizer = param_regularizers.get('beta')
gamma_regularizer = param_regularizers.get('gamma')
return layers.batch_norm(
inputs=inputs,
axis=axis,
momentum=decay,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
trainable=trainable,
renorm=renorm,
renorm_clipping=renorm_clipping,
renorm_momentum=renorm_decay,
fused=fused,
training=is_training)
def flatten(inputs,
outputs_collections=None,
scope=None):
return op_lib.Flatten(inputs, axis=0, keep_axes=2)
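# A usage sketch of the wrappers above. Illustrative only: the constant
# input merely stands in for real NHWC image data.
if __name__ == '__main__':
    from dragon.vm.tensorflow.framework import constant_op

    x = constant_op.constant(0.0, shape=[8, 32, 32, 3], name='x')
    net = conv2d(x, num_outputs=16, kernel_size=3)       # 3x3 conv + relu
    net = max_pool2d(net, kernel_size=2)                 # 2x2 max pooling
    net = flatten(net)
    logits = fully_connected(net, num_outputs=10, activation_fn=None)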
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
\ No newline at end of file
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base utilities for loading datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
import time
import shutil
from six.moves import urllib
Dataset = collections.namedtuple('Dataset', ['data', 'target'])
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])
def retry(initial_delay,
max_delay,
factor=2.0,
jitter=0.25,
is_retriable=None):
"""Simple decorator for wrapping retriable functions.
Args:
initial_delay: the initial delay.
factor: each subsequent retry, the delay is multiplied by this value.
(must be >= 1).
jitter: to avoid lockstep, the returned delay is multiplied by a random
number between (1-jitter) and (1+jitter). To add a 20% jitter, set
jitter = 0.2. Must be < 1.
    max_delay: the maximum delay allowed (actual max is
      max_delay * (1 + jitter)).
is_retriable: (optional) a function that takes an Exception as an argument
and returns true if retry should be applied.
"""
if factor < 1:
raise ValueError('factor must be >= 1; was %f' % (factor,))
if jitter >= 1:
raise ValueError('jitter must be < 1; was %f' % (jitter,))
# Generator to compute the individual delays
def delays():
delay = initial_delay
while delay <= max_delay:
yield delay * random.uniform(1 - jitter, 1 + jitter)
delay *= factor
def wrap(fn):
"""Wrapper function factory invoked by decorator magic."""
def wrapped_fn(*args, **kwargs):
"""The actual wrapper function that applies the retry logic."""
for delay in delays():
try:
return fn(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except)
if is_retriable is None:
continue
if is_retriable(e):
time.sleep(delay)
else:
raise
return fn(*args, **kwargs)
return wrapped_fn
return wrap
_RETRIABLE_ERRNOS = {
110, # Connection timed out [socket.py]
}
def _is_retriable(e):
return isinstance(e, IOError) and e.errno in _RETRIABLE_ERRNOS
@retry(initial_delay=1.0, max_delay=16.0, is_retriable=_is_retriable)
def urlretrieve_with_retry(url, filename=None):
return urllib.request.urlretrieve(url, filename)
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not os.path.exists(work_directory):
os.makedirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
temp_file_name, _ = urlretrieve_with_retry(source_url)
shutil.copy(temp_file_name, filepath)
size = os.path.getsize(filepath)
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
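# A small sketch of maybe_download. Illustrative only: the cache directory
# is a placeholder and the URL mirrors the MNIST reader below.
if __name__ == '__main__':
    SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
    path = maybe_download('train-images-idx3-ubyte.gz', '/tmp/mnist-data',
                          SOURCE_URL + 'train-images-idx3-ubyte.gz')
    print('cached at', path)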
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import numpy
from six.moves import xrange # pylint: disable=redefined-builtin
from dragon.vm.tensorflow.contrib.learn.datasets import base
from dragon.vm.tensorflow.framework import dtypes
from dragon.vm.tensorflow.framework import random_seed
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
# SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(f):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth].
Args:
f: A file object that can be passed into a gzip reader.
Returns:
data: A 4D uint8 numpy array [index, y, x, depth].
Raises:
ValueError: If the bytestream does not start with 2051.
"""
print('Extracting', f.name)
with gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST image file: %s' %
(magic, f.name))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8)
data = data.reshape(num_images, rows, cols, 1)
return data
def dense_to_one_hot(labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = numpy.arange(num_labels) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
def extract_labels(f, one_hot=False, num_classes=10):
"""Extract the labels into a 1D uint8 numpy array [index].
Args:
f: A file object that can be passed into a gzip reader.
one_hot: Does one hot encoding for the result.
num_classes: Number of classes for the one hot encoding.
Returns:
labels: a 1D uint8 numpy array.
Raises:
    ValueError: If the bytestream doesn't start with 2049.
"""
print('Extracting', f.name)
with gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST label file: %s' %
(magic, f.name))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
if one_hot:
return dense_to_one_hot(labels, num_classes)
return labels
class DataSet(object):
def __init__(self,
images,
labels,
fake_data=False,
one_hot=False,
dtype=dtypes.float32,
reshape=True,
seed=None):
"""Construct a DataSet.
one_hot arg is used only if fake_data is true. `dtype` can be either
`uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
`[0, 1]`. Seed arg provides for convenient deterministic testing.
"""
seed1, seed2 = random_seed.get_seed(seed)
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seed1 if seed is None else seed2)
dtype = dtypes.as_dtype(dtype).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
dtype)
if fake_data:
self._num_examples = 10000
self.one_hot = one_hot
else:
assert images.shape[0] == labels.shape[0], (
'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
self._num_examples = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
images = images.reshape(images.shape[0],
images.shape[1] * images.shape[2])
if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(numpy.float32)
images = numpy.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size, fake_data=False, shuffle=True):
"""Return the next `batch_size` examples from this data set."""
if fake_data:
fake_image = [1] * 784
if self.one_hot:
fake_label = [1] + [0] * 9
else:
fake_label = 0
return [fake_image for _ in xrange(batch_size)], [
fake_label for _ in xrange(batch_size)
]
start = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
perm0 = numpy.arange(self._num_examples)
numpy.random.shuffle(perm0)
self._images = self.images[perm0]
self._labels = self.labels[perm0]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
rest_num_examples = self._num_examples - start
images_rest_part = self._images[start:self._num_examples]
labels_rest_part = self._labels[start:self._num_examples]
# Shuffle the data
if shuffle:
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._images = self.images[perm]
self._labels = self.labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size - rest_num_examples
end = self._index_in_epoch
images_new_part = self._images[start:end]
labels_new_part = self._labels[start:end]
return numpy.concatenate((images_rest_part, images_new_part), axis=0), numpy.concatenate((labels_rest_part, labels_new_part), axis=0)
else:
self._index_in_epoch += batch_size
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
def read_data_sets(train_dir,
fake_data=False,
one_hot=False,
dtype=dtypes.float32,
reshape=True,
validation_size=5000,
seed=None):
if fake_data:
def fake():
return DataSet(
[], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
train = fake()
validation = fake()
test = fake()
return base.Datasets(train=train, validation=validation, test=test)
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
local_file = base.maybe_download(TRAIN_IMAGES, train_dir,
SOURCE_URL + TRAIN_IMAGES)
with open(local_file, 'rb') as f:
train_images = extract_images(f)
local_file = base.maybe_download(TRAIN_LABELS, train_dir,
SOURCE_URL + TRAIN_LABELS)
with open(local_file, 'rb') as f:
train_labels = extract_labels(f, one_hot=one_hot)
local_file = base.maybe_download(TEST_IMAGES, train_dir,
SOURCE_URL + TEST_IMAGES)
with open(local_file, 'rb') as f:
test_images = extract_images(f)
local_file = base.maybe_download(TEST_LABELS, train_dir,
SOURCE_URL + TEST_LABELS)
with open(local_file, 'rb') as f:
test_labels = extract_labels(f, one_hot=one_hot)
if not 0 <= validation_size <= len(train_images):
raise ValueError(
'Validation size should be between 0 and {}. Received: {}.'
.format(len(train_images), validation_size))
validation_images = train_images[:validation_size]
validation_labels = train_labels[:validation_size]
train_images = train_images[validation_size:]
train_labels = train_labels[validation_size:]
options = dict(dtype=dtype, reshape=reshape, seed=seed)
train = DataSet(train_images, train_labels, **options)
validation = DataSet(validation_images, validation_labels, **options)
test = DataSet(test_images, test_labels, **options)
return base.Datasets(train=train, validation=validation, test=test)
def load_mnist(train_dir='MNIST-data'):
return read_data_sets(train_dir)
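# A usage sketch of the readers above (illustrative only):
if __name__ == '__main__':
    mnist = read_data_sets('MNIST-data', one_hot=True)
    images, labels = mnist.train.next_batch(64)
    print(images.shape, labels.shape)   # (64, 784) and (64, 10)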
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from dragon.vm.tensorflow.contrib.framework.ops.variables import *
from dragon.vm.tensorflow.contrib.layers.layers import \
convolution, convolution2d, conv2d
from dragon.vm.tensorflow.contrib.layers.layers import \
fully_connected
from dragon.vm.tensorflow.contrib.layers.layers import \
batch_norm
from dragon.vm.tensorflow.contrib.layers.layers import \
avg_pool2d, max_pool2d
from dragon.vm.tensorflow.contrib.layers.layers import \
flatten
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from dragon.vm.tensorflow.contrib.learn.datasets.mnist import read_data_sets
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = ['constant']
import numpy as np
import dragon.core.workspace as ws
from dragon.core.tensor import Tensor
from dragon.vm.tensorflow.framework import dtypes
def constant(value, dtype=None, shape=None, name=None, verify_shape=False):
# determine the data type
    if dtype is None: dtype = dtypes.float32
if isinstance(value, np.ndarray):
feed = value.astype(dtype.as_numpy_dtype)
elif isinstance(value, list):
feed = np.array(value, dtype.as_numpy_dtype)
else:
feed = np.array([value], dtype.as_numpy_dtype)
# determine the shape
if shape is not None:
if feed.size == 1:
# case 1: broadcast with scalar value
c = feed[0]
feed = np.zeros(shape, dtype.as_numpy_dtype)
feed.fill(c)
else:
# case 2: reshape directly
if verify_shape:
if shape is not None:
if len(shape) != len(value.shape):
                        raise RuntimeError(
                            'The constant expects {} dimensions, '
                            'but the given value has {} dimensions.'.format(
                                len(shape), len(value.shape)))
for i in xrange(len(shape)):
if shape[i] is None: continue
                        if shape[i] != value.shape[i]:
                            raise RuntimeError(
                                'The constant expects shape ({}), '
                                'but the given value has shape ({}).'.format(
                                    ','.join([str(dim) for dim in shape]),
                                    ','.join([str(dim) for dim in value.shape])))
feed = feed.reshape(shape)
# feed to VM
tensor = Tensor(name)
tensor.shape = list(feed.shape)
ws.FeedTensor(tensor, feed)
return tensor
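# A small sketch of constant() (illustrative only):
if __name__ == '__main__':
    a = constant(1.0, name='a')                        # a float32 scalar, stored with shape [1]
    b = constant([1, 2, 3], dtype=dtypes.int32, name='b')
    c = constant(0.0, shape=[2, 3], name='c')          # a scalar broadcast to (2, 3)
    print(a.shape, b.shape, c.shape)                   # [1] [3] [2, 3]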
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import numpy as np
from dragon.vm.tensorflow.protobuf import types_pb2
class DType(object):
"""
The basic data type.
"""
def __init__(self, type_enum):
type_enum = int(type_enum)
if (type_enum not in types_pb2.DataType.values()
or type_enum == types_pb2.DT_INVALID):
raise TypeError(
"type_enum is not a valid types_pb2.DataType: %s" % type_enum)
self._type_enum = type_enum
@property
def base_dtype(self):
return self
@property
def real_dtype(self):
base = self.base_dtype
if base == complex64:
return float32
elif base == complex128:
return float64
else:
return self
@property
def is_numpy_compatible(self):
return (self._type_enum != types_pb2.DT_RESOURCE and
self._type_enum != types_pb2.DT_RESOURCE_REF)
@property
def as_numpy_dtype(self):
return _TF_TO_NP[self._type_enum]
@property
def as_datatype_enum(self):
return self._type_enum
@property
def is_bool(self):
return self.base_dtype == bool
@property
def is_integer(self):
return (self.is_numpy_compatible and not self.is_quantized and
issubclass(self.as_numpy_dtype, np.integer))
@property
def is_floating(self):
return self.is_numpy_compatible and issubclass(self.as_numpy_dtype,
np.floating)
@property
def is_complex(self):
return self.base_dtype in (complex64, complex128)
@property
def is_quantized(self):
return self.base_dtype in [qint8, quint8, qint16, quint16, qint32, bfloat16]
@property
def is_unsigned(self):
try:
return self.min == 0
except TypeError:
return False
@property
def min(self):
if (self.is_quantized or self.base_dtype in
(bool, string, complex64, complex128)):
raise TypeError("Cannot find minimum value of %s." % self)
try:
return np.finfo(self.as_numpy_dtype()).min
except:
try:
return np.iinfo(self.as_numpy_dtype()).min
except:
raise TypeError("Cannot find minimum value of %s." % self)
@property
def max(self):
if (self.is_quantized or self.base_dtype in
(bool, string, complex64, complex128)):
raise TypeError("Cannot find maximum value of %s." % self)
try:
return np.finfo(self.as_numpy_dtype()).max
except:
try:
return np.iinfo(self.as_numpy_dtype()).max
except:
raise TypeError("Cannot find maximum value of %s." % self)
@property
def limits(self, clip_negative=True):
min, max = dtype_range[self.as_numpy_dtype]
if clip_negative:
min = 0
return min, max
def is_compatible_with(self, other):
other = as_dtype(other)
return self._type_enum in (
other.as_datatype_enum, other.base_dtype.as_datatype_enum)
def __eq__(self, other):
if other is None:
return False
try:
dtype = as_dtype(other).as_datatype_enum
return self._type_enum == dtype
except TypeError:
return False
def __ne__(self, other):
return not self.__eq__(other)
@property
def name(self):
"""Returns the string name for this `DType`."""
return _TYPE_TO_STRING[self._type_enum]
def __int__(self):
return self._type_enum
def __str__(self):
return "<dtype: %r>" % self.name
def __hash__(self):
return self._type_enum
# Define data type range of numpy dtype
dtype_range = {np.bool_: (False, True),
np.bool8: (False, True),
np.uint8: (0, 255),
np.uint16: (0, 65535),
np.int8: (-128, 127),
np.int16: (-32768, 32767),
np.int64: (-2 ** 63, 2 ** 63 - 1),
np.uint64: (0, 2 ** 64 - 1),
np.int32: (-2 ** 31, 2 ** 31 - 1),
np.uint32: (0, 2 ** 32 - 1),
np.float32: (-1, 1),
np.float64: (-1, 1)}
# Define standard wrappers for the types_pb2.DataType enum.
resource = DType(types_pb2.DT_RESOURCE)
float16 = DType(types_pb2.DT_HALF)
half = float16
float32 = DType(types_pb2.DT_FLOAT)
float64 = DType(types_pb2.DT_DOUBLE)
double = float64
int32 = DType(types_pb2.DT_INT32)
uint8 = DType(types_pb2.DT_UINT8)
uint16 = DType(types_pb2.DT_UINT16)
int16 = DType(types_pb2.DT_INT16)
int8 = DType(types_pb2.DT_INT8)
string = DType(types_pb2.DT_STRING)
complex64 = DType(types_pb2.DT_COMPLEX64)
complex128 = DType(types_pb2.DT_COMPLEX128)
int64 = DType(types_pb2.DT_INT64)
bool = DType(types_pb2.DT_BOOL)
qint8 = DType(types_pb2.DT_QINT8)
quint8 = DType(types_pb2.DT_QUINT8)
qint16 = DType(types_pb2.DT_QINT16)
quint16 = DType(types_pb2.DT_QUINT16)
qint32 = DType(types_pb2.DT_QINT32)
bfloat16 = DType(types_pb2.DT_BFLOAT16)
# Standard mappings between types_pb2.DataType values and string names.
_TYPE_TO_STRING = {
types_pb2.DT_HALF: "float16",
types_pb2.DT_FLOAT: "float32",
types_pb2.DT_DOUBLE: "float64",
types_pb2.DT_INT32: "int32",
types_pb2.DT_UINT8: "uint8",
types_pb2.DT_UINT16: "uint16",
types_pb2.DT_INT16: "int16",
types_pb2.DT_INT8: "int8",
types_pb2.DT_STRING: "string",
types_pb2.DT_COMPLEX64: "complex64",
types_pb2.DT_COMPLEX128: "complex128",
types_pb2.DT_INT64: "int64",
types_pb2.DT_BOOL: "bool",
types_pb2.DT_QINT8: "qint8",
types_pb2.DT_QUINT8: "quint8",
types_pb2.DT_QINT16: "qint16",
types_pb2.DT_QUINT16: "quint16",
types_pb2.DT_QINT32: "qint32",
types_pb2.DT_BFLOAT16: "bfloat16",
types_pb2.DT_RESOURCE: "resource",
types_pb2.DT_HALF_REF: "float16_ref",
types_pb2.DT_FLOAT_REF: "float32_ref",
types_pb2.DT_DOUBLE_REF: "float64_ref",
types_pb2.DT_INT32_REF: "int32_ref",
types_pb2.DT_UINT8_REF: "uint8_ref",
types_pb2.DT_UINT16_REF: "uint16_ref",
types_pb2.DT_INT16_REF: "int16_ref",
types_pb2.DT_INT8_REF: "int8_ref",
types_pb2.DT_STRING_REF: "string_ref",
types_pb2.DT_COMPLEX64_REF: "complex64_ref",
types_pb2.DT_COMPLEX128_REF: "complex128_ref",
types_pb2.DT_INT64_REF: "int64_ref",
types_pb2.DT_BOOL_REF: "bool_ref",
types_pb2.DT_QINT8_REF: "qint8_ref",
types_pb2.DT_QUINT8_REF: "quint8_ref",
types_pb2.DT_QINT16_REF: "qint16_ref",
types_pb2.DT_QUINT16_REF: "quint16_ref",
types_pb2.DT_QINT32_REF: "qint32_ref",
types_pb2.DT_BFLOAT16_REF: "bfloat16_ref",
types_pb2.DT_RESOURCE_REF: "resource_ref",
}
# Standard mappings between types_pb2.DataType values and numpy.dtypes.
# Numpy representation for quantized dtypes.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
_NP_TO_TF = frozenset([
(np.float16, float16),
(np.float32, float32),
(np.float64, float64),
(np.int32, int32),
(np.int64, int64),
(np.uint8, uint8),
(np.uint16, uint16),
(np.int16, int16),
(np.int8, int8),
(np.complex64, complex64),
(np.complex128, complex128),
(np.object, string),
(np.bool, bool),
(_np_qint8, qint8),
(_np_quint8, quint8),
(_np_qint16, qint16),
(_np_quint16, quint16),
(_np_qint32, qint32),
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
])
_TF_TO_NP = {
types_pb2.DT_HALF: np.float16,
types_pb2.DT_FLOAT: np.float32,
types_pb2.DT_DOUBLE: np.float64,
types_pb2.DT_INT32: np.int32,
types_pb2.DT_UINT8: np.uint8,
types_pb2.DT_UINT16: np.uint16,
types_pb2.DT_INT16: np.int16,
types_pb2.DT_INT8: np.int8,
# NOTE(touts): For strings we use np.object as it supports variable length
# strings.
types_pb2.DT_STRING: np.object,
types_pb2.DT_COMPLEX64: np.complex64,
types_pb2.DT_COMPLEX128: np.complex128,
types_pb2.DT_INT64: np.int64,
types_pb2.DT_BOOL: np.bool,
types_pb2.DT_QINT8: _np_qint8,
types_pb2.DT_QUINT8: _np_quint8,
types_pb2.DT_QINT16: _np_qint16,
types_pb2.DT_QUINT16: _np_quint16,
types_pb2.DT_QINT32: _np_qint32,
types_pb2.DT_BFLOAT16: np.uint16,
}
def as_dtype(type_value):
if isinstance(type_value, DType):
return type_value
if isinstance(type_value, np.dtype):
if type_value.type == np.string_ or type_value.type == np.unicode_:
return string
for key, val in _NP_TO_TF:
try:
if key == type_value:
return val
except TypeError as e:
raise TypeError("Cannot convert {} to a dtype. {}".format(type_value, e))
raise TypeError("Cannot convert value %r to a TensorFlow DType." % type_value)
\ No newline at end of file
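# A small sketch of the DType helpers above (illustrative only):
if __name__ == '__main__':
    assert as_dtype(np.float32) is float32
    assert float32.is_floating and not float32.is_integer
    print(float32.name, float32.as_numpy_dtype)   # float32 <class 'numpy.float32'>
    print(int32.min, int32.max)                   # -2147483648 2147483647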
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from .ops import Graph
# Utilities used when building a Graph.
from dragon.vm.tensorflow.framework.ops import device
from dragon.vm.tensorflow.framework.ops import name_scope
from dragon.vm.tensorflow.framework.ops import get_default_graph
from dragon.vm.tensorflow.framework.ops import add_to_collection
from dragon.vm.tensorflow.framework.ops import get_collection
from dragon.vm.tensorflow.framework.ops import convert_to_tensor
from dragon.vm.tensorflow.framework.ops import GraphKeys
from dragon.vm.tensorflow.framework.constant_op import *
from dragon.vm.tensorflow.framework.dtypes import *
from dragon.vm.tensorflow.framework.tensor_shape import Dimension
from dragon.vm.tensorflow.framework.tensor_shape import TensorShape
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import re
from dragon.core.tensor import Tensor
from dragon.core.scope import TensorScope, DeviceScope
from dragon.vm.tensorflow.framework import dtypes
_default_graph = None
def convert_to_tensor(value, dtype=None, name=None, **kwargs):
"""Converts the given value to a Tensor.
Parameters
----------
value : basic type, list or numpy.ndarray
The value to convert.
dtype : Dtype or None
The data type. If ``None``, inferred from the type of `value`.
name : str or None
The Optional name.
Returns
-------
Tensor
The output tensor.
"""
if dtype is not None:
if not isinstance(dtype, str):
if isinstance(dtype, dtypes.DType):
dtype = dtype.name
else:
                raise ValueError('The dtype should be a str or a tf.DType.')
tensor = Tensor(name=name, dtype=dtype)
tensor.set_value(value)
return tensor
class Graph(object):
"""
A virtual graph.
"""
def __init__(self):
self._collections = {}
def get_collection_ref(self, name):
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
coll_list = self._collections.get(name, None)
if coll_list is None:
return []
if scope is None:
return list(coll_list)
else:
filter_coll_list = []
regex = re.compile(scope)
for item in coll_list:
if hasattr(item, "name") and regex.match(item.name):
filter_coll_list.append(item)
return filter_coll_list
def add_to_collection(self, name, value):
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
for name in names:
self.add_to_collection(name, value)
def device(self, device_name_or_function):
if not isinstance(device_name_or_function, str):
raise TypeError('The device function should be a str.')
device_and_id = device_name_or_function.split('/')[1]
device, id = device_and_id.split(':')
if device not in ['cpu', 'gpu']:
raise ValueError('The device should either be cpu or gpu.')
try:
id = int(id)
except Exception as e:
            raise ValueError('The device id should be an integer.')
return DeviceScope(device, id=id, use_cudnn=True)
def device(device_name_or_function):
return get_default_graph().device(device_name_or_function)
def get_default_graph():
global _default_graph
if _default_graph is None:
_default_graph = Graph()
return _default_graph
class GraphKeys(object):
GLOBAL_VARIABLES = "variables"
# Key to collect local variables that are local to the machine and are not
# saved/restored.
LOCAL_VARIABLES = "local_variables"
# Key to collect model variables defined by layers.
MODEL_VARIABLES = "model_variables"
# Key to collect Variable objects that will be trained by the
# optimizers.
TRAINABLE_VARIABLES = "trainable_variables"
# Key to collect summaries.
SUMMARIES = "summaries"
# Key to collect QueueRunners.
QUEUE_RUNNERS = "queue_runners"
# Key to collect table initializers.
TABLE_INITIALIZERS = "table_initializer"
# Key to collect asset filepaths. An asset represents an external resource
# like a vocabulary file.
ASSET_FILEPATHS = "asset_filepaths"
# Key to collect Variable objects that keep moving averages.
MOVING_AVERAGE_VARIABLES = "moving_average_variables"
# Key to collect regularization losses at graph construction.
REGULARIZATION_LOSSES = "regularization_losses"
# Key to collect concatenated sharded variables.
CONCATENATED_VARIABLES = "concatenated_variables"
# Key to collect savers.
SAVERS = "savers"
# Key to collect weights
WEIGHTS = "weights"
# Key to collect biases
BIASES = "biases"
# Key to collect activations
ACTIVATIONS = "activations"
# Key to collect update_ops
UPDATE_OPS = "update_ops"
# Key to collect losses
LOSSES = "losses"
# Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
SAVEABLE_OBJECTS = "saveable_objects"
# Key to collect all shared resources used by the graph which need to be
# initialized once per cluster.
RESOURCES = "resources"
# Key to collect all shared resources used in this graph which need to be
# initialized once per session.
LOCAL_RESOURCES = "local_resources"
# Trainable resource-style variables.
TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"
# Key to indicate various ops.
INIT_OP = "init_op"
LOCAL_INIT_OP = "local_init_op"
READY_OP = "ready_op"
READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
SUMMARY_OP = "summary_op"
GLOBAL_STEP = "global_step"
# Used to count the number of evaluations performed during a single evaluation
# run.
EVAL_STEP = "eval_step"
TRAIN_OP = "train_op"
# Key for control flow context.
COND_CONTEXT = "cond_context"
WHILE_CONTEXT = "while_context"
# Key for streaming model ports.
# NOTE(yuanbyu): internal and experimental.
_STREAMING_MODEL_PORTS = "streaming_model_ports"
def get_collection_ref(key):
return get_default_graph().get_collection_ref(key)
def get_collection(key, scope=None):
return get_default_graph().get_collection(key, scope)
def add_to_collection(name, value):
get_default_graph().add_to_collection(name, value)
def add_to_collections(names, value):
get_default_graph().add_to_collections(names, value)
def name_scope(name, default_name=None, values=None):
n = default_name if name is None else name
n = '' if n is None else n
return TensorScope(prefix=n)
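# A small sketch of the collection helpers above. Illustrative only: the
# tensor name is a placeholder.
if __name__ == '__main__':
    w = Tensor('conv1/weights')
    add_to_collection(GraphKeys.TRAINABLE_VARIABLES, w)
    print(get_collection(GraphKeys.TRAINABLE_VARIABLES))           # [w]
    print(get_collection(GraphKeys.TRAINABLE_VARIABLES, 'conv1'))  # filtered by the scope regex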
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import dragon.config as config
DEFAULT_GRAPH_SEED = 87654321
_MAXINT32 = 2**31 - 1
def _truncate_seed(seed):
return seed % _MAXINT32 # Truncate to fit into 32-bit integer
def get_seed(op_seed):
"""Return the global random seed.
Parameters
----------
op_seed : int
The optional seed to use.
    Returns
    -------
    tuple
        A tuple of two integer seeds.
"""
graph_seed = config.GetRandomSeed()
if graph_seed is not None:
if op_seed is None:
# pylint: disable=protected-access
op_seed = graph_seed
seeds = _truncate_seed(graph_seed), _truncate_seed(op_seed)
else:
if op_seed is not None:
seeds = DEFAULT_GRAPH_SEED, _truncate_seed(op_seed)
else:
seeds = None, None
return seeds
def set_random_seed(seed):
"""Set the global random seed.
Parameters
----------
seed : int
The seed to use.
"""
config.SetRandomSeed(seed)
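# A small sketch of get_seed(). Illustrative only: it assumes
# config.GetRandomSeed() returns the value set via set_random_seed().
if __name__ == '__main__':
    set_random_seed(1337)
    print(get_seed(None))   # (1337, 1337): the op seed falls back to the graph seed
    print(get_seed(7))      # (1337, 7)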
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from dragon.core.tensor import Tensor
class Dimension(object):
def __init__(self, value):
if value is None:
self._value = None
else:
self._value = int(value)
if self._value < 0:
raise ValueError("Dimension %d must be >= 0" % self._value)
@property
def value(self):
return self._value
def __repr__(self):
return "Dimension(%s)" % repr(self._value)
def __str__(self):
value = self._value
return "?" if value is None else str(value)
def __eq__(self, other):
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value == other.value
def as_dimension(value):
if isinstance(value, Dimension): return value
else: return Dimension(value)
class TensorShape(object):
def __init__(self, dims):
if dims is None:
self._dims = None
elif isinstance(dims, TensorShape):
self._dims = dims.dims
else:
try:
dims_iter = iter(dims)
except TypeError:
self._dims = [as_dimension(dims)]
else:
self._dims = [as_dimension(d) for d in dims_iter]
@property
def dims(self):
return self._dims
@property
def ndims(self):
if self._dims is None:
return None
else:
return len(self._dims)
def as_list(self):
if self._dims is None:
raise ValueError("as_list() is not defined on an unknown TensorShape.")
return [dim.value for dim in self._dims]
def __repr__(self):
return "TensorShape(%r)" % self._dims
def __str__(self):
if self.ndims is None:
return "<unknown>"
elif self.ndims == 1:
return "(%s,)" % self._dims[0]
else:
return "(%s)" % ", ".join(str(d) for d in self._dims)
def __getitem__(self, key):
if self._dims is not None:
if isinstance(key, slice):
return TensorShape(self._dims[key])
else:
return self._dims[key]
else:
return Dimension(None)
def get_shape(self):
"""
Construct the shape descriptor.
Returns
-------
TensorShape
The shape description.
"""
return TensorShape(self.shape)
Tensor.get_shape = get_shape
\ No newline at end of file
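# A small sketch of TensorShape / Dimension (illustrative only):
if __name__ == '__main__':
    shape = TensorShape([None, 3, 224, 224])
    print(shape)             # (?, 3, 224, 224)
    print(shape.ndims)       # 4
    print(shape.as_list())   # [None, 3, 224, 224]
    print(shape[1:3])        # (3, 224)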
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import re
import collections
import weakref
from dragon.core.scope import get_tensor_scope
from dragon.vm.tensorflow.framework import ops
from dragon.vm.tensorflow.framework import dtypes
from dragon.vm.tensorflow.ops import var_scope as vs
from dragon.vm.tensorflow.ops import variables as tf_variables
from dragon.vm.tensorflow.util import nest
class Layer(object):
def __init__(self, trainable=True, name=None, dtype=dtypes.float32, **kwargs):
allowed_kwargs = {'_scope',
'_reuse'}
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise TypeError('Keyword argument not understood:', kwarg)
self.trainable = trainable
self.built = False
self._trainable_weights = []
self._non_trainable_weights = []
self._updates = []
self._losses = []
self._reuse = kwargs.get('_reuse')
self._graph = ops.get_default_graph()
self._per_input_losses = {}
self._per_input_updates = {}
self.dtype = dtypes.as_dtype(dtype)
self.input_spec = None
# Determine layer name
if name is None:
base_name = _to_snake_case(self.__class__.__name__)
self.name = _unique_layer_name(base_name)
else:
base_name = name
self.name = name
self._base_name = base_name
# Determine variable scope.
self._scope = None
def build(self, _):
self.built = True
def call(self, inputs, *args, **kwargs):
raise NotImplementedError
@property
def updates(self):
return self._updates
def __call__(self, inputs, *args, **kwargs):
with vs.variable_scope(self._scope,
reuse=self.built or self._reuse) as scope:
with ops.name_scope(scope.original_name_scope):
if not self.built:
input_shapes = [x.get_shape()
for x in nest.flatten(inputs)]
if len(input_shapes) == 1:
self.build(input_shapes[0])
else:
self.build(input_shapes)
outputs = self.call(inputs, *args, **kwargs)
# # Apply activity regularization.
# # Note that it should be applied every time the layer creates a new
# # output, since it is output-specific.
# if hasattr(self, 'activity_regularizer') and self.activity_regularizer:
# output_list = _to_list(outputs)
# for output in output_list:
# with ops.name_scope('ActivityRegularizer'):
# activity_regularization = self.activity_regularizer(output)
# self.add_loss(activity_regularization)
# _add_elements_to_collection(
# activity_regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
# Update global default collections.
_add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
self.built = True
return outputs
def add_variable(self, name, shape, dtype=None, trainable=True,
initializer=None, regularizer=None):
if dtype is None:
dtype = self.dtype
existing_variables = set(tf_variables.global_variables())
with vs.variable_scope(self._scope,
reuse=self.built or self._reuse) as scope:
with ops.name_scope(scope.original_name_scope):
full_name = get_tensor_scope() + name
variable = vs.get_variable(name,
shape=shape,
initializer=initializer,
dtype=dtypes.as_dtype(dtype),
trainable=trainable and self.trainable)
if variable in existing_variables:
# Work only if the layer is built
return variable
if regularizer:
raise NotImplementedError()
if trainable:
self._trainable_weights.append(variable)
else:
self._non_trainable_weights.append(variable)
return variable
def apply(self, inputs, *args, **kwargs):
return self.__call__(inputs, *args, **kwargs)
class InputSpec(object):
def __init__(self, dtype=None, shape=None,
ndim=None, max_ndim=None, min_ndim=None, axes=None):
self.dtype = dtype
self.shape = shape
if shape is not None:
self.ndim = len(shape)
else:
self.ndim = ndim
self.max_ndim = max_ndim
self.min_ndim = min_ndim
self.axes = axes or {}
def _to_snake_case(name):
intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
if insecure[0] != '_': return insecure
return 'private' + insecure
PER_GRAPH_LAYER_NAME_UIDS = weakref.WeakKeyDictionary()
def _unique_layer_name(name):
graph = ops.get_default_graph()
if graph not in PER_GRAPH_LAYER_NAME_UIDS:
PER_GRAPH_LAYER_NAME_UIDS[graph] = collections.defaultdict(int)
layer_name_uids = PER_GRAPH_LAYER_NAME_UIDS[graph]
    layer_name_uids[name] += 1
    return name + '_' + str(layer_name_uids[name])
def _to_list(x):
if isinstance(x, (list, tuple)):
return list(x)
return [x]
def _add_elements_to_collection(elements, collection_list):
elements = _to_list(elements)
collection_list = _to_list(collection_list)
for name in collection_list:
collection = ops.get_collection_ref(name)
collection_set = set(collection)
for element in elements:
if element not in collection_set:
collection.append(element)
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from dragon.vm.tensorflow.framework import tensor_shape
from dragon.vm.tensorflow.layers import base, utils
from dragon.vm.tensorflow.ops import init_ops
from dragon.vm.tensorflow.ops import nn
class _Conv(base.Layer):
def __init__(self, rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
super(_Conv, self).__init__(trainable=trainable, name=name, **kwargs)
self.rank = rank
self.filters = filters
self.kernel_size = utils.normalize_tuple(kernel_size, rank, 'kernel_size')
self.strides = utils.normalize_tuple(strides, rank, 'strides')
self.padding = utils.normalize_padding(padding)
self.data_format = utils.normalize_data_format(data_format)
self.dilation_rate = utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
self.activation = activation
self.use_bias = use_bias
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.activity_regularizer = activity_regularizer
self.input_spec = base.InputSpec(ndim=self.rank + 2)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis].value
if self.data_format == 'channels_first':
# For channels first: (n_out, n_in, k_h, k_w)
kernel_shape = (self.filters, input_dim) + self.kernel_size
else:
# For channels last: (k_h, k_w, n_in, n_out)
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.kernel = self.add_variable(name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_variable(name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.input_spec = base.InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
tf_data_format = \
utils.convert_data_format(self.data_format, self.rank + 2)
outputs = nn.convolution(
input=inputs,
filter=self.kernel,
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=self.padding.upper(),
data_format=tf_data_format)
if self.bias is not None:
outputs = nn.bias_add(outputs, self.bias, data_format=tf_data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
class Conv2D(_Conv):
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
super(Conv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
trainable=trainable,
name=name, **kwargs)
def conv2d(inputs,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
reuse=None):
layer = Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
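# A minimal usage sketch; ``x`` is assumed to be an existing 4D NHWC Tensor in
# the surrounding graph, and ``conv2d`` to be re-exported as ``tf.layers.conv2d``
# by the layers package:
#
# >>> y = tf.layers.conv2d(x, filters=16, kernel_size=3,
# ...                      strides=(1, 1), padding='same',
# ...                      activation=tf.nn.relu, name='conv1')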
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from dragon.vm.tensorflow.framework import tensor_shape
from dragon.vm.tensorflow.layers import base, utils
from dragon.vm.tensorflow.ops import init_ops
from dragon.vm.tensorflow.ops import nn, standard_ops
class Dense(base.Layer):
def __init__(self, units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
super(Dense, self).__init__(trainable=trainable, name=name, **kwargs)
self.units = units
self.activation = activation
self.use_bias = use_bias
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.activity_regularizer = activity_regularizer
self.input_spec = base.InputSpec(min_ndim=2)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if input_shape[-1].value is None:
raise ValueError('The last dimension of the inputs to `Dense` '
'should be defined. Found `None`.')
self.input_spec = base.InputSpec(min_ndim=2,
axes={-1: input_shape[-1].value})
self.kernel = self.add_variable('kernel',
shape=[input_shape[-1].value, self.units],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
dtype=self.dtype,
trainable=True)
if self.use_bias:
self.bias = self.add_variable('bias',
shape=[self.units, ],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
dtype=self.dtype,
trainable=True)
else:
self.bias = None
self.built = True
def call(self, inputs):
shape = inputs.get_shape().as_list()
output_shape = shape[:-1] + [self.units]
if len(output_shape) > 2:
raise NotImplementedError()
else:
outputs = standard_ops.matmul(inputs, self.kernel)
if self.use_bias:
outputs = outputs + self.bias
if self.activation is not None:
return self.activation(outputs)
return outputs
def dense(inputs, units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
reuse=None):
layer = Dense(units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
trainable=trainable,
name=name,
_scope=name,
_reuse=reuse)
return layer.apply(inputs)
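# A minimal usage sketch; ``x`` is assumed to be a 2D Tensor already defined in
# the graph, and ``dense`` to be re-exported as ``tf.layers.dense``:
#
# >>> h = tf.layers.dense(x, units=128, activation=tf.nn.relu, name='fc1')
# >>> logits = tf.layers.dense(h, units=10, name='fc2')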
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from dragon.vm.tensorflow.layers.convolutional import conv2d
from dragon.vm.tensorflow.layers.core import dense
from dragon.vm.tensorflow.layers.normalization import \
batch_normalization, batch_norm, BatchNorm
from dragon.vm.tensorflow.layers.pooling import \
average_pooling2d, max_pooling2d
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import dragon.ops as ops
from dragon.vm.tensorflow.framework import tensor_shape
from dragon.vm.tensorflow.layers import base
from dragon.vm.tensorflow.ops import init_ops
class BatchNormalization(base.Layer):
def __init__(self,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=init_ops.zeros_initializer(),
gamma_initializer=init_ops.ones_initializer(),
moving_mean_initializer=init_ops.zeros_initializer(),
moving_variance_initializer=init_ops.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
renorm=False,
renorm_clipping=None,
renorm_momentum=0.99,
fused=False,
trainable=True,
name=None,
**kwargs):
super(BatchNormalization, self).__init__(trainable=trainable, name=name, **kwargs)
self.axis = axis
self.momentum = momentum
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = beta_initializer
self.gamma_initializer = gamma_initializer
self.moving_mean_initializer = moving_mean_initializer
self.moving_variance_initializer = moving_variance_initializer
self.beta_regularizer = beta_regularizer
self.gamma_regularizer = gamma_regularizer
self.renorm = renorm
self.fused = fused
self.trainable = trainable
if fused:
if not center or not scale:
raise ValueError('Fused batch norm requires both `center` and `scale` to be True.')
if renorm:
raise ValueError('renorm is currently not supported.')
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if not input_shape.ndims:
raise ValueError('Input has undefined rank:', input_shape)
ndim = input_shape.ndims
if self.fused and ndim != 4:
raise ValueError(
'Only 4D inputs are currently supported with fused batch norm. '
'Consider reshaping the input to 4D and reshape the output back '
'to its original shape. Got input rank: ', ndim)
if self.axis < 0:
axis = ndim + self.axis
else:
axis = self.axis
if axis < 0 or axis >= ndim:
raise ValueError('Value of `axis` argument ' + str(self.axis) +
' is out of range for input with rank ' + str(ndim))
if axis + 1 == ndim:
self._data_format = 'NHWC'
elif axis == 1:
self._data_format = 'NCHW'
else:
raise ValueError(
'Only axis 1 or last axis are currently supported dimensions for '
'batch norm. Got `axis` dimension: ', axis)
param_dim = input_shape[axis]
if not param_dim.value:
raise ValueError('Input has undefined `axis` dimension. Input shape: ', input_shape)
self.input_spec = base.InputSpec(ndim=ndim, axes={self.axis: param_dim.value})
if self.center:
self.beta = self.add_variable(name='beta',
shape=(param_dim.value,),
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
trainable=self.trainable)
else:
self.beta = None
if self.scale:
self.gamma = self.add_variable(name='gamma',
shape=(param_dim.value,),
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
trainable=True)
else:
self.gamma = None
self.moving_mean = self.add_variable(name='moving_mean',
shape=(param_dim.value,),
initializer=self.moving_mean_initializer,
trainable=False)
self.moving_variance = self.add_variable(name='moving_variance',
shape=(param_dim.value,),
initializer=self.moving_variance_initializer,
trainable=False)
if self.renorm: pass
self.built = True
def call(self, inputs, training=False):
use_stats = 0 if training else 1
if self.fused:
return ops.FusedBatchNorm([inputs, self.moving_mean,
self.moving_variance,
self.gamma,
self.beta],
axis=self.axis,
momentum=self.momentum,
eps=self.epsilon,
use_stats=use_stats,
mode='DEFAULT')
x_norm = ops.BatchNorm([inputs, self.moving_mean,
self.moving_variance],
axis=self.axis,
momentum=self.momentum,
eps=self.epsilon,
use_stats=use_stats,
mode='DEFAULT')
if self.gamma is not None:
# use scale
if self.beta is not None:
return ops.Scale([x_norm, self.gamma, self.beta], axis=self.axis, num_axes=1)
else:
return ops.Scale([x_norm, self.gamma], axis=self.axis, num_axes=1)
else:
# do not use scale
if self.beta is not None:
return ops.BiasAdd([x_norm, self.beta], data_format=self._data_format)
else:
return x_norm
def batch_normalization(inputs,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=init_ops.zeros_initializer(),
gamma_initializer=init_ops.ones_initializer(),
moving_mean_initializer=init_ops.zeros_initializer(),
moving_variance_initializer=init_ops.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
training=False,
trainable=True,
name=None,
reuse=None,
renorm=False,
renorm_clipping=None,
renorm_momentum=0.99,
fused=False):
layer = BatchNormalization(
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
renorm=renorm,
renorm_clipping=renorm_clipping,
renorm_momentum=renorm_momentum,
fused=fused,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs, training=training)
# Aliases
BatchNorm = BatchNormalization
batch_norm = batch_normalization
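# A minimal usage sketch; ``x`` is assumed to be a 4D Tensor in the graph and
# ``batch_normalization`` to be re-exported as ``tf.layers.batch_normalization``.
# Passing ``training=True`` selects batch statistics, as implemented in ``call`` above:
#
# >>> y = tf.layers.batch_normalization(x, axis=-1, momentum=0.99,
# ...                                   epsilon=1e-3, training=True)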
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from dragon.vm.tensorflow.ops import nn
from dragon.vm.tensorflow.layers import base, utils
class _Pooling2D(base.Layer):
def __init__(self, pool_function, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
super(_Pooling2D, self).__init__(name=name, **kwargs)
self.pool_function = pool_function
self.pool_size = utils.normalize_tuple(pool_size, 2, 'pool_size')
self.strides = utils.normalize_tuple(strides, 2, 'strides')
self.padding = utils.normalize_padding(padding)
self.data_format = utils.normalize_data_format(data_format)
self.input_spec = base.InputSpec(ndim=4)
def call(self, inputs):
if self.data_format == 'channels_last':
pool_shape = (1,) + self.pool_size + (1,)
strides = (1,) + self.strides + (1,)
else:
pool_shape = (1, 1) + self.pool_size
strides = (1, 1) + self.strides
outputs = self.pool_function(
inputs,
ksize=pool_shape,
strides=strides,
padding=self.padding.upper(),
data_format=utils.convert_data_format(self.data_format, 4))
return outputs
class AveragePooling2D(_Pooling2D):
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
super(AveragePooling2D, self).__init__(
nn.avg_pool,
pool_size=pool_size, strides=strides, padding=padding,
data_format=data_format, name=name, **kwargs)
def average_pooling2d(inputs,
pool_size, strides,
padding='valid', data_format='channels_last',
name=None):
layer = AveragePooling2D(pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format,
name=name)
return layer.apply(inputs)
class MaxPooling2D(_Pooling2D):
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
super(MaxPooling2D, self).__init__(
nn.max_pool,
pool_size=pool_size, strides=strides, padding=padding,
data_format=data_format, name=name, **kwargs)
def max_pooling2d(inputs,
pool_size, strides,
padding='valid', data_format='channels_last',
name=None):
layer = MaxPooling2D(pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format,
name=name)
return layer.apply(inputs)
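# A minimal usage sketch; ``x`` is assumed to be a 4D NHWC Tensor and the two
# functions to be re-exported as ``tf.layers.max_pooling2d`` /
# ``tf.layers.average_pooling2d``:
#
# >>> y = tf.layers.max_pooling2d(x, pool_size=2, strides=2, padding='valid')
# >>> z = tf.layers.average_pooling2d(x, pool_size=(3, 3), strides=(2, 2), padding='same')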
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
def convert_data_format(data_format, ndim):
if data_format == 'channels_last':
if ndim in (3, 4, 5):
return 'NHWC'
else:
raise ValueError('Input rank not supported:', ndim)
elif data_format == 'channels_first':
if ndim in (3, 4, 5):
return 'NCHW'
else:
raise ValueError('Input rank not supported:', ndim)
else:
raise ValueError('Invalid data_format:', data_format)
def normalize_tuple(value, n, name):
if isinstance(value, int):
return (value,) * n
else:
try:
value_tuple = tuple(value)
except TypeError:
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(n) + ' integers. Received: ' + str(value))
if len(value_tuple) != n:
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(n) + ' integers. Received: ' + str(value))
for single_value in value_tuple:
try:
int(single_value)
except ValueError:
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(n) + ' integers. Received: ' + str(value) + ' '
'including element ' + str(single_value) + ' of type' +
' ' + str(type(single_value)))
return value_tuple
def normalize_data_format(value):
data_format = value.lower()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('The `data_format` argument must be one of '
'"channels_first", "channels_last". Received: ' +
str(value))
return data_format
def normalize_padding(value):
padding = value.lower()
if padding not in {'valid', 'same'}:
raise ValueError('The `padding` argument must be one of "valid", "same". '
'Received: ' + str(padding))
return padding
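# Behavior sketch for the helpers above; the expected values follow directly
# from the code as written:
#
# >>> normalize_tuple(3, 2, 'kernel_size')
# (3, 3)
# >>> convert_data_format('channels_first', 4)
# 'NCHW'
# >>> normalize_padding('SAME')
# 'same'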
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = [
'expand_dims',
'shape',
'zeros',
'ones',
'placeholder',
'concat',
'transpose',
'tile',
'reshape'
]
import dragon.ops as ops
from dragon.core.tensor import Tensor
from dragon.vm.tensorflow.framework import dtypes
def expand_dims(input, axis=None, name=None, dim=None):
if dim is not None:
if axis is not None:
raise ValueError("cannot specify both 'axis' and 'dim'.")
axis = dim
return ops.ExpandDims(input, axis=axis, name=name)
def shape(input, name=None, out_type=dtypes.float32):
return ops.Shape(input, name=name)
def zeros(shape, dtype=dtypes.float32, name=None):
return ops.Fill(shape, value=0.0, name=name)
def ones(shape, dtype=dtypes.float32, name=None):
return ops.Fill(shape, value=1.0, name=name)
def placeholder(dtype, shape=None, name=None):
# check data type
if dtype is not None:
if not isinstance(dtype, dtypes.DType):
raise TypeError('The dtype should be a valid tf data type.')
dtype = dtype.name
return Tensor(name=name, shape=shape, dtype=dtype).Placeholder()
def concat(values, axis, name=None):
return ops.Concat(values, axis=axis, name=name)
def transpose(a, perm=None, name=None):
return ops.Transpose(a, perm=perm, name=name)
def tile(input, multiples, name=None):
return ops.Tile(input, multiples=multiples, name=name)
def reshape(tensor, shape, name=None):
return ops.Reshape(tensor, shape=shape, name=name)
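# A minimal usage sketch for the wrappers above; ``x`` is assumed to be an
# existing Tensor in the surrounding graph:
#
# >>> y = expand_dims(x, axis=0)        # prepend a new axis
# >>> z = reshape(y, shape=[1, -1])     # flatten the remaining axes
# >>> parts = concat([z, z], axis=0)    # stack two copies along axis 0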
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = ['equal']
import dragon.ops as ops
def equal(a, b, name=None):
return ops.Equal([a, b])
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import numpy as np
__all__ = ['int32', 'int64', 'float32', 'bool']
int32 = np.int32
int64 = np.int64
float32 = np.float32
bool = np.bool
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from dragon.core.tensor import Tensor
import dragon.vm.theano.tensor as T
def gradients(ys, xs, **kwargs):
"""Compute the gradients for variables with respect to the cost.
Parameters
----------
ys : Tensor or list of Tensor
The tensor(s) to be differentiated.
xs : Tensor or list of Tensor
The tensor(s) to be differentiated with respect to.
Returns
-------
Tensor or list of Tensor
The gradients of variables.
"""
if not isinstance(ys, list):
ys = [ys]
for y in ys:
dxs = T.grad(y, xs)
return dxs
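# A minimal usage sketch; ``loss``, ``w`` and ``b`` are assumed to be Tensors
# from the surrounding graph. The result is symbolic, mirroring ``T.grad``:
#
# >>> dw = gradients(loss, w)
# >>> grads = gradients(loss, [w, b])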
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = [
'zeros_initializer',
'ones_initializer',
'constant_initializer',
'random_uniform_initializer',
'random_normal_initializer',
'truncated_normal_initializer',
'variance_scaling_initializer',
'glorot_uniform_initializer',
'glorot_normal_initializer',
]
import dragon.ops as ops
from dragon.vm.tensorflow.framework import dtypes
class Initializer(object):
"""
The basic Initializer.
"""
def __call__(self, shape, dtype=None, **kwargs):
raise NotImplementedError
class Zeros(Initializer):
"""The initializer that sets tensors to 0.
Parameters
----------
shape : list, tuple or Tensor
The shape of the initializer.
dtype : DType
The data type.
Returns
-------
Tensor
The initializer.
"""
def __init__(self, dtype=dtypes.float32):
self.dtype = dtype
def __call__(self, shape, dtype=None, **kwargs):
if dtype is None: dtype = self.dtype
return ops.Fill(shape, value=0)
class Ones(Initializer):
"""The initializer that sets tensors to 1.
Parameters
----------
shape : list, tuple or Tensor
The shape of the initializer.
dtype : DType
The data type.
Returns
-------
Tensor
The initializer.
"""
def __init__(self, dtype=dtypes.float32):
self.dtype = dtype
def __call__(self, shape, dtype=None, **kwargs):
if dtype is None: dtype = self.dtype
return ops.Fill(shape, value=1)
class Constant(Initializer):
def __init__(self, value=0, dtype=dtypes.float32):
self.value = value
self.dtype = dtype
def __call__(self, shape, dtype=None, **kwargs):
if dtype is None: dtype = self.dtype
return ops.Fill(shape, value=self.value)
class RandomUniform(Initializer):
def __init__(self, minval=0, maxval=1, dtype=dtypes.float32):
self.minval = minval
self.maxval = maxval
self.dtype = dtype
def __call__(self, shape, dtype=None, **kwargs):
if dtype is None: dtype = self.dtype
return ops.RandomUniform(shape, self.minval, self.maxval)
class RandomNormal(Initializer):
def __init__(self, mean=0.0, stddev=1.0, dtype=dtypes.float32):
self.mean = mean
self.stddev = stddev
assert dtype == dtypes.float32
self.dtype = dtype
def __call__(self, shape, dtype=None, **kwargs):
if dtype is None: dtype = self.dtype
return ops.RandomNormal(shape, self.mean, self.stddev)
class TruncatedNormal(Initializer):
def __init__(self, mean=0.0, stddev=1.0, dtype=dtypes.float32):
self.mean = mean
self.stddev = stddev
assert dtype == dtypes.float32
self.dtype = dtype
def __call__(self, shape, dtype=None, **kwargs):
if dtype is None: dtype = self.dtype
return ops.TruncatedNormal(shape, self.mean, self.stddev)
class VarianceScaling(Initializer):
def __init__(self, scale=1.0,
mode="fan_in",
distribution="normal",
dtype=dtypes.float32):
if scale <= 0.:
raise ValueError("`scale` must be positive float.")
if mode not in {"fan_in", "fan_out", "fan_avg"}:
raise ValueError("Invalid `mode` argument:", mode)
distribution = distribution.lower()
if distribution not in {"normal", "uniform"}:
raise ValueError("Invalid `distribution` argument:", distribution)
self.scale = scale
self.mode = mode
self.distribution = distribution
self.dtype = dtype
def __call__(self, shape, dtype=None, **kwargs):
if dtype is None: dtype = self.dtype
if self.distribution == "normal":
return ops.GlorotNormal(shape=shape, scale=self.scale, mode=self.mode)
else:
return ops.GlorotUniform(shape=shape, scale=self.scale, mode=self.mode)
zeros_initializer = Zeros
ones_initializer = Ones
constant_initializer = Constant
random_uniform_initializer = RandomUniform
random_normal_initializer = RandomNormal
truncated_normal_initializer = TruncatedNormal
variance_scaling_initializer = VarianceScaling
def glorot_uniform_initializer(dtype=dtypes.float32):
return variance_scaling_initializer(scale=6.0,
mode='fan_avg',
distribution='uniform',
dtype=dtype)
def glorot_normal_initializer(dtype=dtypes.float32):
return variance_scaling_initializer(scale=2.0,
mode='fan_avg',
distribution='normal',
dtype=dtype)
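# A minimal usage sketch; an initializer object is constructed first and then
# called with a shape, yielding a symbolic filler Tensor:
#
# >>> init = glorot_uniform_initializer()
# >>> w_init = init(shape=[64, 128])
# >>> b_init = zeros_initializer()(shape=[128])
# >>> c_init = constant_initializer(0.1)(shape=[128])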
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = [
'argmax',
'matmul',
'add',
'subtract',
'multiply',
'divide',
'sub',
'mul',
'div',
'log',
'exp',
'square',
'sqrt',
'reduce_sum',
'reduce_mean',
'sigmoid',
'tanh',
'add_n'
]
from six.moves import range as xrange
import dragon.ops as ops
def argmax(input, axis=None, name=None, dimension=None):
if dimension is not None:
if axis is not None:
raise ValueError("cannot specify both 'axis' and 'dimension'.")
axis = dimension
elif axis is None: axis = 0
return ops.Argmax(input, axis=axis, name=name)
def matmul(a,
b,
transpose_a=False,
transpose_b=False,
name=None):
return ops.Matmul([a, b], TransA=transpose_a, TransB=transpose_b, name=name)
def add(x, y, name=None):
return ops.Add([x, y], name=name)
def subtract(x, y, name=None):
return ops.Sub([x, y], name=name)
def multiply(x, y, name=None):
return ops.Mul([x, y], name=name)
def divide(x, y, name=None):
return ops.Div([x, y], name=name)
def mul(x, y, name=None):
return multiply(x, y, name)
def sub(x, y, name=None):
return subtract(x, y, name)
def div(x, y, name=None):
return divide(x, y, name=name)
def log(x, name=None):
return ops.Log(x, name=name)
def exp(x, name=None):
return ops.Exp(x, name=name)
def square(x, name=None):
return ops.Square(x, name=name)
def sqrt(x, name=None):
return ops.Pow(x, power=0.5, name=name)
def pow(x, power, name=None):
return ops.Pow(x, power=power, name=name)
def reduce_sum(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
if reduction_indices is not None:
if axis is not None:
raise ValueError("cannot specify both 'axis' and 'reduction_indices'.")
axis = reduction_indices
elif axis is None: axis = -1 # reduce all
if isinstance(axis, list) or isinstance(axis, tuple): # reduce continuously
if len(axis) < 1:
raise RuntimeError('The reduce axes should contain at least one element.')
if len(axis) == 1:
return ops.Sum(input_tensor, axis=axis[0], keep_dims=keep_dims)
else:
ret = ops.Sum(input_tensor, axis=axis[0], keep_dims=True)
for i in xrange(1, len(axis) - 1):
ret = ops.Sum(ret, axis=axis[i], keep_dims=True)
return ops.Sum(ret, axis=axis[len(axis) - 1], keep_dims=keep_dims)
else:
return ops.Sum(input_tensor, axis=axis, keep_dims=keep_dims)
def reduce_mean(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
if reduction_indices is not None:
if axis is not None:
raise ValueError("cannot specify both 'axis' and 'reduction_indices'.")
axis = reduction_indices
elif axis is None: axis = -1 # reduce all
if isinstance(axis, list) or isinstance(axis, tuple): # reduce continuously
if len(axis) < 1:
raise RuntimeError('The reduce axes should contain at least one element.')
if len(axis) == 1:
return ops.Mean(input_tensor, axis=axis[0], keep_dims=keep_dims)
else:
ret = ops.Mean(input_tensor, axis=axis[0], keep_dims=True)
for i in xrange(1, len(axis) - 1):
ret = ops.Mean(ret, axis=axis[i], keep_dims=True)
return ops.Mean(ret, axis=axis[len(axis) - 1], keep_dims=keep_dims)
else:
return ops.Mean(input_tensor, axis=axis, keep_dims=keep_dims)
def sigmoid(x, name=None):
return ops.Sigmoid(x, name=name)
def tanh(x, name=None):
return ops.Tanh(x, name=name)
def add_n(inputs, name=None):
return ops.Eltwise(inputs, operation='SUM', name=name)
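# A minimal usage sketch; ``x`` and ``y`` are assumed to be Tensors from the
# surrounding graph. A list/tuple ``axis`` is reduced one axis at a time, as
# implemented above:
#
# >>> s = reduce_sum(x, axis=[1, 2], keep_dims=True)
# >>> m = reduce_mean(x, axis=-1)
# >>> z = add_n([x, y])
# >>> p = matmul(x, y, transpose_b=True)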
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from dragon.vm.tensorflow.ops.nn_ops import *
from dragon.vm.tensorflow.ops.nn_impl import *
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from dragon.core.tensor import Tensor
import dragon.ops as ops
def batch_normalization(x, mean, variance,
offset, scale,
decay=0.9, variance_epsilon=1e-3, name=None):
raise NotImplementedError('Deprecated. Use tf.layers.batch_normalization.')
def batch_norm_with_global_normalization(t, m, v,
beta, gamma,
decay=0.9, variance_epsilon=1e-3,
scale_after_normalization=True, name=None):
raise NotImplementedError('Deprecated. Use tf.layers.batch_normalization.')
def l2_normalize(x, dim, epsilon=1e-12, name=None):
return ops.L2Norm(inputs=x,
axis=dim,
num_axes=1,
eps=epsilon)
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from dragon.core.tensor import Tensor
import dragon.ops as ops
__all__ = [
'convolution',
'relu',
'softmax',
'conv2d',
'conv2d_transpose',
'avg_pool',
'max_pool',
'xw_plus_b',
'bias_add',
'dropout',
'sigmoid_cross_entropy_with_logits',
'softmax_cross_entropy_with_logits',
'sparse_softmax_cross_entropy_with_logits',
'l2_loss'
]
def convolution(input, filter, padding, strides=None,
dilation_rate=None, name=None, data_format=None):
num_total_dims = filter.get_shape().ndims
if num_total_dims is None:
num_total_dims = input.get_shape().ndims
if num_total_dims is None:
raise ValueError("rank of input or filter must be known.")
num_spatial_dims = num_total_dims - 2
# make default parameters
if data_format is None:
data_format = 'NHWC'
if strides is None:
strides = [1] * num_total_dims
else:
if len(strides) != num_total_dims:
_strides = [1] * num_total_dims
_n_provides = len(strides)
if data_format == 'NHWC':
_strides[1 : 1 + _n_provides] = strides
else:
_strides[2 : 2 + _n_provides] = strides
strides = _strides
if dilation_rate is not None:
if len(dilation_rate) != num_total_dims:
_dilation_rate = [1] * num_total_dims
_n_provides = len(dilation_rate)
if data_format == 'NHWC':
_dilation_rate[1 : 1 + _n_provides] = dilation_rate
else:
_dilation_rate[2 : 2 + _n_provides] = dilation_rate
dilation_rate = _dilation_rate
if num_spatial_dims == 2:
return conv2d(input, filter,
strides, padding, dilation_rate,
data_format, name)
else:
raise NotImplementedError('conv{}d is not implemented.'.format(num_spatial_dims))
def relu(features, name=None):
return ops.Relu(features, name=name)
def softmax(logits, dim=-1, name=None):
return ops.Softmax(logits, axis=dim)
def conv2d(input, filter, strides, padding, dilation_rate=None,
data_format='NHWC', name=None, **kwargs):
"""Compute 2D convolution according to the given 4D ``input`` and ``filter``.
For **NHWC** format, filter should have shape ``[filter_height, filter_width, in_channels, out_channels]``.
For **NCHW** format, filter should have shape ``[out_channels, in_channels, filter_height, filter_width]``.
Parameters
----------
input : Tensor
The input tensor.
filter : Tensor
The filter tensor.
strides : list of int
The strides with length 4.
padding : str
The padding algorithm. ``VALID`` or ``SAME``.
dilation_rate : list of int or None
The dilation rates with length 4.
data_format : str
The data format. ``NHWC`` or ``NCHW``.
name : str
The optional name for this operator.
Returns
-------
Tensor
The output tensor.
"""
if filter.shape is None:
raise ValueError('filter must have a valid shape.')
else:
if len(filter.shape) != 4:
raise ValueError('filter must be a 4D Tensor.')
if len(strides) != 4:
raise ValueError('strides must be a list with length 4.')
if dilation_rate is not None:
if len(dilation_rate) != 4:
raise ValueError('dilation_rate must be a list with length 4.')
if data_format == 'NHWC':
output = ops.Conv2d([input, filter],
num_output=filter.shape[3],
kernel_size=filter.shape[0:2],
stride=strides[1:3],
dilation=dilation_rate[1:3] if dilation_rate is not None else 1,
padding=padding,
data_format=data_format)
return output
elif data_format == 'NCHW':
output = ops.Conv2d([input, filter],
num_output=filter.shape[0],
kernel_size=filter.shape[2:4],
stride=strides[2:4],
dilation=dilation_rate[2:4] if dilation_rate is not None else 1,
padding=padding,
data_format=data_format)
return output
else:
raise ValueError('Unknown data format: {}'.format(data_format))
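# A minimal usage sketch; ``x`` is assumed to be a 4D NHWC Tensor and ``w`` a
# filter Tensor whose static shape is known (e.g. [3, 3, 3, 16]):
#
# >>> y = conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')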
def conv2d_transpose(value, filter, output_shape, strides, padding='SAME',
data_format='NHWC', name=None):
"""Compute 2D deconvolution according to the given 4D ``input`` and ``filter``.
For **NHWC** format, filter should be as ``[filter_height, filter_width, out_channels, in_channels]``.
For **NCHW** format, filter should be as ``[in_channels, out_channels, filter_height, filter_width]``.
``output_shape`` will be ignored if padding algorithm is **VALID**.
Parameters
----------
value : Tensor
The input tensor.
filter : Tensor
The filter tensor.
output_shape : list of int
The deterministic output shape for **SAME** padding.
strides : list of int
The strides with length 4.
padding : str
The padding algorithm. ``VALID`` or ``SAME``.
data_format : str
The data format. ``NHWC`` or ``NCHW``.
name : str
The optional name for this operator.
Returns
-------
Tensor
The output tensor.
"""
if filter.shape is None:
raise ValueError('filter must have a valid shape.')
else:
if len(filter.shape) != 4:
raise ValueError('filter must be a 4D Tensor.')
if len(strides) != 4:
raise ValueError('strides must be a list with length 4.')
if not isinstance(output_shape, list):
raise TypeError('output_shape should be a list.')
if len(output_shape) != 4:
raise ValueError('output_shape should be a list with length 4.')
if data_format == 'NHWC':
output = ops.Conv2dTranspose([value, filter],
num_output=filter.shape[2],
kernel_size=filter.shape[0:2],
stride=strides[1:3],
padding=padding,
data_format=data_format,
output_shape=output_shape)
return output
elif data_format == 'NCHW':
output = ops.Conv2dTranspose([value, filter],
num_output=filter.shape[1],
kernel_size=filter.shape[2:4],
stride=strides[2:4],
padding=padding,
data_format=data_format,
output_shape=output_shape)
return output
else:
raise ValueError('Unknown data format: {}'.format(data_format))
def avg_pool(value, ksize, strides, padding, data_format='NHWC', name=None):
"""Perform avg pooling on spatial axes.
Parameters
----------
value : Tensor
The input tensor.
ksize : list of int
The kernel size with length >= 4.
strides : list of int
The strides with length >= 4.
padding : str
The padding algorithm. ``VALID`` or ``SAME``.
data_format : str
The data format. ``NHWC`` or ``NCHW``.
name : None or str
The optional name of op.
Returns
-------
Tensor
The output tensor.
"""
if len(ksize) < 4:
raise ValueError('ksize must be a list with length >=4.')
if len(strides) < 4:
raise ValueError('strides must be a list with length >=4.')
if len(ksize) != len(strides):
raise ValueError('ksize and strides should have the same length.')
if len(ksize) == 4:
if data_format == 'NHWC':
if ksize[0] != 1 or ksize[3] != 1 or strides[0] != 1 or strides[3] != 1:
raise ValueError('The pooling can only be performed on spatial axes.')
return ops.Pool2d(value, [ksize[1], ksize[2]], [strides[1], strides[2]],
padding=padding, data_format=data_format, mode='AVG')
if data_format == 'NCHW':
if ksize[0] != 1 or ksize[1] != 1 or strides[0] != 1 or strides[1] != 1:
raise ValueError('The pooling can only be performed on spatial axes.')
return ops.Pool2d(value, [ksize[2], ksize[3]], [strides[2], strides[3]],
padding=padding, data_format=data_format, mode='AVG')
else:
raise NotImplementedError('Pool{}d has not been implemented yet.'.format(len(ksize) - 2))
def max_pool(value, ksize, strides, padding, data_format='NHWC', name=None):
"""Perform max pooling on spatial axes.
Parameters
----------
value : Tensor
The input tensor.
ksize : list of int
The kernel size with length >= 4.
strides : list of int
The strides with length >= 4.
padding : str
The padding algorithm. ``VALID`` or ``SAME``.
data_format : str
The data format. ``NHWC`` or ``NCHW``.
name : None or str
The optional name of op.
Returns
-------
Tensor
The output tensor.
"""
if len(ksize) < 4:
raise ValueError('ksize must be a list with length >=4.')
if len(strides) < 4:
raise ValueError('strides must be a list with length >=4.')
if len(ksize) != len(strides):
raise ValueError('ksize and strides should have the same length.')
if len(ksize) == 4:
if data_format == 'NHWC':
if ksize[0] != 1 or ksize[3] != 1 or strides[0] != 1 or strides[3] != 1:
raise ValueError('The pooling can only be performed on spatial axes.')
return ops.Pool2d(value, [ksize[1], ksize[2]], [strides[1], strides[2]],
padding=padding, data_format=data_format, mode='MAX')
if data_format == 'NCHW':
if ksize[0] != 1 or ksize[1] != 1 or strides[0] != 1 or strides[1] != 1:
raise ValueError('The pooling can only be performed on spatial axes.')
return ops.Pool2d(value, [ksize[2], ksize[3]], [strides[2], strides[3]],
padding=padding, data_format=data_format, mode='MAX')
else:
raise NotImplementedError('Pool{}d has not been implemented yet.'.format(len(ksize) - 2))
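# A minimal usage sketch; pooling is restricted to the spatial axes, so the
# batch and channel entries of ``ksize`` / ``strides`` must be 1:
#
# >>> y = max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# >>> z = avg_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')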
def xw_plus_b(x, weights, biases, name=None):
if weights.shape is None:
raise ValueError('weights must have a valid shape.')
else:
if len(weights.shape) != 2:
raise ValueError('weights must be a 2D Tensor')
if biases.shape is None:
raise ValueError('biases must a have a valid shape.')
else:
if len(biases.shape) != 1:
raise ValueError('biases must be a 1D Tensor')
if weights.shape[1] != biases.shape[0]:
raise ValueError('the shapes of weights and biases are incompatible.')
return ops.InnerProduct([x, weights, biases], num_output=weights.shape[1], TransW=False)
def bias_add(value, bias, data_format='NHWC', name=None):
return ops.BiasAdd([value, bias], data_format=data_format)
def sigmoid_cross_entropy_with_logits(logits, targets, name=None):
return ops.SigmoidCrossEntropy([logits, targets], normalization='UNIT', name=name)
def softmax_cross_entropy_with_logits(_sentinel=None,
labels=None, logits=None,
dim=-1, name=None):
if _sentinel is not None:
raise ValueError('Only call `softmax_cross_entropy_with_logits` '
'with named arguments (labels=..., logits=..., ...)')
if dim == -1: dim = 1
return ops.SoftmaxCrossEntropy([logits, labels], axis=dim, normalization='UNIT', name=name)
def sparse_softmax_cross_entropy_with_logits(logits, labels, dim=-1, name=None):
if dim == -1: dim = 1
return ops.SparseSoftmaxCrossEntropy([logits, labels], axis=dim, normalization='UNIT', name=name)
def l2_loss(t, name=None):
return (ops.Reduce(ops.Square(t), operation='SUM') * 0.5)
def dropout(x, keep_prob, name=None):
return ops.Dropout(x, 1 - keep_prob)
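# A minimal usage sketch for the activation / loss helpers above; ``logits``,
# ``labels`` and ``features`` are Tensors assumed to exist in the graph:
#
# >>> probs = softmax(logits)
# >>> loss = sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
# >>> h = dropout(relu(features), keep_prob=0.5)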
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = [
'random_normal',
'truncated_normal',
'random_uniform'
]
import dragon.ops as ops
from dragon.vm.tensorflow.framework import dtypes
def random_normal(shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
seed=None,
name=None):
return ops.RandomNormal(shape, mean, stddev)
def truncated_normal(shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
seed=None,
name=None):
return ops.TruncatedNormal(shape, mean, stddev)
def random_uniform(shape,
minval=0,
maxval=None,
dtype=dtypes.float32,
seed=None,
name=None):
return ops.RandomUniform(shape, minval, maxval)
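# A minimal usage sketch; these wrappers return symbolic filler Tensors
# (``seed`` and ``dtype`` are currently ignored by the implementation):
#
# >>> w = random_normal([64, 128], mean=0.0, stddev=0.01)
# >>> u = random_uniform([128], minval=-0.05, maxval=0.05)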
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from dragon.vm.tensorflow.ops.variables import *
from dragon.vm.tensorflow.ops.var_scope import *
# Ops
from dragon.vm.tensorflow.ops.init_ops import *
from dragon.vm.tensorflow.ops.random_ops import *
from dragon.vm.tensorflow.ops.math_ops import *
from dragon.vm.tensorflow.ops.array_ops import *
from dragon.vm.tensorflow.ops.control_flow_ops import *
from dragon.vm.tensorflow.ops.nn_ops import *
from dragon.vm.tensorflow.ops.gradients_impl import gradients
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from dragon.vm.tensorflow.framework import dtypes
from dragon.vm.tensorflow.ops.variables import Variable
from dragon.vm.tensorflow.ops import init_ops
_VARSCOPE = None
_VARSTORE = {}
class VariableScope(object):
"""
Construct a Variable.
"""
def __init__(self, reuse, name='', name_scope='', **kwargs):
self._name = name
self._reuse = reuse
self._name_scope = name_scope
if self._name_scope is None:
self._name_scope = ''
self._old_varscope = None
@property
def reuse(self):
return self._reuse
@property
def name(self):
return self._name
@property
def original_name_scope(self):
return self._name_scope
def get_variable(self, name, shape=None, dtype=None, initializer=None,
trainable=True, collections=None, validate_shape=True, **kwargs):
global _VARSTORE
# get full name
from dragon.core.scope import get_tensor_scope
full_name = get_tensor_scope() + name
# create a new variable
if not full_name in _VARSTORE:
if shape is None:
raise ValueError('Must specify a shape for the Variable({}).'.format(full_name))
if initializer is None:
initializer = self._get_default_initializer(name, shape=shape, dtype=dtype)
initial_value = initializer(shape, dtype=dtype)
new_var = Variable(initial_value, trainable=trainable, collections=collections,
validate_shape=validate_shape, name=name, dtype=dtype)
_VARSTORE[full_name] = new_var
return new_var
else:
# existing ?
if self._reuse:
return _VARSTORE[full_name]
raise ValueError('The Variable({}) already exists.'.format(full_name))
def __enter__(self):
global _VARSCOPE
self._old_varscope = _VARSCOPE
_VARSCOPE = self
from dragon.core.scope import get_tensor_scope, set_tensor_scope
prefix = self._name_scope + '/' if self._name_scope != '' else ''
set_tensor_scope(get_tensor_scope() + prefix)
return self
def __exit__(self, type, value, traceback):
global _VARSCOPE
_VARSCOPE = self._old_varscope
from dragon.core.scope import get_tensor_scope, set_tensor_scope
prefix = self._name_scope + '/' if self._name_scope != '' else ''
assert get_tensor_scope().endswith(prefix)
if self._name_scope != '':
set_tensor_scope(get_tensor_scope()[:-len(prefix)])
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
if dtype is None: dtype = dtypes.float32
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
initializer = init_ops.zeros_initializer()
else:
raise ValueError('An initializer for Variable({}) of {} is required.'.
format(name, dtype.base_dtype))
return initializer
def get_variable_scope():
global _VARSCOPE
if _VARSCOPE is None:
_VARSCOPE = VariableScope(False)
return _VARSCOPE
def variable_scope(name_scope, reuse=None, **kwargs):
return VariableScope(reuse, name_scope=name_scope)
def get_variable(name, shape=None, dtype=None, initializer=None,
trainable=True, collections=None, validate_shape=True, **kwargs):
return get_variable_scope().get_variable(name, shape=shape, dtype=dtype,
initializer=initializer, trainable=trainable,
collections=collections, validate_shape=validate_shape)
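# A minimal usage sketch of the scoping protocol above; the second call
# returns the same Variable because ``reuse=True``:
#
# >>> with variable_scope('fc1'):
# ...     w = get_variable('weights', shape=[256, 128],
# ...                      initializer=init_ops.zeros_initializer())
# >>> with variable_scope('fc1', reuse=True):
# ...     w_again = get_variable('weights', shape=[256, 128],
# ...                            initializer=init_ops.zeros_initializer())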
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import copy
from dragon.core.tensor import Tensor
import dragon.vm.theano as theano
from dragon.vm.tensorflow.framework import ops
from dragon.vm.tensorflow.framework import dtypes
from dragon.vm.tensorflow.util.deprecation import deprecated
class Variable(Tensor):
"""
Construct a Variable.
"""
def __init__(self, initial_value=None, trainable=True,
collections=None, validate_shape=True,
name=None, dtype=None, **kwargs):
super(Variable, self).__init__()
if initial_value is None:
raise ValueError('initial_value must be specified.')
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if not isinstance(collections, (list, tuple, set)):
raise ValueError('collections argument to Variable constructor must be a list, tuple, '
'or set. Got the type {}'.format(type(collections)))
if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
# initialization
if isinstance(initial_value, Tensor):
self.clone(initial_value)
if name is not None:
self.name = name
self.expressions.values()[0].output[0] = self.name
else:
# from ..ops.constant_op import constant
# initial_value = constant(initial_value, name=name)
# self.clone(initial_value)
pass
# check data type
if dtype is not None:
if not isinstance(dtype, dtypes.DType):
raise TypeError('The dtype should be a valid tf data type.')
self.dtype = dtype.name
# registration
self.Variable()
if validate_shape:
initial_value_shape = self.shape
if initial_value_shape is None:
raise ValueError('initial_value must have a shape specified.')
ops.add_to_collections(collections, copy.deepcopy(self))
self.expressions = {}
def global_variables():
return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def local_variables():
return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
def model_variables():
return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES)
def trainable_variables():
return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
class VariablesInitializer(object):
def __init__(self, var_list):
self.var_list = var_list
def run(self):
if not hasattr(self, '_init_func'):
self._init_func = theano.function(outputs=self.var_list)
self._init_func()
def variables_initializer(var_list, name="init"):
return VariablesInitializer(var_list)
def global_variables_initializer():
return variables_initializer(global_variables())
@deprecated("2017-03-02", "Use `tf.global_variables_initializer` instead.")
def initialize_all_variables():
"""
See ``tf.global_variables_initializer``.
"""
return global_variables_initializer()
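# A minimal usage sketch, assuming the package is importable as
# ``dragon.vm.tensorflow`` and that ``zeros`` / ``Variable`` are re-exported
# at the top level; the collected initializer is run directly:
#
# >>> import dragon.vm.tensorflow as tf
# >>> w = tf.Variable(tf.zeros([256, 128]), name='w')
# >>> tf.global_variables_initializer().run()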
syntax = "proto2";
package tensorflow;
message GPUOptions {
// A value between 0 and 1 that indicates what fraction of the
// available GPU memory to pre-allocate for each process. 1 means
// to pre-allocate all of the GPU memory, 0.5 means the process
// allocates ~50% of the available GPU memory.
optional double per_process_gpu_memory_fraction = 1;
// The type of GPU allocation strategy to use.
//
// Allowed values:
// "": The empty string (default) uses a system-chosen default
// which may change over time.
//
// "BFC": A "Best-fit with coalescing" algorithm, simplified from a
// version of dlmalloc.
optional string allocator_type = 2;
// Delay deletion of up to this many bytes to reduce the number of
// interactions with gpu driver code. If 0, the system chooses
// a reasonable default (several MBs).
optional int64 deferred_deletion_bytes = 3;
// If true, the allocator does not pre-allocate the entire specified
// GPU memory region, instead starting small and growing as needed.
optional bool allow_growth = 4;
// A comma-separated list of GPU ids that determines the 'visible'
// to 'virtual' mapping of GPU devices. For example, if TensorFlow
// can see 8 GPU devices in the process, and one wanted to map
// visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1", then one
// would specify this field as "5,3". This field is similar in
// spirit to the CUDA_VISIBLE_DEVICES environment variable, except
// it applies to the visible GPU devices in the process.
//
// NOTE: The GPU driver provides the process with the visible GPUs
// in an order which is not guaranteed to have any correlation to
// the *physical* GPU id in the machine. This field is used for
// remapping "visible" to "virtual", which means this operates only
// after the process starts. Users are required to use vendor
// specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
// physical to visible device mapping prior to invoking TensorFlow.
optional string visible_device_list = 5;
// In the event polling loop sleep this many microseconds between
// PollEvents calls, when the queue is not empty. If value is not
// set or set to 0, gets set to a non-zero default.
optional int32 polling_active_delay_usecs = 6;
// In the event polling loop sleep this many milliseconds between
// PollEvents calls, when the queue is empty. If value is not
// set or set to 0, gets set to a non-zero default.
optional int32 polling_inactive_delay_msecs = 7;
// Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
// enabling this option forces all CPU tensors to be allocated with Cuda
// pinned memory. Normally, TensorFlow will infer which tensors should be
// allocated as the pinned memory. But in case where the inference is
// incomplete, this option can significantly speed up the cross-device memory
// copy performance as long as it fits the memory.
// Note that this option is not something that should be
// enabled by default for unknown or very large models, since all Cuda pinned
// memory is unpageable, having too much pinned memory might negatively impact
// the overall host system performance.
optional bool force_gpu_compatible = 8;
}
message GraphOptions {
// If true, use control flow to schedule the activation of Recv nodes.
// (Currently ignored.)
optional bool enable_recv_scheduling = 2;
// Options controlling how graph is optimized.
// OptimizerOptions optimizer_options = 3;
// The number of steps to run before returning a cost model detailing
// the memory usage and performance of each node of the graph. 0 means
// no cost model.
optional int64 build_cost_model = 4;
// The number of steps to skip before collecting statistics for the
// cost model.
optional int64 build_cost_model_after = 9;
// Annotate each Node with Op output shape data, to the extent it can
// be statically inferred.
optional bool infer_shapes = 5;
// Only place the subgraphs that are run, rather than the entire graph.
//
// This is useful for interactive graph building, where one might
// produce graphs that cannot be placed during the debugging
// process. In particular, it allows the client to continue work in
// a session after adding a node to a graph whose placement
// constraints are unsatisfiable.
optional bool place_pruned_graph = 6;
// If true, transfer float values between processes as bfloat16.
optional bool enable_bfloat16_sendrecv = 7;
// If > 0, record a timeline every this many steps.
// EXPERIMENTAL: This currently has no effect in MasterSession.
optional int32 timeline_step = 8;
// Options that control the type and amount of graph rewriting.
// Not currently configurable via the public Python API (i.e. there is no API
// stability guarantee if you import RewriterConfig explicitly).
// RewriterConfig rewrite_options = 10;
}
message ConfigProto {
// Map from device type name (e.g., "CPU" or "GPU" ) to maximum
// number of devices of that type to use. If a particular device
// type is not found in the map, the system picks an appropriate
// number.
// map<string, int32> device_count = 1;
// The execution of an individual op (for some op types) can be
// parallelized on a pool of intra_op_parallelism_threads.
// 0 means the system picks an appropriate number.
optional int32 intra_op_parallelism_threads = 2;
// Nodes that perform blocking operations are enqueued on a pool of
// inter_op_parallelism_threads available in each process.
//
// 0 means the system picks an appropriate number.
//
// Note that the first Session created in the process sets the
// number of threads for all future sessions unless use_per_session_threads is
// true or session_inter_op_thread_pool is configured.
optional int32 inter_op_parallelism_threads = 5;
// If true, use a new set of threads for this session rather than the global
// pool of threads. Only supported by direct sessions.
//
// If false, use the global threads created by the first session, or the
// per-session thread pools configured by session_inter_op_thread_pool.
//
// This option is deprecated. The same effect can be achieved by setting
// session_inter_op_thread_pool to have one element, whose num_threads equals
// inter_op_parallelism_threads.
optional bool use_per_session_threads = 9;
// This option is experimental - it may be replaced with a different mechanism
// in the future.
//
// Configures session thread pools. If this is configured, then RunOptions for
// a Run call can select the thread pool to use.
//
// The intended use is for when some session invocations need to run in a
// background pool limited to a small number of threads:
// - For example, a session may be configured to have one large pool (for
// regular compute) and one small pool (for periodic, low priority work);
// using the small pool is currently the mechanism for limiting the inter-op
// parallelism of the low priority work. Note that it does not limit the
// parallelism of work spawned by a single op kernel implementation.
// - Using this setting is normally not needed in training, but may help some
// serving use cases.
// - It is also generally recommended to set the global_name field of this
// proto, to avoid creating multiple large pools. It is typically better to
// run the non-low-priority work, even across sessions, in a single large
// pool.
// repeated ThreadPoolOptionProto session_inter_op_thread_pool = 12;
// Assignment of Nodes to Devices is recomputed every placement_period
// steps until the system warms up (at which point the recomputation
// typically slows down automatically).
optional int32 placement_period = 3;
// When any filters are present sessions will ignore all devices which do not
// match the filters. Each filter can be partially specified, e.g. "/job:ps"
// "/job:worker/replica:3", etc.
repeated string device_filters = 4;
// Options that apply to all GPUs.
optional GPUOptions gpu_options = 6;
// Whether soft placement is allowed. If allow_soft_placement is true,
// an op will be placed on CPU if
// 1. there's no GPU implementation for the OP
// or
// 2. no GPU devices are known or registered
// or
// 3. need to co-locate with reftype input(s) which are from CPU.
optional bool allow_soft_placement = 7;
// Whether device placements should be logged.
optional bool log_device_placement = 8;
// Options that apply to all graphs.
optional GraphOptions graph_options = 10;
// Global timeout for all blocking operations in this session. If non-zero,
// and not overridden on a per-operation basis, this value will be used as the
// deadline for all blocking operations.
optional int64 operation_timeout_in_ms = 11;
// Options that apply when this session uses the distributed runtime.
// RPCOptions rpc_options = 13;
// Optional list of all workers to use in this session.
// ClusterDef cluster_def = 14;
// Next: 15
}
\ No newline at end of file
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='config.proto',
package='tensorflow',
serialized_pb=_b('\n\x0c\x63onfig.proto\x12\ntensorflow\"\x89\x02\n\nGPUOptions\x12\'\n\x1fper_process_gpu_memory_fraction\x18\x01 \x01(\x01\x12\x16\n\x0e\x61llocator_type\x18\x02 \x01(\t\x12\x1f\n\x17\x64\x65\x66\x65rred_deletion_bytes\x18\x03 \x01(\x03\x12\x14\n\x0c\x61llow_growth\x18\x04 \x01(\x08\x12\x1b\n\x13visible_device_list\x18\x05 \x01(\t\x12\"\n\x1apolling_active_delay_usecs\x18\x06 \x01(\x05\x12$\n\x1cpolling_inactive_delay_msecs\x18\x07 \x01(\x05\x12\x1c\n\x14\x66orce_gpu_compatible\x18\x08 \x01(\x08\"\xd3\x01\n\x0cGraphOptions\x12\x1e\n\x16\x65nable_recv_scheduling\x18\x02 \x01(\x08\x12\x18\n\x10\x62uild_cost_model\x18\x04 \x01(\x03\x12\x1e\n\x16\x62uild_cost_model_after\x18\t \x01(\x03\x12\x14\n\x0cinfer_shapes\x18\x05 \x01(\x08\x12\x1a\n\x12place_pruned_graph\x18\x06 \x01(\x08\x12 \n\x18\x65nable_bfloat16_sendrecv\x18\x07 \x01(\x08\x12\x15\n\rtimeline_step\x18\x08 \x01(\x05\"\xe7\x02\n\x0b\x43onfigProto\x12$\n\x1cintra_op_parallelism_threads\x18\x02 \x01(\x05\x12$\n\x1cinter_op_parallelism_threads\x18\x05 \x01(\x05\x12\x1f\n\x17use_per_session_threads\x18\t \x01(\x08\x12\x18\n\x10placement_period\x18\x03 \x01(\x05\x12\x16\n\x0e\x64\x65vice_filters\x18\x04 \x03(\t\x12+\n\x0bgpu_options\x18\x06 \x01(\x0b\x32\x16.tensorflow.GPUOptions\x12\x1c\n\x14\x61llow_soft_placement\x18\x07 \x01(\x08\x12\x1c\n\x14log_device_placement\x18\x08 \x01(\x08\x12/\n\rgraph_options\x18\n \x01(\x0b\x32\x18.tensorflow.GraphOptions\x12\x1f\n\x17operation_timeout_in_ms\x18\x0b \x01(\x03')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GPUOPTIONS = _descriptor.Descriptor(
name='GPUOptions',
full_name='tensorflow.GPUOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='per_process_gpu_memory_fraction', full_name='tensorflow.GPUOptions.per_process_gpu_memory_fraction', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allocator_type', full_name='tensorflow.GPUOptions.allocator_type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='deferred_deletion_bytes', full_name='tensorflow.GPUOptions.deferred_deletion_bytes', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allow_growth', full_name='tensorflow.GPUOptions.allow_growth', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='visible_device_list', full_name='tensorflow.GPUOptions.visible_device_list', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='polling_active_delay_usecs', full_name='tensorflow.GPUOptions.polling_active_delay_usecs', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='polling_inactive_delay_msecs', full_name='tensorflow.GPUOptions.polling_inactive_delay_msecs', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='force_gpu_compatible', full_name='tensorflow.GPUOptions.force_gpu_compatible', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=29,
serialized_end=294,
)
_GRAPHOPTIONS = _descriptor.Descriptor(
name='GraphOptions',
full_name='tensorflow.GraphOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enable_recv_scheduling', full_name='tensorflow.GraphOptions.enable_recv_scheduling', index=0,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='build_cost_model', full_name='tensorflow.GraphOptions.build_cost_model', index=1,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='build_cost_model_after', full_name='tensorflow.GraphOptions.build_cost_model_after', index=2,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='infer_shapes', full_name='tensorflow.GraphOptions.infer_shapes', index=3,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='place_pruned_graph', full_name='tensorflow.GraphOptions.place_pruned_graph', index=4,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='enable_bfloat16_sendrecv', full_name='tensorflow.GraphOptions.enable_bfloat16_sendrecv', index=5,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timeline_step', full_name='tensorflow.GraphOptions.timeline_step', index=6,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=297,
serialized_end=508,
)
_CONFIGPROTO = _descriptor.Descriptor(
name='ConfigProto',
full_name='tensorflow.ConfigProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='intra_op_parallelism_threads', full_name='tensorflow.ConfigProto.intra_op_parallelism_threads', index=0,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inter_op_parallelism_threads', full_name='tensorflow.ConfigProto.inter_op_parallelism_threads', index=1,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_per_session_threads', full_name='tensorflow.ConfigProto.use_per_session_threads', index=2,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='placement_period', full_name='tensorflow.ConfigProto.placement_period', index=3,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_filters', full_name='tensorflow.ConfigProto.device_filters', index=4,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gpu_options', full_name='tensorflow.ConfigProto.gpu_options', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allow_soft_placement', full_name='tensorflow.ConfigProto.allow_soft_placement', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='log_device_placement', full_name='tensorflow.ConfigProto.log_device_placement', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='graph_options', full_name='tensorflow.ConfigProto.graph_options', index=8,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='operation_timeout_in_ms', full_name='tensorflow.ConfigProto.operation_timeout_in_ms', index=9,
number=11, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=511,
serialized_end=870,
)
_CONFIGPROTO.fields_by_name['gpu_options'].message_type = _GPUOPTIONS
_CONFIGPROTO.fields_by_name['graph_options'].message_type = _GRAPHOPTIONS
DESCRIPTOR.message_types_by_name['GPUOptions'] = _GPUOPTIONS
DESCRIPTOR.message_types_by_name['GraphOptions'] = _GRAPHOPTIONS
DESCRIPTOR.message_types_by_name['ConfigProto'] = _CONFIGPROTO
GPUOptions = _reflection.GeneratedProtocolMessageType('GPUOptions', (_message.Message,), dict(
DESCRIPTOR = _GPUOPTIONS,
__module__ = 'config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GPUOptions)
))
_sym_db.RegisterMessage(GPUOptions)
GraphOptions = _reflection.GeneratedProtocolMessageType('GraphOptions', (_message.Message,), dict(
DESCRIPTOR = _GRAPHOPTIONS,
__module__ = 'config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GraphOptions)
))
_sym_db.RegisterMessage(GraphOptions)
ConfigProto = _reflection.GeneratedProtocolMessageType('ConfigProto', (_message.Message,), dict(
DESCRIPTOR = _CONFIGPROTO,
__module__ = 'config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.ConfigProto)
))
_sym_db.RegisterMessage(ConfigProto)
# @@protoc_insertion_point(module_scope)
syntax = "proto2";
package tensorflow;
enum DataType {
// Not a legal value for DataType. Used to indicate a DataType field
// has not been set.
DT_INVALID = 0;
  // Data types that all computation devices are expected to be
  // capable of supporting.
DT_FLOAT = 1;
DT_DOUBLE = 2;
DT_INT32 = 3;
DT_UINT8 = 4;
DT_INT16 = 5;
DT_INT8 = 6;
DT_STRING = 7;
DT_COMPLEX64 = 8; // Single-precision complex
DT_INT64 = 9;
DT_BOOL = 10;
DT_QINT8 = 11; // Quantized int8
DT_QUINT8 = 12; // Quantized uint8
DT_QINT32 = 13; // Quantized int32
DT_BFLOAT16 = 14; // Float32 truncated to 16 bits. Only for cast ops.
DT_QINT16 = 15; // Quantized int16
DT_QUINT16 = 16; // Quantized uint16
DT_UINT16 = 17;
DT_COMPLEX128 = 18; // Double-precision complex
DT_HALF = 19;
DT_RESOURCE = 20;
DT_VARIANT = 21; // Arbitrary C++ data types
DT_UINT32 = 22;
DT_UINT64 = 23;
// Do not use! These are only for parameters. Every enum above
// should have a corresponding value below (verified by types_test).
DT_FLOAT_REF = 101;
DT_DOUBLE_REF = 102;
DT_INT32_REF = 103;
DT_UINT8_REF = 104;
DT_INT16_REF = 105;
DT_INT8_REF = 106;
DT_STRING_REF = 107;
DT_COMPLEX64_REF = 108;
DT_INT64_REF = 109;
DT_BOOL_REF = 110;
DT_QINT8_REF = 111;
DT_QUINT8_REF = 112;
DT_QINT32_REF = 113;
DT_BFLOAT16_REF = 114;
DT_QINT16_REF = 115;
DT_QUINT16_REF = 116;
DT_UINT16_REF = 117;
DT_COMPLEX128_REF = 118;
DT_HALF_REF = 119;
DT_RESOURCE_REF = 120;
DT_VARIANT_REF = 121;
DT_UINT32_REF = 122;
DT_UINT64_REF = 123;
}
\ No newline at end of file
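The generated wrapper that follows exposes this enum through protobuf's `EnumTypeWrapper`, so names and numbers convert both ways. A small sketch, assuming `types_pb2` is generated into the same `dragon.vm.tensorflow.protobuf` package as `config_pb2`:

```python
from dragon.vm.tensorflow.protobuf import types_pb2

assert types_pb2.DT_FLOAT == 1
assert types_pb2.DataType.Name(9) == 'DT_INT64'          # number -> name
assert types_pb2.DataType.Value('DT_STRING') == 7        # name -> number
# Reference ("_REF") types are offset by 100 from their base values.
assert types_pb2.DT_FLOAT_REF == types_pb2.DT_FLOAT + 100
```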
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: types.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='types.proto',
package='tensorflow',
serialized_pb=_b('\n\x0btypes.proto\x12\ntensorflow*\xaa\x06\n\x08\x44\x61taType\x12\x0e\n\nDT_INVALID\x10\x00\x12\x0c\n\x08\x44T_FLOAT\x10\x01\x12\r\n\tDT_DOUBLE\x10\x02\x12\x0c\n\x08\x44T_INT32\x10\x03\x12\x0c\n\x08\x44T_UINT8\x10\x04\x12\x0c\n\x08\x44T_INT16\x10\x05\x12\x0b\n\x07\x44T_INT8\x10\x06\x12\r\n\tDT_STRING\x10\x07\x12\x10\n\x0c\x44T_COMPLEX64\x10\x08\x12\x0c\n\x08\x44T_INT64\x10\t\x12\x0b\n\x07\x44T_BOOL\x10\n\x12\x0c\n\x08\x44T_QINT8\x10\x0b\x12\r\n\tDT_QUINT8\x10\x0c\x12\r\n\tDT_QINT32\x10\r\x12\x0f\n\x0b\x44T_BFLOAT16\x10\x0e\x12\r\n\tDT_QINT16\x10\x0f\x12\x0e\n\nDT_QUINT16\x10\x10\x12\r\n\tDT_UINT16\x10\x11\x12\x11\n\rDT_COMPLEX128\x10\x12\x12\x0b\n\x07\x44T_HALF\x10\x13\x12\x0f\n\x0b\x44T_RESOURCE\x10\x14\x12\x0e\n\nDT_VARIANT\x10\x15\x12\r\n\tDT_UINT32\x10\x16\x12\r\n\tDT_UINT64\x10\x17\x12\x10\n\x0c\x44T_FLOAT_REF\x10\x65\x12\x11\n\rDT_DOUBLE_REF\x10\x66\x12\x10\n\x0c\x44T_INT32_REF\x10g\x12\x10\n\x0c\x44T_UINT8_REF\x10h\x12\x10\n\x0c\x44T_INT16_REF\x10i\x12\x0f\n\x0b\x44T_INT8_REF\x10j\x12\x11\n\rDT_STRING_REF\x10k\x12\x14\n\x10\x44T_COMPLEX64_REF\x10l\x12\x10\n\x0c\x44T_INT64_REF\x10m\x12\x0f\n\x0b\x44T_BOOL_REF\x10n\x12\x10\n\x0c\x44T_QINT8_REF\x10o\x12\x11\n\rDT_QUINT8_REF\x10p\x12\x11\n\rDT_QINT32_REF\x10q\x12\x13\n\x0f\x44T_BFLOAT16_REF\x10r\x12\x11\n\rDT_QINT16_REF\x10s\x12\x12\n\x0e\x44T_QUINT16_REF\x10t\x12\x11\n\rDT_UINT16_REF\x10u\x12\x15\n\x11\x44T_COMPLEX128_REF\x10v\x12\x0f\n\x0b\x44T_HALF_REF\x10w\x12\x13\n\x0f\x44T_RESOURCE_REF\x10x\x12\x12\n\x0e\x44T_VARIANT_REF\x10y\x12\x11\n\rDT_UINT32_REF\x10z\x12\x11\n\rDT_UINT64_REF\x10{')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DATATYPE = _descriptor.EnumDescriptor(
name='DataType',
full_name='tensorflow.DataType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DT_INVALID', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_FLOAT', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_DOUBLE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT32', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT8', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT16', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT8', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_STRING', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_COMPLEX64', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT64', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_BOOL', index=10, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT8', index=11, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QUINT8', index=12, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT32', index=13, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_BFLOAT16', index=14, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT16', index=15, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QUINT16', index=16, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT16', index=17, number=17,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_COMPLEX128', index=18, number=18,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_HALF', index=19, number=19,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_RESOURCE', index=20, number=20,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_VARIANT', index=21, number=21,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT32', index=22, number=22,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT64', index=23, number=23,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_FLOAT_REF', index=24, number=101,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_DOUBLE_REF', index=25, number=102,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT32_REF', index=26, number=103,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT8_REF', index=27, number=104,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT16_REF', index=28, number=105,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT8_REF', index=29, number=106,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_STRING_REF', index=30, number=107,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_COMPLEX64_REF', index=31, number=108,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT64_REF', index=32, number=109,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_BOOL_REF', index=33, number=110,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT8_REF', index=34, number=111,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QUINT8_REF', index=35, number=112,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT32_REF', index=36, number=113,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_BFLOAT16_REF', index=37, number=114,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT16_REF', index=38, number=115,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QUINT16_REF', index=39, number=116,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT16_REF', index=40, number=117,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_COMPLEX128_REF', index=41, number=118,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_HALF_REF', index=42, number=119,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_RESOURCE_REF', index=43, number=120,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_VARIANT_REF', index=44, number=121,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT32_REF', index=45, number=122,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT64_REF', index=46, number=123,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=28,
serialized_end=838,
)
_sym_db.RegisterEnumDescriptor(_DATATYPE)
DataType = enum_type_wrapper.EnumTypeWrapper(_DATATYPE)
DT_INVALID = 0
DT_FLOAT = 1
DT_DOUBLE = 2
DT_INT32 = 3
DT_UINT8 = 4
DT_INT16 = 5
DT_INT8 = 6
DT_STRING = 7
DT_COMPLEX64 = 8
DT_INT64 = 9
DT_BOOL = 10
DT_QINT8 = 11
DT_QUINT8 = 12
DT_QINT32 = 13
DT_BFLOAT16 = 14
DT_QINT16 = 15
DT_QUINT16 = 16
DT_UINT16 = 17
DT_COMPLEX128 = 18
DT_HALF = 19
DT_RESOURCE = 20
DT_VARIANT = 21
DT_UINT32 = 22
DT_UINT64 = 23
DT_FLOAT_REF = 101
DT_DOUBLE_REF = 102
DT_INT32_REF = 103
DT_UINT8_REF = 104
DT_INT16_REF = 105
DT_INT8_REF = 106
DT_STRING_REF = 107
DT_COMPLEX64_REF = 108
DT_INT64_REF = 109
DT_BOOL_REF = 110
DT_QINT8_REF = 111
DT_QUINT8_REF = 112
DT_QINT32_REF = 113
DT_BFLOAT16_REF = 114
DT_QINT16_REF = 115
DT_QUINT16_REF = 116
DT_UINT16_REF = 117
DT_COMPLEX128_REF = 118
DT_HALF_REF = 119
DT_RESOURCE_REF = 120
DT_VARIANT_REF = 121
DT_UINT32_REF = 122
DT_UINT64_REF = 123
DESCRIPTOR.enum_types_by_name['DataType'] = _DATATYPE
# @@protoc_insertion_point(module_scope)
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import dragon.updaters as updaters
import dragon.vm.theano as theano
import dragon.vm.theano.tensor as T
from dragon.vm.tensorflow.framework import ops
from dragon.vm.tensorflow.ops import variables
class Optimizer(object):
def __init__(self, use_locking, name):
if not name:
raise ValueError('Must specify the optimizer name.')
self._use_locking = use_locking
self._name = name
self._slots = {}
self.loss = self.updater = None
self.train = self.update = None
def get_name(self):
return self._name
def minimize(self, loss, global_step=None, var_list=None, **kwargs):
grads_and_vars = self.compute_gradients(loss, var_list)
return self.apply_gradients(grads_and_vars, global_step=global_step)
def compute_gradients(self, loss, var_list=None, **kwargs):
if var_list is None:
var_list = variables.trainable_variables() + \
ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES)
self.loss = loss
grads = T.grad(loss, var_list)
grads_and_vars = list(zip(grads, var_list))
return grads_and_vars
def apply_gradients(self, grads_and_vars, global_step=None, **kwargs):
objs = set()
for grad_var in grads_and_vars:
self.updater.append((grad_var[1], grad_var[0])) # (var, grad)
for obj in grad_var[1].grad_objs: objs.add(obj)
self.objs = list(objs)
return self
def run(self, feed_dict=None):
        # Lazily compile the objective function (forward/backward pass).
        if not hasattr(self, '_objective_func'):
            # Collect the targets that must be solved to back-propagate the loss.
            targets = set(self.objs)
            if feed_dict is not None:
                self._objective_func = theano.function(inputs=list(feed_dict.keys()),
                                                       outputs=list(targets))
            else:
                self._objective_func = theano.function(outputs=list(targets))
if feed_dict is not None:
self._objective_func(*feed_dict.values())
else:
self._objective_func()
        # Lazily compile the update function that applies the updater's rules.
if not hasattr(self, '_update_func'):
self._update_func = theano.function(updater=self.updater)
self._update_func()
class GradientDescentOptimizer(Optimizer):
def __init__(self, learning_rate, use_locking=False, name='GradientDescent'):
super(GradientDescentOptimizer, self).__init__(use_locking, name)
self.updater = updaters.SGDUpdater(learning_rate, 0.0)
class MomentumOptimizer(Optimizer):
def __init__(self, learning_rate, momentum,
use_locking=False, name='Momentum', use_nesterov=False):
super(MomentumOptimizer, self).__init__(use_locking, name)
if not use_nesterov:
self.updater = updaters.SGDUpdater(learning_rate, momentum)
else:
self.updater = updaters.NesterovUpdater(learning_rate, momentum)
class AdamOptimizer(Optimizer):
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
use_locking=False, name='Adam'):
super(AdamOptimizer, self).__init__(use_locking, name)
self.updater = updaters.AdamUpdater(learning_rate, beta1, beta2, epsilon)
class RMSPropOptimizer(Optimizer):
def __init__(self, learning_rate, decay, momentum, epsilon=1e-10,
use_locking=False, centered=False, name='RMSProp'):
super(RMSPropOptimizer, self).__init__(use_locking, name)
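        # NOTE: `momentum` and `centered` are accepted for API compatibility
        # with stock TensorFlow but are not forwarded to RMSPropUpdater here.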
self.updater = updaters.RMSPropUpdater(learning_rate, decay, epsilon)
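A short usage sketch for these wrappers; `loss` stands in for any scalar Tensor built from trainable variables and is not defined by this commit:

```python
from dragon.vm.tensorflow.training.optimizer import MomentumOptimizer

opt = MomentumOptimizer(learning_rate=0.01, momentum=0.9)

# minimize() differentiates the loss, pairs each gradient with its variable,
# and registers the pairs with the underlying Dragon updater. Unlike stock
# TensorFlow, it returns the optimizer itself rather than a graph op.
train_op = opt.minimize(loss)

for step in range(1000):
    train_op.run()   # forward/backward pass, then the parameter update
```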
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = ['Saver']
import dragon.core.workspace as ws
from dragon.core.tensor import Tensor
class Saver(object):
def __init__(self,
var_list=None,
max_to_keep=5,
name=None,):
self.var_list = var_list
def save(self,
sess,
save_path,
global_step=None):
        from ..core.variables import VARIABLES
        var_list = VARIABLES if self.var_list is None else self.var_list
filename = save_path
        if global_step is not None:
            if isinstance(global_step, Tensor):
                global_step = ws.FetchTensor(global_step)
                if global_step.size != 1:
                    raise ValueError('global_step must be a scalar.')
                global_step = global_step.flatten()[0]
            filename += '-' + str(global_step)
        values = var_list.values() if isinstance(var_list, dict) else var_list
        ws.Snapshot(values, filename=filename, suffix='')
def restore(self, sess, save_path):
ws.Restore(save_path)
\ No newline at end of file
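A minimal round-trip sketch; the checkpoint prefix and step are illustrative, and `sess` is accepted only for API compatibility:

```python
from dragon.vm.tensorflow.training.saver import Saver

saver = Saver()   # defaults to all registered variables

# ... train for a while ...
saver.save(sess, 'models/dragon', global_step=1000)   # suffixes the step: 'models/dragon-1000'
saver.restore(sess, 'models/dragon-1000')
```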
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from .optimizer import *
from .saver import *
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from dragon.vm.tensorflow.training.optimizer import GradientDescentOptimizer, \
MomentumOptimizer, \
RMSPropOptimizer, \
AdamOptimizer
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import functools
import re
_PRINT_DEPRECATION_WARNINGS = True
def _validate_callable(func, decorator_name):
if not hasattr(func, '__call__'):
raise ValueError(
'%s is not a function. If this is a property, make sure'
' @property appears before @%s in your source code:'
'\n\n@property\n@%s\ndef method(...)' % (
func, decorator_name, decorator_name))
def _validate_deprecation_args(date, instructions):
if date is not None and not re.match(r'20\d\d-[01]\d-[0123]\d', date):
raise ValueError('Date must be YYYY-MM-DD.')
if not instructions:
raise ValueError('Don\'t deprecate things without conversion instructions!')
def _get_qualified_name(function):
# Python 3
if hasattr(function, '__qualname__'):
return function.__qualname__
# Python 2
if hasattr(function, 'im_class'):
return function.im_class.__name__ + '.' + function.__name__
return function.__name__
def deprecated(date, instructions):
_validate_deprecation_args(date, instructions)
def deprecated_wrapper(func):
_validate_callable(func, 'deprecated')
@functools.wraps(func)
def new_func(*args, **kwargs):
from dragon.config import logger
if _PRINT_DEPRECATION_WARNINGS:
logger.warning(
'{} (from {}) is deprecated and will be removed {}.\n'
'Instructions for updating:\n{}'.
format(_get_qualified_name(func),
func.__module__,
'in a future version' if date is None else ('after %s' % date),
instructions))
return func(*args, **kwargs)
return new_func
return deprecated_wrapper
\ No newline at end of file
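A quick sketch of the decorator; the import path is assumed to follow TensorFlow's layout (`util/deprecation.py`), and the function name is illustrative:

```python
from dragon.vm.tensorflow.util.deprecation import deprecated

@deprecated('2018-01-01', 'Use `new_api()` instead.')
def old_api(x):
    return x * 2

old_api(3)   # logs a deprecation warning, then returns 6
```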
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright 2016 The TensorFlow Authors
# --------------------------------------------------------
import collections as _collections
import six as _six
def is_sequence(seq):
if isinstance(seq, dict):
return True
return (isinstance(seq, _collections.Sequence)
and not isinstance(seq, _six.string_types))
def _yield_value(iterable):
if isinstance(iterable, dict):
for key in sorted(_six.iterkeys(iterable)):
yield iterable[key]
else:
for value in iterable:
yield value
def _yield_flat_nest(nest):
for n in _yield_value(nest):
if is_sequence(n):
for ni in _yield_flat_nest(n):
yield ni
else:
yield n
def flatten(nest):
if is_sequence(nest):
return list(_yield_flat_nest(nest))
else:
return [nest]
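A few examples of the flattening rules (dicts are traversed in sorted-key order, strings count as leaves); the import path is assumed to mirror TensorFlow's `util/nest.py`:

```python
from dragon.vm.tensorflow.util import nest

nest.flatten([1, (2, 3), [4, [5]]])    # [1, 2, 3, 4, 5]
nest.flatten({'b': [1, 2], 'a': 3})    # [3, 1, 2]  (values in sorted-key order)
nest.flatten('hello')                  # ['hello']  (strings are leaves)
nest.is_sequence((1, 2))               # True
```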
...@@ -36,7 +36,7 @@ find_packages('dragon') ...@@ -36,7 +36,7 @@ find_packages('dragon')
find_modules() find_modules()
setup(name = 'dragon', setup(name = 'dragon',
version='0.2.1.5', version='0.2.1.6',
description = 'Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework', description = 'Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework',
url='https://github.com/neopenx/Dragon', url='https://github.com/neopenx/Dragon',
author='Ting Pan', author='Ting Pan',
......