Commit 990c496b by Ting PAN

python3 support

1 parent b6182abc
Showing with 520 additions and 285 deletions
......@@ -8,6 +8,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8.0)
# ---------------- User Config ----------------
# set optional libraries
option(WITH_PYTHON3 "Set ON to use PYTHON3 otherwise PYTHON2" OFF)
option(WITH_CUDA "Set ON to use CUDA" ON)
option(WITH_CUDNN "Set ON to use CUDNN" OFF)
option(WITH_BLAS "Set ON to use BLAS" OFF)
......@@ -19,10 +20,11 @@ option(WITH_CUDA_FP16 "Set ON to use FP16" ON)
# set your 3rdparty
set(3RDPARTY_DIR ${PROJECT_SOURCE_DIR}/../3rdparty)
# set your py27
# set your python environment
set(PYTHON_DIR /usr/include/python2.7) # prefer
#set(ANACONDA_DIR /xxx/anaconda) # optional
set(NUMPY_DIR /xxx/numpy) # require
#set(PYTHON_DIR /usr/include/python3.x) # optional, set specific version
#set(ANACONDA_DIR /xxx/anaconda) # optional, set specific version below if using py3
set(NUMPY_DIR /xxx/numpy) # required, root folder of numpy package
# set CUDA compiling architecture
set(CUDA_ARCH -gencode arch=compute_20,code=sm_20
......@@ -78,7 +80,10 @@ include_directories(${3RDPARTY_DIR}/include/mpi)
include_directories(${CUDA_INCLUDE_DIRS})
include_directories(${PROJECT_SOURCE_DIR}/src)
include_directories(${NUMPY_DIR}/core/include)
include_directories(${NUMPY_DIR})
include_directories(${NUMPY_DIR}/numpy)
include_directories(${ANACONDA_DIR}/include/python2.7)
include_directories(${ANACONDA_DIR}/include/python3.x)
include_directories(${PYTHON_DIR})
include_directories(${ANACONDA_DIR}/include)
......@@ -91,50 +96,54 @@ link_directories(/usr/local/cuda/lib64)
set(CMAKE_INSTALL_PREFIX ${PROJECT_SOURCE_DIR} CACHE STRING "set install prefix" FORCE)
# ---[ defines
if (WITH_PYTHON3)
ADD_DEFINITIONS(-DWITH_PYTHON3)
message(STATUS "Use PYTHON3 [Optional]")
endif()
if (WITH_CUDA)
ADD_DEFINITIONS(-DWITH_CUDA)
message(STATUS "Use CUDA [Optional]")
ADD_DEFINITIONS(-DWITH_CUDA)
message(STATUS "Use CUDA [Optional]")
endif()
if (WITH_CUDNN)
ADD_DEFINITIONS(-DWITH_CUDNN)
message(STATUS "Use CUDNN [Optional]")
ADD_DEFINITIONS(-DWITH_CUDNN)
message(STATUS "Use CUDNN [Optional]")
endif()
if (WITH_BLAS)
ADD_DEFINITIONS(-DWITH_BLAS)
message(STATUS "Use BLAS [Optional]")
ADD_DEFINITIONS(-DWITH_BLAS)
message(STATUS "Use BLAS [Optional]")
else()
message(STATUS "Unuse BLAS [Optional]"
"\n -- > GEMM/GEMV is disabled"
"\n -- > prefer not to run as CPU Mode")
message(STATUS "Unuse BLAS [Optional]"
"\n -- > GEMM/GEMV is disabled"
"\n -- > prefer not to run as CPU Mode")
endif()
if (WITH_SSE)
ADD_DEFINITIONS(-DWITH_SSE)
message(STATUS "Use SSE [Optional]")
if(UNIX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.1")
endif()
ADD_DEFINITIONS(-DWITH_SSE)
message(STATUS "Use SSE [Optional]")
if(UNIX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.1")
endif()
endif()
if (WITH_MPI)
ADD_DEFINITIONS(-DWITH_MPI)
message(STATUS "Use MPI [Optional]")
ADD_DEFINITIONS(-DWITH_MPI)
message(STATUS "Use MPI [Optional]")
endif()
if (WITH_MPI_CUDA)
ADD_DEFINITIONS(-DWITH_CUDA_AWARE)
message(STATUS "Use MPI-CUDA [Optional]")
ADD_DEFINITIONS(-DWITH_CUDA_AWARE)
message(STATUS "Use MPI-CUDA [Optional]")
endif()
if (WITH_CUDA_FP16)
ADD_DEFINITIONS(-DWITH_CUDA_FP16)
message(STATUS "Use CUDA FP16 [Optional]")
ADD_DEFINITIONS(-DWITH_CUDA_FP16)
message(STATUS "Use CUDA FP16 [Optional]")
endif()
# ---[ Flags
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} ${CUDA_ARCH}")
if(WIN32)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP")
endif()
if(UNIX)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -O2 -m64 -fpermissive -std=c++11")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -O2 -m64 -fpermissive -std=c++11")
endif()
# ---[ Warnings
......@@ -143,4 +152,4 @@ endif()
add_subdirectory(modules/python)
# ---[ Utils
file(MAKE_DIRECTORY ${PROJECT_BINARY_DIR}/../lib)
file(MAKE_DIRECTORY ${PROJECT_BINARY_DIR}/../lib)
\ No newline at end of file
......@@ -28,7 +28,7 @@ namespace dragon {
* it seems not necessary to create handles for different threads
*************************************************************************/
class CUDAObject{
class CUDAObject {
public:
CUDAObject(): cur_gpu(0) {
for (int i = 0; i < MAX_GPUS; i++) {
......
......@@ -51,21 +51,21 @@ class Workspace{
return tensor_map_[query].get();
}
inline Tensor* GetTensor(const string& name){
inline Tensor* GetTensor(const string& name) {
string query = GetTensorName(name);
CHECK(HasTensor(query))
<< "Tensor(" << name << ") does not exist.";
return tensor_map_[query].get();
}
inline void LockTensor(const string& name){
inline void LockTensor(const string& name) {
string query = GetTensorName(name);
if (!lock_map_.count(query))
lock_map_[query] = unique_ptr<mutex>(new mutex);
lock_map_[query]->lock();
}
inline void UnlockTensor(const string& name){
inline void UnlockTensor(const string& name) {
string query = GetTensorName(name);
if (!lock_map_.count(query))
lock_map_[query] = unique_ptr<mutex>(new mutex);
......
......@@ -11,6 +11,10 @@
#include "core/operator.h"
#ifdef WITH_PYTHON3
#define PyBytes_FromStringAndSize PyUnicode_FromStringAndSize
#endif
namespace dragon {
template <class Context>
......
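Background for the define above (a minimal Python-side sketch of mine, not part of the commit): Python 3 split the 2.x `str` type into `bytes` and text `str`, so byte strings handed back through the C API need a unicode-aware constructor on 3.x.

```python
import sys

# Python 3 splits the old str into bytes and text; Python 2's str is bytes.
s = 'op_def'
if sys.version_info >= (3, 0):
    assert isinstance(s.encode(), bytes)   # text -> bytes needs an encode
else:
    assert isinstance(s, bytes)            # str and bytes are the same type
```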
......@@ -403,7 +403,7 @@ PyMethodDef* GetAllMethods() {
return g_python_methods;
}
static void import_array_wrapper() { import_array(); }
static void import_array_wrapper() { import_array1(); }
void common_init() {
import_array_wrapper();
......@@ -414,10 +414,24 @@ void common_init() {
initialized = true;
}
#ifdef WITH_PYTHON3
static struct PyModuleDef libdragon = { PyModuleDef_HEAD_INIT,
"libdragon", "", -1,
GetAllMethods() };
PyMODINIT_FUNC PyInit_libdragon(void) {
PyObject* module = PyModule_Create(&libdragon);
if (module == nullptr) return nullptr;
common_init();
return module;
}
#else // WITH_PYTHON2
PyMODINIT_FUNC initlibdragon(void) {
PyObject* module = Py_InitModule("libdragon", GetAllMethods());
if (module == nullptr) return;
common_init();
}
#endif
}
\ No newline at end of file
......@@ -19,6 +19,10 @@
#include "core/operator_gradient.h"
#include "core/workspace.h"
#ifdef WITH_PYTHON3
#define PyString_AsString PyUnicode_AsUTF8
#endif
using namespace dragon;
inline std::string PyBytesToStdString(PyObject* pystring) {
......
......@@ -13,6 +13,11 @@
#ifdef WITH_MPI
#include <mpi/mpi.h>
#ifdef WITH_PYTHON3
#define PyInt_FromLong PyLong_FromLong
#define _PyInt_AsInt _PyLong_AsInt
#endif
using namespace dragon;
inline PyObject* MPIInitCC(PyObject* self, PyObject* args) {
......
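The aliases above work because Python 3 unified `int` and `long`; a small behavioral sketch (mine, not part of the commit) of what they paper over:

```python
import sys

# Python 2 has separate int and long types; Python 3 has a single
# arbitrary-precision int, hence PyInt_* -> PyLong_* on the C side.
if sys.version_info >= (3, 0):
    assert isinstance(2 ** 64, int)
else:
    assert isinstance(2 ** 64, long)   # noqa: F821 (Python 2 only)
```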
......@@ -16,5 +16,4 @@ except ImportError as e:
from dragon.core.scope import TensorScope as name_scope
from dragon.core.scope import PhaseScope as phase_scope
from dragon.core.scope import DeviceScope as device_scope
from dragon.core.scope import DeviceScope as device_scope
\ No newline at end of file
......@@ -4,8 +4,12 @@
# Written by Ting Pan
# --------------------------------------------------------
import numpy as np
from __init__ import *
from dragon.__init__ import *
import logging
logger = logging.getLogger('dragon')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
option = {}
......@@ -58,6 +62,14 @@ def SetLoggingLevel(level):
"""
SetLogLevelCC(level)
global logger
logger.setLevel({
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'FATAL': logging.CRITICAL
}[level])
......
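A usage sketch of the level mapping added above (names condensed from the diff; `'FATAL'` maps onto `logging.CRITICAL` because the stdlib logging module has no FATAL level of its own):

```python
import logging
import sys

logger = logging.getLogger('dragon')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))

LEVELS = {'DEBUG': logging.DEBUG, 'INFO': logging.INFO,
          'WARNING': logging.WARNING, 'ERROR': logging.ERROR,
          'FATAL': logging.CRITICAL}

def set_logging_level(level):
    logger.setLevel(LEVELS[level])

set_logging_level('WARNING')
logger.info('hidden')      # filtered out
logger.warning('visible')  # printed to stdout
```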
......@@ -4,18 +4,21 @@
# Written by Ting Pan
# --------------------------------------------------------
import dragon.config as config
import dragon.protos.dragon_pb2 as pb
from collections import defaultdict
from dragon.core.utils import MakeOperatorDef
from dragon.__init__ import *
import dragon.protos.dragon_pb2 as pb
import dragon.config as config
from scope import GetOperatorName
from dragon.utils import MakeOperatorDef
from .scope import GetOperatorName
class GraphGradientMaker(object):
@classmethod
def CreateGradientForOp(cls, op_def, g_output):
""" parse ops from string """
g_ops, g_inputs, defaults = CreateGradientDefsCC(op_def.SerializeToString(), g_output)
if sys.version_info >= (3, 0):
g_inputs = [g_input.decode('ascii') for g_input in g_inputs]
for idx, g_op in enumerate(g_ops):
new_def = pb.OperatorDef()
new_def.ParseFromString(g_op)
......
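The `decode('ascii')` guard above exists because strings crossing the C extension boundary arrive as `bytes` under Python 3; a minimal sketch (hypothetical helper name) of decoding once at the edge:

```python
import sys

def as_text(names):
    # Under Python 3, names returned from the C++ layer are bytes;
    # decode them once here (mirrors the guard added above).
    if sys.version_info >= (3, 0):
        return [n.decode('ascii') for n in names]
    return names

print(as_text([b'conv1_grad', b'fc1_grad']))  # ['conv1_grad', 'fc1_grad'] on py3
```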
......@@ -4,8 +4,11 @@
# Written by Ting Pan
# --------------------------------------------------------
from dragon import MPIInitCC, MPIRankCC, MPISizeCC, MPICreateGroupCC, MPIFinalizeCC
import numpy as np
from six.moves import range as xrange
from dragon import MPIInitCC, MPIRankCC, MPISizeCC, \
MPICreateGroupCC, MPIFinalizeCC
_is_init = False
_snapshot_ranks = []
......
......@@ -4,13 +4,15 @@
# Written by Ting Pan
# --------------------------------------------------------
import numpy as np
from collections import OrderedDict
import dragon.config as config
import dragon.core.workspace as ws
import dragon.protos.dragon_pb2 as pb
import workspace as ws
from scope import GetOperatorName, GetTensorName
from dragon.utils import MakeOperatorDef
import numpy as np
from collections import OrderedDict
from dragon.core.utils import MakeOperatorDef
from dragon.core.scope import GetOperatorName, GetTensorName
from six.moves import range as xrange
class Tensor(object):
REGISTERED_FILLERS = {'Constant', 'Normal', 'TruncatedNormal',
......@@ -39,7 +41,7 @@ class Tensor(object):
@name.setter
def name(self, value):
from scope import TENSOR_SCOPE
from .scope import TENSOR_SCOPE
if value is None: self._name = TENSOR_SCOPE + GetTensorName()
else: self._name = TENSOR_SCOPE + value
......@@ -237,15 +239,15 @@ class Tensor(object):
# 1. collect inputs
if not isinstance(inputs, list): inputs = [inputs]
for input in inputs:
for op_idx, expr in input.expressions.iteritems():
if not expressions.has_key(op_idx):
for op_idx, expr in input.expressions.items():
if not op_idx in expressions:
expressions[op_idx] = expr
if extra_inputs is not None:
if not isinstance(extra_inputs, list): extra_inputs = [extra_inputs]
for input in extra_inputs:
for op_idx, expr in input.expressions.iteritems():
if not expressions.has_key(op_idx):
for op_idx, expr in input.expressions.items():
if not op_idx in expressions:
expressions[op_idx] = expr
# 2. generate outputs
......@@ -286,7 +288,7 @@ class Tensor(object):
output.extra_targets.add(input.name)
# 5. utils
if kwargs.has_key('static_shape'):
if 'static_shape' in kwargs:
outputs[0].tf_shape = kwargs['static_shape']
if nout > 1:
......@@ -302,26 +304,26 @@ class Tensor(object):
filler.type = type.lower()
if filler.type == 'constant':
filler.value = kwargs['value'] if kwargs.has_key('value') else 0
filler.value = kwargs['value'] if 'value' in kwargs else 0
elif filler.type == 'normal' or filler.type == 'gaussian':
filler.mean = kwargs['mean'] if kwargs.has_key('mean') else 0
filler.std = kwargs['std'] if kwargs.has_key('std') else 1
filler.mean = kwargs['mean'] if 'mean' in kwargs else 0
filler.std = kwargs['std'] if 'std' in kwargs else 1
filler.type = 'normal'
elif filler.type == 'uniform':
filler.low = kwargs['low'] if kwargs.has_key('low') else 0
filler.high = kwargs['high'] if kwargs.has_key('high') else 1
filler.low = kwargs['low'] if 'low' in kwargs else 0
filler.high = kwargs['high'] if 'high' in kwargs else 1
filler.type = 'uniform'
elif filler.type == 'truncated_normal' or filler.type == 'truncatednormal':
filler.mean = kwargs['mean'] if kwargs.has_key('mean') else 0
filler.std = kwargs['std'] if kwargs.has_key('std') else 1
filler.mean = kwargs['mean'] if 'mean' in kwargs else 0
filler.std = kwargs['std'] if 'std' in kwargs else 1
filler.low = filler.mean - 2.0 * filler.std
filler.high = filler.mean + 2.0 * filler.std
filler.type = 'truncated_normal'
elif filler.type == 'parameterized_truncated_normal':
filler.mean = kwargs['mean'] if kwargs.has_key('mean') else 0
filler.std = kwargs['std'] if kwargs.has_key('std') else 1
filler.low = kwargs['low'] if kwargs.has_key('low') else -2.0
filler.high = kwargs['high'] if kwargs.has_key('high') else 2.0
filler.mean = kwargs['mean'] if 'mean' in kwargs else 0
filler.std = kwargs['std'] if 'std' in kwargs else 1
filler.low = kwargs['low'] if 'low' in kwargs else -2.0
filler.high = kwargs['high'] if 'high' in kwargs else 2.0
ws.CreateFiller(filler)
return self
......@@ -331,7 +333,7 @@ class Tensor(object):
buffer0 = '-------------------Expressions-------------------\n'
buffer1 = ''; buffer2 = 'Inputs: ['
for k,v in self.expressions.iteritems():
for k,v in self.expressions.items():
buffer1 = buffer1 + '>>> ' + str(k).zfill(3) + '. ('
for input in v.input:
if input not in outputs:
......
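The dict idioms this commit standardizes on, in one self-contained sketch: both forms run unchanged on Python 2 and 3, unlike `has_key()` and `iteritems()`, which Python 3 removed.

```python
expressions = {3: 'op_a', 1: 'op_b'}

if 3 in expressions:                       # instead of expressions.has_key(3)
    pass
for op_idx, expr in expressions.items():   # instead of .iteritems()
    print(op_idx, expr)
```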
# --------------------------------------------------------
# Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import sys
from google.protobuf.message import Message
from dragon.protos import dragon_pb2 as pb
import numpy as np
if sys.version_info >= (3,0):
def MakeArgument(key, value):
argument = pb.Argument()
argument.name = key
if type(value) is float: argument.f = value
elif type(value) is int: argument.i = value
elif type(value) is np.int64: argument.i64 = int(value)
elif type(value) is str: argument.s = value
elif type(value) is bool: argument.b = value
elif isinstance(value, Message): argument.s = value.SerializeToString()
elif all(type(v) is float for v in value): argument.floats.extend(value)
elif all(type(v) is int for v in value): argument.ints.extend(value)
elif all(type(v) is str for v in value): argument.strings.extend(value)
elif all(isinstance(v, Message) for v in value):
argument.strings.extend([v.SerializeToString() for v in value])
else:
raise ValueError('unknown argument type: key={} value={} value type={}' \
.format(key, value, type(value)))
return argument
else:
def MakeArgument(key, value):
argument = pb.Argument()
argument.name = key
if type(value) is float: argument.f = value
elif type(value) is int: argument.i = value
elif type(value) is np.int64: argument.i64 = int(value)
elif type(value) is str: argument.s = value
elif type(value) is unicode: argument.s = value
elif type(value) is bool: argument.b = value
elif isinstance(value, Message): argument.s = value.SerializeToString()
elif all(type(v) is float for v in value): argument.floats.extend(value)
elif all(type(v) is int for v in value): argument.ints.extend(value)
elif all(type(v) is str for v in value): argument.strings.extend(value)
elif all(type(v) is unicode or type(v) is str for v in value):
argument.strings.extend(value)
elif all(isinstance(v, Message) for v in value):
argument.strings.extend([v.SerializeToString() for v in value])
else:
raise ValueError('unknown argument type: key={} value={} value type={}' \
.format(key, value, type(value)))
return argument
def MakeOperatorDef(op_type, inputs, outputs, name='',
device_option=None, arg=None, engine=None, **kwargs):
operator = pb.OperatorDef()
operator.type = op_type
operator.name = name
operator.input.extend([str(tensor) for tensor in inputs])
operator.output.extend([str(tensor) for tensor in outputs])
if device_option is not None:
operator.device_option.CopyFrom(device_option)
if engine is not None:
operator.engine = engine
if 'random_seed' in kwargs:
operator.device_option.random_seed = kwargs['random_seed']
del kwargs['random_seed']
if arg is not None:
operator.arg.extend(arg)
for k,v in kwargs.items():
if v is None: continue
operator.arg.add().CopyFrom(MakeArgument(k,v))
return operator
def MakeDeviceOption(device_type, gpu_id, rng_seed = None):
""" return a DeviceOption """
option = pb.DeviceOption()
option.device_type = device_type
option.gpu_id = gpu_id
if rng_seed is not None: option.random_seed = rng_seed
return option
# fix the python stdout
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
# clear the stdout buffer for mpi(c++ & python)
import sys
sys.stdout = Unbuffered(sys.stdout)
\ No newline at end of file
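The two `MakeArgument` definitions above are selected once at import time rather than branching on every call; a minimal sketch of the same pattern with a hypothetical helper (`unicode` only exists on Python 2, so the whole function body must be version-gated):

```python
import sys

# Select a version-specific implementation once at import time.
if sys.version_info >= (3, 0):
    def to_text(value):
        return value if isinstance(value, str) else value.decode('utf-8')
else:
    def to_text(value):
        return value if isinstance(value, unicode) else value.decode('utf-8')  # noqa: F821

print(to_text(b'conv1'))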
......@@ -4,15 +4,19 @@
# Written by Ting Pan
# --------------------------------------------------------
import cPickle
import os
try:
import cPickle
except:
import pickle as cPickle
import dragon.core.utils as utils
import dragon.core.mpi as mpi
import dragon.protos.dragon_pb2 as pb
import numpy as np
from google.protobuf.message import Message
import os
from dragon import *
import dragon.protos.dragon_pb2 as pb
import dragon.core.mpi as mpi
import dragon.utils as utils
from dragon.config import logger
from google.protobuf.message import Message
from six.moves import range as xrange
CURRENT_GRAPH_IDX = 0
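The compatibility import added above, as a runnable sketch: `cPickle` was removed in Python 3, where the C implementation already backs the plain `pickle` module, so aliasing keeps the rest of the file unchanged.

```python
try:
    import cPickle             # Python 2 fast path
except ImportError:
    import pickle as cPickle   # Python 3: pickle is already C-accelerated

data = {'conv1/weight': [0.1, 0.2]}
blob = cPickle.dumps(data, cPickle.HIGHEST_PROTOCOL)
assert cPickle.loads(blob) == data
```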
......@@ -32,7 +36,7 @@ def CreateGraph(graph_def):
def WriteOptimizedGraph(graph_def):
with open(graph_def.name + '.txt', 'w') as f:
f.write(str(graph_def))
print 'write serialized graph to: {}'.format(graph_def.name + '.txt')
logger.info('write serialized graph to: {}'.format(graph_def.name + '.txt'))
def HasTensor(tensor):
......@@ -113,19 +117,19 @@ def RunGraph(graph_name, inputs=(), outputs=[], stage=None, return_outputs=True)
def PrintRawGraphDef(graph_def):
print graph_def
logger.info(graph_def)
def PrintOptimizedGraph(graph_def):
graph_name = graph_def.name
graph_tensor = 'GraphDef_' + graph_name
if not HasTensorCC(graph_tensor):
print 'graph: {} does not exist, ignore printing....'.format(graph_name)
logger.info('graph: {} does not exist, ignore printing....'.format(graph_name))
return
graph_def = pb.GraphDef()
graph_def.ParseFromString(FetchTensor(graph_tensor))
print graph_def
logger.info(graph_def)
def Snapshot(tensors, filename, prefix='', suffix='.bin', format=0):
......@@ -144,8 +148,8 @@ def Snapshot(tensors, filename, prefix='', suffix='.bin', format=0):
content[tensor.name] = FetchTensor(tensor)
with open(filepath, 'wb') as f:
cPickle.dump(content, f, cPickle.HIGHEST_PROTOCOL)
print 'Snapshot Model@: ' + filepath
print 'Model Format: cPickle'
logger.info('Snapshot Model@: ' + filepath)
logger.info('Model Format: cPickle')
elif format is 1:
# caffe-store
......@@ -162,13 +166,13 @@ def Restore(filename, format=0):
assert os.path.exists(filename), 'model of path({}) does not exist.'.format(filename)
if format is 0:
content = cPickle.load(open(filename, 'rb'))
print 'Restore From Model@: ' + filename
print 'Model Format: cPickle'
for key, ndarray in content.iteritems():
logger.info('Restore From Model@: ' + filename)
logger.info('Model Format: cPickle')
for key, ndarray in content.items():
if not HasTensor(key):
print '[Warning]: Tensor({}) of model does not exist in any Graphs, skip.'.format(key)
logger.info('[Warning]: Tensor({}) of model does not exist in any Graphs, skip.'.format(key))
else:
print '[Info]: Tensor({}) restored.'.format(key)
logger.info('[Info]: Tensor({}) restored.'.format(key))
FeedTensor(key, ndarray)
elif format is 1:
......
......@@ -9,6 +9,7 @@ import dragon.core.workspace as ws
import dragon.ops as ops
import dragon.vm.theano as theano
from multiprocessing import Process, Queue
from dragon.config import logger
""" How to custom a RunOp in Dragon """
......@@ -31,7 +32,7 @@ class Fetcher(Process):
self.daemon = True
def cleanup():
print 'Terminating Fetcher......'
logger.info('Terminating Fetcher......')
self.terminate()
self.join()
......@@ -103,4 +104,4 @@ if __name__ == '__main__':
foo()
# fetch
print 'y \n-------------- \n', y.get_value(), '\n'
\ No newline at end of file
logger.info('y \n-------------- \n%s\n', y.get_value())
\ No newline at end of file
......@@ -10,6 +10,7 @@ import dragon.ops as ops
from dragon.core.tensor import Tensor
import dragon.vm.theano.tensor as T
import dragon.vm.theano as theano
from dragon.config import logger
""" How to custom a TemplateOp in Dragon """
......@@ -97,7 +98,7 @@ if __name__ == '__main__':
foo()
# fetch
print 'y \n-------------- \n', y.get_value(), '\n'
print 'dx1 \n-------------- \n', dx1.get_value(), '\n'
print 'dx2 \n-------------- \n', dx2.get_value(), '\n'
logger.info('y \n-------------- \n%s\n', y.get_value())
logger.info('dx1 \n-------------- \n%s\n', dx1.get_value())
logger.info('dx2 \n-------------- \n%s\n', dx2.get_value())
......@@ -4,9 +4,12 @@
# Written by Ting Pan
# --------------------------------------------------------
import numpy as np
from six.moves import range as xrange
from dragon.core.tensor import Tensor, GetTensorName
import dragon.core.workspace as ws
import numpy as np
def At(inputs, indices=[], axis=0, acc_gradient=False, **kwargs):
......
......@@ -8,29 +8,6 @@ import numpy as np
from dragon.core.tensor import Tensor
from dragon.operators.utils import Run
def Imagenet(**kwargs):
"""
:param kwargs: a dict of imagenet data param
:param --> mean_value: a list of mean values for channles [B-G-R]
:param --> source: a str of the images root directory
:param --> imageset: a str of text file contains image name / label
:param --> prefetch: a int of the prefetching size
:param --> batch_size: a int of the batch size
:param --> force_gray a bool of whether to use only 1 channel
:param --> shuffle a bool of whether to use shuffle
:param --> scale a float of the coeff to scale
:return: 2 Tensors of data and label
"""
args = locals(); kwargs = args['kwargs']
del args['kwargs']; kwargs = dict(args, **kwargs)
kwargs['module'] = 'dragon.vm.caffe.io.data_layer'
kwargs['op'] = 'DataLayer'
return Run([], param_str=str(kwargs), nout=2, **kwargs)
def LMDBData(**kwargs):
"""
:param kwargs: a dict of imagenet data param
......
......@@ -27,7 +27,7 @@ def Fill(shape, value=1.0, **kwargs):
del kwargs['shape']
output = Tensor.CreateOperator([], nout=1, op_type='Fill', **kwargs)
output.shape = kwargs['static_shape'] if kwargs.has_key('static_shape') else None
output.shape = kwargs['static_shape'] if 'static_shape' in kwargs else None
return output
......@@ -52,7 +52,7 @@ def RandomalUniform(shape, low=-1.0, high=1.0, **kwargs):
del kwargs['shape']
output = Tensor.CreateOperator([], nout=1, op_type='RandomUniform', **kwargs)
output.shape = kwargs['static_shape'] if kwargs.has_key('static_shape') else None
output.shape = kwargs['static_shape'] if 'static_shape' in kwargs else None
return output
......@@ -77,7 +77,7 @@ def RandomalNormal(shape, mean=0.0, std=1.0, **kwargs):
del kwargs['shape']
output = Tensor.CreateOperator([], nout=1, op_type='RandomNormal', **kwargs)
output.shape = kwargs['static_shape'] if kwargs.has_key('static_shape') else None
output.shape = kwargs['static_shape'] if 'static_shape' in kwargs else None
return output
......@@ -102,7 +102,7 @@ def TruncatedNormal(shape, mean=0.0, std=1.0, **kwargs):
del kwargs['shape']
output = Tensor.CreateOperator([], nout=1, op_type='TruncatedNormal', **kwargs)
output.shape = kwargs['static_shape'] if kwargs.has_key('static_shape') else None
output.shape = kwargs['static_shape'] if 'static_shape' in kwargs else None
return output
......@@ -125,7 +125,7 @@ def GlorotUniform(shape, scale=3.0, mode='fan_in', **kwargs):
del kwargs['shape']
output = Tensor.CreateOperator([], nout=1, op_type='GlorotUniform', **kwargs)
output.shape = kwargs['static_shape'] if kwargs.has_key('static_shape') else None
output.shape = kwargs['static_shape'] if 'static_shape' in kwargs else None
return output
......@@ -149,7 +149,7 @@ def GlorotNormal(shape, scale=2.0, mode='fan_in', **kwargs):
del kwargs['shape']
output = Tensor.CreateOperator([], nout=1, op_type='GlorotNormal', **kwargs)
output.shape = kwargs['static_shape'] if kwargs.has_key('static_shape') else None
output.shape = kwargs['static_shape'] if 'static_shape' in kwargs else None
return output
......
......@@ -4,9 +4,12 @@
# Written by Ting Pan
# --------------------------------------------------------
from six.moves import range as xrange
from dragon.core.tensor import Tensor
import dragon.core.mpi as mpi
def MPIBroadcast(inputs, root, mpi_rank=None, **kwargs):
"""
:param inputs: a Tensor which to broadcast
......@@ -52,7 +55,7 @@ def MPIGather(inputs, root, mpi_rank=None, **kwargs):
if not isinstance(kwargs['mpi_rank'], list):
kwargs['mpi_rank'] = [kwargs['mpi_rank']]
if kwargs.has_key('nout'):
if 'nout' in kwargs:
if kwargs['nout'] != len(kwargs['mpi_rank']):
raise RuntimeError('specified nout is {}, but provide {} mpi nodes'
.format(kwargs['nout'], len(kwargs['mpi_rank'])))
......
......@@ -5,8 +5,11 @@
# --------------------------------------------------------
import math
from six.moves import range as xrange
from dragon.core.tensor import Tensor
def Conv2D(inputs, num_output, kernel_size,
stride=1, pad=0, dilation=1, group=1, **kwargs):
"""
......
......@@ -4,21 +4,20 @@
# Written by Ting Pan
# --------------------------------------------------------
import operators.initializer as init
import operators.vision as vision
import operators.loss as loss
import operators.data as data
import operators.activation as act
import operators.arithmetic as math
import operators.utils as utils
import operators.cast as cast
import operators.mpi as mpi
import operators.common as common
import operators.norm as norm
import operators.recurrent as recurrent
from .operators import initializer as init
from .operators import vision as vision
from .operators import loss as loss
from .operators import data as data
from .operators import activation as act
from .operators import arithmetic as math
from .operators import utils as utils
from .operators import cast as cast
from .operators import mpi as mpi
from .operators import common as common
from .operators import norm as norm
from .operators import recurrent as recurrent
# data
Imagenet = data.Imagenet
LMDBData = data.LMDBData
MemoryData = data.MemoryData
......
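The rewritten imports above follow PEP 328: Python 3 dropped implicit relative imports, so sibling modules inside a package must be addressed explicitly. A sketch under a hypothetical package layout (`mypkg/__init__.py` and `mypkg/ops.py`):

```python
# mypkg/__init__.py
# Python 2 let "import ops" find a sibling module implicitly;
# the explicit relative form below works on both 2 and 3.
from . import ops
from .ops import *
```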
syntax = "proto2";
message TensorProto {
repeated int32 dims = 1;
enum DataType {
......
......@@ -4,6 +4,7 @@ from multiprocessing import Process
try:
from flask import Flask, render_template, make_response, jsonify, request
except ImportError as e: pass
from six.moves import range as xrange
class DragonBoard(Process):
def __init__(self, log_dir='', port=5000, max_display=1000):
......@@ -20,7 +21,8 @@ class DragonBoard(Process):
'port': port,
'max_display': max_display}
def cleanup():
print 'Terminating DragonBoard......'
from dragon.config import logger
logger.info('Terminating DragonBoard......')
self.terminate()
self.join()
import atexit
......
......@@ -6,6 +6,12 @@
import lmdb
import os
import sys
def wrapper_str(raw_str):
if sys.version_info >= (3, 0):
return raw_str.encode()
return raw_str
class LMDB(object):
def __init__(self, max_commit=10000):
......@@ -14,6 +20,7 @@ class LMDB(object):
self._total_size = 0
self._buffer = []
def open(self, database_path, mode='r'):
if mode == 'r':
assert os.path.exists(database_path), 'database path does not exist'
......@@ -25,13 +32,14 @@ class LMDB(object):
self.txn = self.env.begin(write=(mode == 'w'))
self.cursor = self.txn.cursor()
def _try_put(self):
for pair in self._buffer:
key, value = pair
try: self.txn.put(key, value)
except lmdb.MapFullError as e:
new_size = self.env.info()['map_size'] * 2
print 'doubling LMDB map size to %d MB' % (new_size >> 20)
print('doubling LMDB map size to %d MB' % (new_size >> 20))
self.txn.abort()
self.env.set_mapsize(new_size)
self.txn = self.env.begin(write=True)
......@@ -39,22 +47,27 @@ class LMDB(object):
self._cur_put = 0
self._buffer = []
def put(self, key, value):
self._buffer.append((key, value))
self._buffer.append((wrapper_str(key), wrapper_str(value)))
self._cur_put += 1
if (self._cur_put >= self._max_commit): self._try_put()
def commit(self):
self._try_put()
self.txn.commit()
self.txn = self.env.begin(write=True)
def set(self, key):
self.cursor.set_key(key)
self.cursor.set_key(wrapper_str(key))
def get(self, key):
cursor = self.txn.cursor()
return cursor.get(key)
return cursor.get(wrapper_str(key))
def next(self):
if not self.cursor.next():
......@@ -62,11 +75,14 @@ class LMDB(object):
if self.key() == 'size' or self.key() == 'zfill':
self.next()
def key(self):
return self.cursor.key()
def value(self):
return self.cursor.value()
def close(self):
self.env.close()
\ No newline at end of file
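A standalone demo of the `wrapper_str` helper introduced above: LMDB stores raw bytes, so text keys and values must be encoded on Python 3, while a Python 2 `str` is already a byte string.

```python
import sys

def wrapper_str(raw_str):
    # LMDB keys/values are raw bytes; encode text on Python 3 only.
    if sys.version_info >= (3, 0):
        return raw_str.encode()
    return raw_str

print(wrapper_str('size'))   # b'size' on Python 3, 'size' on Python 2
```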
......@@ -6,8 +6,9 @@
import numpy as np
import pprint
import core.workspace as ws
from core.tensor import Tensor
from dragon.config import logger
import dragon.core.workspace as ws
from dragon.core.tensor import Tensor
class Updater(object):
def __init__(self,
......@@ -47,10 +48,10 @@ class Updater(object):
ws.FeedTensor(self._prefix + 'base_lr', np.array([lr], dtype=np.float32))
def echo(self):
print '---------------------------------------------------------'
print 'Optimizer: {}, Using config:'.format(self._type.split('Update')[0])
logger.info('---------------------------------------------------------')
logger.info('Optimizer: {}, Using config:'.format(self._type.split('Update')[0]))
pprint.pprint(self._hyper_params)
print '---------------------------------------------------------'
logger.info('---------------------------------------------------------')
class SGDUpdater(Updater):
......
......@@ -6,4 +6,4 @@
def GetProperty(kwargs, name, default):
return kwargs[name] \
if kwargs.has_key(name) else default
\ No newline at end of file
if name in kwargs else default
\ No newline at end of file
......@@ -6,8 +6,11 @@
import numpy as np
from multiprocessing import Process
from six.moves import range as xrange
from __init__ import GetProperty
from dragon.config import logger
from .__init__ import GetProperty
class BlobFetcher(Process):
def __init__(self, **kwargs):
......@@ -20,7 +23,7 @@ class BlobFetcher(Process):
self.daemon = True
def cleanup():
print 'Terminating BlobFetcher......'
logger.info('Terminating BlobFetcher......')
self.terminate()
self.join()
import atexit
......
......@@ -6,7 +6,7 @@
import dragon.vm.caffe as caffe
import dragon.core.workspace as ws
from minibatch import DataBatch
from .minibatch import DataBatch
class DataLayer(caffe.Layer):
def setup(self, bottom, top):
......
......@@ -9,9 +9,11 @@ import numpy.random as npr
from multiprocessing import Process
import dragon.config as config
from dragon.config import logger
from dragon.tools.db import LMDB
from __init__ import GetProperty
from .__init__ import GetProperty
class DataReader(Process):
def __init__(self, **kwargs):
......@@ -33,7 +35,7 @@ class DataReader(Process):
self.daemon = True
def cleanup():
print 'Terminating DataReader......'
logger.info('Terminating DataReader......')
self.terminate()
self.join()
import atexit
......@@ -46,8 +48,8 @@ class DataReader(Process):
if self._use_shuffle:
self._cur_chunk_idx = 0
self._perm = npr.permutation(self._num_shuffle_parts)
self._start_idx = self._part_idx * self._num_shuffle_parts + self._perm[self._cur_chunk_idx]
self._start_idx = self._start_idx * self._chunk_size
self._start_idx = int(self._part_idx * self._num_shuffle_parts + self._perm[self._cur_chunk_idx])
self._start_idx = int(self._start_idx * self._chunk_size)
if self._start_idx >= self._db_size: self.next_chunk()
self._end_idx = self._start_idx + self._chunk_size
self._end_idx = min(self._db_size, self._end_idx)
......@@ -91,7 +93,7 @@ class DataReader(Process):
self._db.open(self._source)
self._db_size = int(self._db.get('size'))
self._db_zfill = int(self._db.get('zfill'))
self._epoch_size = self._db_size / self._num_parts + 1
self._epoch_size = int(self._db_size / self._num_parts + 1)
# search a optimal chunk size by chunks
if self._chunk_size == -1:
max_chunk_size = self._db._total_size / ((self._num_chunks * (1 << 20)))
......@@ -100,7 +102,7 @@ class DataReader(Process):
self._chunk_size = min_chunk_size
self._num_shuffle_parts = int(math.ceil(self._db._total_size * 1.1 /
(self._num_parts * self._chunk_size << 20)))
self._chunk_size = self._db_size / self._num_shuffle_parts / self._num_parts + 1
self._chunk_size = int(self._db_size / self._num_shuffle_parts / self._num_parts + 1)
# init env
self.reset()
......
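Why the `int(...)` wrappers above matter, in a short sketch: Python 3's `/` is always true division, and these values feed offsets and indices that must be integers.

```python
db_size, num_parts = 50000, 4

print(db_size / num_parts + 1)        # 12501.0 on Python 3 (a float)
print(int(db_size / num_parts + 1))   # 12501, as the commit now does
print(db_size // num_parts + 1)       # floor division, same on 2 and 3
```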
......@@ -9,9 +9,10 @@ import numpy.random as npr
from multiprocessing import Process
import dragon.config as config
from dragon.config import logger
import dragon.vm.caffe.proto.caffe_pb2 as pb
from __init__ import GetProperty
from .__init__ import GetProperty
try:
import cv2
......@@ -47,7 +48,7 @@ class DataTransformer(Process):
self.daemon = True
def cleanup():
print 'Terminating DataTransformer......'
logger.info('Terminating DataTransformer......')
self.terminate()
self.join()
import atexit
......
......@@ -7,13 +7,16 @@
import time
import pprint
from multiprocessing import Queue
from six.moves import range as xrange
import dragon.core.mpi as mpi
from dragon.config import logger
from data_reader import DataReader
from data_transformer import DataTransformer
from blob_fetcher import BlobFetcher
from __init__ import GetProperty
from .data_reader import DataReader
from .data_transformer import DataTransformer
from .blob_fetcher import BlobFetcher
from .__init__ import GetProperty
class DataBatch(object):
def __init__(self, **kwargs):
......@@ -106,11 +109,11 @@ class DataBatch(object):
return self.Q_level_3.get()
def echo(self):
print '---------------------------------------------------------'
print 'BatchReader, Using config:'
logger.info('---------------------------------------------------------')
logger.info('BatchReader, Using config:')
params = {'prefetching': self._prefetch,
'num_readers': self._num_readers,
'num_transformers': self._num_transformers,
'num_fetchers': self._num_fetchers}
pprint.pprint(params)
print '---------------------------------------------------------'
logger.info('---------------------------------------------------------')
......@@ -4,18 +4,18 @@
# Written by Ting Pan
# --------------------------------------------------------
from data import DataLayer, MemoryDataLayer
from .data import DataLayer, MemoryDataLayer
from vision import ConvolutionLayer, DeconvolutionLayer, PoolingLayer, \
from .vision import ConvolutionLayer, DeconvolutionLayer, PoolingLayer, \
LRNLayer, ROIPoolingLayer, ROIAlignLayer, NNResizeLayer
from neuron import ReLULayer, DropoutLayer, TanhLayer, PowerLayer
from loss import SoftmaxWithLossLayer, SigmoidCrossEntropyLossLayer, \
from .neuron import ReLULayer, DropoutLayer, TanhLayer, PowerLayer
from .loss import SoftmaxWithLossLayer, SigmoidCrossEntropyLossLayer, \
L2LossLayer, SmoothL1LossLayer
from mpi import MPIBroadcastLayer, MPIGatherLayer
from .mpi import MPIBroadcastLayer, MPIGatherLayer
from common import InnerProductLayer, AccuracyLayer, BatchNormLayer, \
from .common import InnerProductLayer, AccuracyLayer, BatchNormLayer, \
BatchRenormLayer, BNLayer, ConcatLayer, \
CropLayer, PythonLayer, AddLayer, \
ReshapeLayer, EltwiseLayer, ScaleLayer, \
......
......@@ -5,9 +5,10 @@
# --------------------------------------------------------
from dragon.core.tensor import Tensor
from layer import Layer
import dragon.ops as ops
from .layer import Layer
class InnerProductLayer(Layer):
def __init__(self, LayerParameter):
super(InnerProductLayer, self).__init__(LayerParameter)
......
......@@ -4,9 +4,10 @@
# Written by Ting Pan
# --------------------------------------------------------
from layer import Layer
import dragon.ops as ops
from .layer import Layer
class DataLayer(Layer):
def __init__(self, LayerParameter):
super(DataLayer, self).__init__(LayerParameter)
......
......@@ -4,9 +4,10 @@
# Written by Ting Pan
# --------------------------------------------------------
from layer import Layer
import dragon.ops as ops
from .layer import Layer
class SoftmaxWithLossLayer(Layer):
def __init__(self, LayerParameter):
super(SoftmaxWithLossLayer, self).__init__(LayerParameter)
......
......@@ -4,9 +4,10 @@
# Written by Ting Pan
# --------------------------------------------------------
from layer import Layer
from dragon.ops import MPIBroadcast, MPIGather
from .layer import Layer
class MPIBroadcastLayer(Layer):
def __init__(self, LayerParameter):
super(MPIBroadcastLayer, self).__init__(LayerParameter)
......
......@@ -4,9 +4,10 @@
# Written by Ting Pan
# --------------------------------------------------------
from layer import Layer
import dragon.ops as ops
from .layer import Layer
class ReLULayer(Layer):
def __init__(self, LayerParameter):
super(ReLULayer, self).__init__(LayerParameter)
......
......@@ -4,11 +4,11 @@
# Written by Ting Pan
# --------------------------------------------------------
import dragon
from dragon.core.tensor import Tensor
from layer import Layer
import dragon.ops as ops
from .layer import Layer
class ConvolutionLayer(Layer):
def __init__(self, LayerParameter):
super(ConvolutionLayer, self).__init__(LayerParameter)
......
......@@ -5,11 +5,13 @@
# --------------------------------------------------------
import os
from six.moves import range as xrange
from dragon.vm.caffe import layers as L
from dragon.vm.caffe import params as P
from dragon.vm.caffe.proto import caffe_pb2
def check_if_exist(path):
return os.path.exists(path)
......
......@@ -6,12 +6,14 @@
from collections import OrderedDict
from google.protobuf.text_format import Parse
import dragon.core.workspace as ws
from dragon.core.tensor import Tensor
import dragon.vm.theano as theano
import dragon.vm.theano.tensor as T
import proto.caffe_pb2 as pb
import layers as Layer
from .proto import caffe_pb2 as pb
from . import layers
class Blob(object):
def __init__(self, tuple):
......@@ -36,7 +38,7 @@ class Net(object):
if len(self._net.input) > 0:
for input in self._net.input:
if not self._blobs.has_key(input):
if not input in self._blobs:
# create new tensors
self._blobs[input] = {'data':Tensor(input).Variable(),
'diff': Tensor(input + '_grad')}
......@@ -44,7 +46,7 @@ class Net(object):
for layer in self._net.layer:
if not self.FilterNet(layer): continue
self._layers.append(getattr(Layer, layer.type + 'Layer')(layer))
self._layers.append(getattr(layers, layer.type + 'Layer')(layer))
self.Setup()
......@@ -134,8 +136,8 @@ class Net(object):
raise TypeError('only type of Net can be shared.')
other_params = other_net.params
for name, blobs in self.params.iteritems():
if other_params.has_key(name):
for name, blobs in self.params.items():
if name in other_params:
for idx, blob in enumerate(blobs):
self._swap_blobs[blob.data] = other_params[name][idx].data
......@@ -151,7 +153,7 @@ class Net(object):
ret[output] = ws.FetchTensor(net.blobs[output].data)
return ret
if kwargs:
for name, blob in kwargs.iteritems():
for name, blob in kwargs.items():
ws.FeedTensor(self._inputs_to_tensors[name], blob)
self.function(return_outputs=False, stage='forward')
......@@ -167,7 +169,7 @@ class Net(object):
if not hasattr(self, '_function'): func = self.function
tensors = []
for layer in self._net.layer:
if self.params.has_key(layer.name):
if layer.name in self.params:
for param in self.params[layer.name]:
tensors.append(param.data)
......@@ -182,7 +184,7 @@ class Net(object):
def blobs(self):
""" simply follow the pycaffe style """
return OrderedDict([(name,Blob((blob['data'], blob['diff'])))
for name, blob in self._blobs.iteritems()])
for name, blob in self._blobs.items()])
@property
def params(self):
""" simply follow the pycaffe style """
......@@ -221,6 +223,6 @@ class Net(object):
class PartialNet(Net):
def __init__(self, *args, **kwargs):
self._blobs = {}
for input, tensor in kwargs.iteritems():
for input, tensor in kwargs.items():
self._blobs[input] = {'data': tensor, 'diff': None}
super(PartialNet, self).__init__(*args)
......@@ -5,7 +5,6 @@
# --------------------------------------------------------
from collections import OrderedDict, Counter
from .proto import caffe_pb2
import six
......
......@@ -5,17 +5,20 @@
# --------------------------------------------------------
import time
from six.moves import range as xrange
import dragon.core.mpi as mpi
import dragon.updaters as updaters
import dragon.tools.summary_writer as sw
import proto.caffe_pb2 as pb
from dragon.vm.caffe.proto import caffe_pb2 as pb
from dragon.core.workspace import FetchTensor, Snapshot
from dragon.config import logger
from dragon.vm.caffe.common import root_solver
from dragon.vm.caffe.net import Net
from dragon.vm.theano import function
from google.protobuf.text_format import Parse
class Solver(object):
def __init__(self, prototxt):
self._param = pb.SolverParameter()
......@@ -102,13 +105,13 @@ class Solver(object):
if root_solver() and self._param.display:
if self._iter % self._param.display == 0:
base_lr = self._updater.lr
print 'Iteration %d, lr = %s, loss = %f, time = %.2fs' % \
(self._iter, str(base_lr), smoothed_loss, time.time() - tic)
logger.info('Iteration %d, lr = %s, loss = %f, time = %.2fs' % \
(self._iter, str(base_lr), smoothed_loss, time.time() - tic))
tic = time.time()
for idx, net_output in enumerate(self._net._net_outputs):
vals = FetchTensor(self._net.blobs[net_output].data)
for val in vals:
print ' Train net output #{}({}): {}'.format(idx, net_output, val)
logger.info(' Train net output #{}({}): {}'.format(idx, net_output, val))
self.scalar_writer.add_summary((net_output, val), self._iter)
self._iter = self._iter + 1
......@@ -137,9 +140,9 @@ class Solver(object):
for idx, val in enumerate(vals):
test_score[i] += val; i = i + 1
if not root_solver(): return
print 'Iteration {}, Test net #{}'.format(self._iter, test_idx)
logger.info('Iteration {}, Test net #{}'.format(self._iter, test_idx))
for idx, score in enumerate(test_score):
print ' Test net output #%d(%s): %.4f' % (idx, output_id[idx], score / test_iter)
logger.info(' Test net output #%d(%s): %.4f' % (idx, output_id[idx], score / test_iter))
self.scalar_writer.add_summary((output_id[idx], score / test_iter), self._iter)
......@@ -164,8 +167,8 @@ class Solver(object):
if self._current_step < len(self._param.stepvalue) \
and self._iter >= self._param.stepvalue[self._current_step]:
self._current_step = self._current_step + 1
print 'MultiStep Status: Iteration {}, step = {}' \
.format(self._iter, self._current_step)
logger.info('MultiStep Status: Iteration {}, step = {}' \
.format(self._iter, self._current_step))
new_lr = self._param.base_lr * \
pow(self._param.gamma, self._current_step)
self._updater.lr = new_lr
......@@ -178,8 +181,8 @@ class Solver(object):
else:
if self._current_step + 1 < len(stage_iters):
self._current_step = self._current_step + 1
print 'MultiFixed Status: Iteration {}, stage = {}' \
.format(self._iter, self._current_step)
logger.info('MultiFixed Status: Iteration {}, stage = {}' \
.format(self._iter, self._current_step))
self._updater.lr = stage_lrs[self._current_step]
if policy == 'inv':
......@@ -223,7 +226,7 @@ class SGDSolver(Solver):
self._updater = updaters.SGDUpdater(**self._update_param)
# generates update targets
for layer, blobs in self._net.params.iteritems(): self._lr_blobs.extend(blobs)
for layer, blobs in self._net.params.items(): self._lr_blobs.extend(blobs)
for idx, blob in enumerate(self._lr_blobs):
if self._net._lr_mults[idx] > 0:
if blob.diff is None: continue
......@@ -248,7 +251,7 @@ class NesterovSolver(Solver):
self._updater = updaters.NesterovUpdater(**self._update_param)
# generates update targets
for layer, blobs in self._net.params.iteritems(): self._lr_blobs.extend(blobs)
for layer, blobs in self._net.params.items(): self._lr_blobs.extend(blobs)
for idx, blob in enumerate(self._lr_blobs):
if self._net._lr_mults[idx] > 0:
if blob.diff is None: continue
......@@ -273,7 +276,7 @@ class RMSPropSolver(Solver):
self._updater = updaters.RMSPropUpdater(**self._update_param)
# generates update targets
for layer, blobs in self._net.params.iteritems(): self._lr_blobs.extend(blobs)
for layer, blobs in self._net.params.items(): self._lr_blobs.extend(blobs)
for idx, blob in enumerate(self._lr_blobs):
if self._net._lr_mults[idx] > 0:
if blob.diff is None: continue
......@@ -297,7 +300,7 @@ class AdamSolver(Solver):
self._updater = updaters.AdamUpdater(**self._update_param)
# generates update targets
for layer, blobs in self._net.params.iteritems(): self._lr_blobs.extend(blobs)
for layer, blobs in self._net.params.items(): self._lr_blobs.extend(blobs)
for idx, blob in enumerate(self._lr_blobs):
if self._net._lr_mults[idx] > 0:
if blob.diff is None: continue
......
......@@ -6,13 +6,14 @@
from dragon.core.scope import TensorScope as variable_scope
from dragon.core.scope import TensorScope as name_scope
from core.session import *
from core.variables import *
from core.collection import *
from core.device import *
import contrib
import ops.nn as nn
from ops import *
from training import train
from utils.gradients import *
from .core.session import *
from .core.variables import *
from .core.collection import *
from .core.device import *
from . import contrib
from .ops import nn
from .ops import *
from .training import train
from .utils.gradients import *
......@@ -4,4 +4,4 @@
# Written by Ting Pan
# --------------------------------------------------------
from layers import *
\ No newline at end of file
from .layers import *
\ No newline at end of file
......@@ -4,4 +4,4 @@
# Written by Ting Pan
# --------------------------------------------------------
from layers import *
\ No newline at end of file
from .layers import *
\ No newline at end of file
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import numpy as np
__all__ = ['int32', 'int64', 'float32', 'bool']
int32 = np.int32
int64 = np.int64
float32 = np.float32
bool = np.bool
\ No newline at end of file
......@@ -20,7 +20,7 @@ TRAINABLE_VARIABLES = {}
def initialize_all_variables():
outputs = []
for tensor, initializer in VARIABLES.iteritems():
for tensor, initializer in VARIABLES.items():
outputs.append(initializer)
return outputs
......
......@@ -4,10 +4,9 @@
# Written by Ting Pan
# --------------------------------------------------------
from dtypes import float32, bool
from array_ops import *
from init_ops import *
from random_ops import *
from constant_op import *
from math_ops import *
from control_flow_ops import *
\ No newline at end of file
from .array_ops import *
from .constant_op import *
from .control_flow_ops import *
from .init_ops import *
from .math_ops import *
from .random_ops import *
\ No newline at end of file
......@@ -15,8 +15,8 @@ __all__ = [
'reshape'
]
import dtypes
import dragon.ops as ops
from ..core import dtypes
def expand_dims(input, axis=None, name=None, dim=None):
......
......@@ -7,9 +7,10 @@
__all__ = ['constant']
import numpy as np
import dragon.core.workspace as ws
from dragon.core.tensor import Tensor
import dtypes
from ..core import dtypes
def constant(value, dtype=None, shape=None, name=None):
......
......@@ -16,8 +16,8 @@ __all__ = [
'glorot_normal_initializer',
]
import dtypes
import dragon.ops as ops
from ..core import dtypes
class Initializer(object):
......
......@@ -25,6 +25,8 @@ __all__ = [
'add_n'
]
from six.moves import range as xrange
import dragon.ops as ops
......
......@@ -4,4 +4,4 @@
# Written by Ting Pan
# --------------------------------------------------------
from nn_ops import *
from .nn_ops import *
......@@ -11,7 +11,8 @@ __all__ = [
]
import dragon.ops as ops
import dtypes
from ..core import dtypes
def random_normal(shape,
mean=0.0,
......
......@@ -4,5 +4,5 @@
# Written by Ting Pan
# --------------------------------------------------------
from optimizer import *
from saver import *
\ No newline at end of file
from .optimizer import *
from .saver import *
\ No newline at end of file
......@@ -4,11 +4,14 @@
# Written by Ting Pan
# --------------------------------------------------------
from six.moves import range as xrange
from dragon.vm.tensorflow.core.variables import placeholder
def feed_check(feed_dict):
if feed_dict is not None:
for key, value in feed_dict.iteritems():
for key, value in feed_dict.items():
if type(key) != placeholder:
raise TypeError('only a placeholder can be feeded.')
if key.shape is not None:
......
......@@ -4,9 +4,11 @@
# Written by Ting Pan
# --------------------------------------------------------
from core.swap import shared
from core.function import function
from core.scan import scan
import config
import numpy as np
from .core.swap import shared
from .core.function import function
from .core.scan import scan
floatX = np.float32
......@@ -5,14 +5,18 @@
# --------------------------------------------------------
import copy
from collections import OrderedDict
import numpy as np
import dragon.core.workspace as ws
import sys
import dragon.core.mpi as mpi
import dragon.core.workspace as ws
import dragon.protos.dragon_pb2 as pb
from dragon.core.tensor import Tensor
from dragon.core.utils import MakeArgument
from dragon.core.gradient_maker import GraphGradientMaker
from dragon.core.scope import GetOperatorName, GetTensorName
from dragon.utils import MakeArgument
from dragon.core.tensor import Tensor
def GraphDef_Grad(graph_def, targets):
""" generate all graident targets for CC Graph """
......@@ -48,7 +52,7 @@ def GraphDef_Update(graph_def, updater):
extra_kwargs['domain'] = updater._prefix
# wrap hyper-parameters as Tensor for CC
for k,v in updater._hyper_params.iteritems():
for k,v in updater._hyper_params.items():
ws.FeedTensor(updater._prefix + k, np.array([v], dtype=np.float32))
# check data parallel if necessary
......@@ -69,7 +73,7 @@ def GraphDef_Update(graph_def, updater):
_, u_target.name = GetOperatorName()
for tensor in tensors:
u_target.tensor.append(tensor)
for k,v in kwargs.iteritems():
for k,v in kwargs.items():
u_target.arg.add().CopyFrom(MakeArgument(k, v))
graph_def.u_target.extend([u_target])
......@@ -109,12 +113,16 @@ def function(inputs=[], outputs=[], swaps=None, updater=None):
existing_grads = False
for output in outputs:
graph_def.target.extend([output.name])
all_exprs = dict(all_exprs, **output.expressions)
if sys.version_info >= (3, 0):
all_exprs = OrderedDict(all_exprs, **output.expressions)
else:
all_exprs = dict(all_exprs, **output.expressions)
all_extra_targets = all_extra_targets.union(output.extra_targets)
if len(output.grad_wrts) > 0: existing_grads = True
for extra_target in all_extra_targets: graph_def.target.extend([extra_target])
# we should sort out the topology of these operators before using
all_exprs = sorted(all_exprs.iteritems(), key=lambda d:d[0])
all_exprs = sorted(all_exprs.items(), key=lambda d:d[0])
forward_ops = copy.deepcopy([v for k,v in all_exprs])
# handle swap
......@@ -122,16 +130,19 @@ def function(inputs=[], outputs=[], swaps=None, updater=None):
name_dict = {}
external_input_exprs = {}
for old_tenosr, new_tensor in swaps.iteritems():
for old_tenosr, new_tensor in swaps.items():
if isinstance(new_tensor, Tensor):
name_dict[old_tenosr.name] = new_tensor._name
external_input_exprs = dict(external_input_exprs, **new_tensor.expressions)
if sys.version_info >= (3, 0):
external_input_exprs = OrderedDict(external_input_exprs, **new_tensor.expressions)
else:
external_input_exprs = dict(external_input_exprs, **new_tensor.expressions)
elif isinstance(new_tensor, np.ndarray): ws.FeedTensor(new_tensor, GetTensorName())
external_input_ops = [v for k,v in external_input_exprs.iteritems()]
external_input_ops = [v for k,v in external_input_exprs.items()]
for op in forward_ops:
op.input.extend([name_dict[input] if name_dict.has_key(input)
else input for input in op.input])
del op.input[:len(op.input)/2]
op.input.extend([name_dict[input] if input in name_dict
else input for input in op.input])
del op.input[:int(len(op.input)/2)]
forward_ops = external_input_ops + forward_ops
......
......@@ -41,7 +41,7 @@ def scan(fn, sequences, outputs_info, n_steps=None, axis=0):
for output in outputs:
graph_def.target.extend([output._name])
all_exprs = dict(all_exprs, **output.expressions)
all_exprs = sorted(all_exprs.iteritems(), key=lambda d:d[0])
all_exprs = sorted(all_exprs.items(), key=lambda d:d[0])
forward_ops = copy.deepcopy([v for k,v in all_exprs])
graph_def.op.extend(forward_ops)
......
......@@ -5,8 +5,9 @@
# --------------------------------------------------------
from dragon.core.tensor import Tensor
import nnet
import ops
from . import nnet
from . import ops
def matrix(name=None):
if name is None: return Tensor().Variable()
......
......@@ -4,6 +4,4 @@
# Written by Ting Pan
# --------------------------------------------------------
import ops
sigmoid = ops.sigmoid
\ No newline at end of file
from .ops import sigmoid
\ No newline at end of file
......@@ -397,4 +397,4 @@ void ProposalOp<Context>::RunWithType() {
template void ProposalOp<CUDAContext>::RunWithType<float>();
}
}
\ No newline at end of file
syntax = "proto2";
message TensorProto {
repeated int32 dims = 1;
enum DataType {
......
......@@ -3,7 +3,7 @@
### Compile Requirements for C++
0. Google Protocol Buffer
1. Python (2.7, 64bit) &nbsp; | &nbsp; Anaconda (2.7, 64bit)
1. Python (2 or 3, 64bit) &nbsp; | &nbsp; Anaconda (2 or 3, 64bit)
2. CUDA [Optional]
3. CUDNN [Optional]
4. OpenMPI [Optional]
......@@ -24,16 +24,17 @@
3. (Optional) Download 3rdparty.zip and unzip to Dragon/3rdparty (Out of source code dir)
[*Win64*](https://pan.baidu.com/s/1pLmGOLt) (OpenBLAS / Protobuf for VS2013 / CUDNN v6 / Microsoft MPI)
[*Win64*](https://pan.baidu.com/s/1pLmGOLt) (OpenBLAS / Protobuf2.6 for VS2013 / CUDNN v6 / Microsoft MPI)
[*Linux64*](https://pan.baidu.com/s/1qXPEOWG) (OpenMPI)
4. Configure Dragon/CMakeLists.txt
- Select optional libraries [CUDA / CUDNN / BLAS / SSE / MPI / MPI_CUDA_AWARE / CUDA_FP16]
- Select optional libraries [PYTHON3 / CUDA / CUDNN / BLAS / SSE / MPI / MPI_CUDA_AWARE / CUDA_FP16]
- Set 3rdparty path (recommended to keep the default)
- Set python & numpy root path
- Set cuda compiling architectures if necessary
- GCC version(4.8+, 5.0-) should add ``-std=c++11`` to ``CUDA_NVCC_FLAGS``, if ``nullptr`` is not found.
- Set Python include path & Numpy root path
- Set CUDA compiling architectures if necessary
- GCC version(4.8+, 5.0-) should add ``-std=c++11`` to ``CUDA_NVCC_FLAGS``, if ``nullptr`` is not found
- We generate *.h and *.cc files under ``Dragon/src/protos`` with protobuf 2.6; run protoc yourself if a higher version is required
5. Environment Variables
### Linux(Only for OpenMPI):
......
......@@ -11,6 +11,7 @@ import sys
import time
import shutil
import tarfile
from six.moves import range as xrange
import cv2
......@@ -23,6 +24,11 @@ def untar(tar_file):
t = tarfile.open(tar_file)
t.extractall(path='data')
def wrapper_str(raw_str):
if sys.version_info >= (3, 0):
return raw_str.encode()
return raw_str
def extract_images():
prefix = 'data/cifar-10-batches-py'
extract_path = 'data/extract'
......@@ -48,10 +54,9 @@ def extract_images():
import cPickle
with open(batch, 'rb') as f:
dict = cPickle.load(f)
for item_idx in xrange(len(dict['labels'])):
im = dict['data'][item_idx].reshape((3, 32, 32))
label = dict['labels'][item_idx]
for item_idx in xrange(len(dict[wrapper_str('labels')])):
im = dict[wrapper_str('data')][item_idx].reshape((3, 32, 32))
label = dict[wrapper_str('labels')][item_idx]
im = im.transpose((1, 2, 0))
im = im[:, :, ::-1]
filename = str(total_idx).zfill(ZFILL) + '.jpg'
......@@ -79,7 +84,7 @@ def make_db(image_path, label_path, database_path):
if os.path.isdir(database_path) is True:
raise ValueError('the database path already exists.')
print 'start time: ', time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime())
print('start time: ', time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime()))
db = LMDB(max_commit=10000)
db.open(database_path, mode='w')
......@@ -97,8 +102,8 @@ def make_db(image_path, label_path, database_path):
count += 1
if count % 10000 == 0:
now_time = time.time()
print '{0} / {1} in {2:.2f} sec'.format(
count, total_line, now_time - start_time)
print('{0} / {1} in {2:.2f} sec'.format(
count, total_line, now_time - start_time))
db.commit()
record = record.split()
......@@ -116,26 +121,26 @@ def make_db(image_path, label_path, database_path):
db.put(zfill_flag.format(count - 1), datum.SerializeToString())
now_time = time.time()
print '{0} / {1} in {2:.2f} sec'.format(count, total_line, now_time - start_time)
db.put('size', str(count))
db.put('zfill', str(ZFILL))
print('{0} / {1} in {2:.2f} sec'.format(count, total_line, now_time - start_time))
db.put('size', wrapper_str(str(count)))
db.put('zfill', wrapper_str(str(ZFILL)))
db.commit()
db.close()
shutil.copy(label_path, database_path + '/image_list.txt')
end_time = time.time()
print '{0} images have been stored in the database.'.format(total_line)
print 'This task finishes within {0:.2f} seconds.'.format(
end_time - start_time)
print 'The size of database is {0} MB.'.format(
float(os.path.getsize(database_path + '/data.mdb') / 1000 / 1000))
print('{0} images have been stored in the database.'.format(total_line))
print('This task finishes within {0:.2f} seconds.'.format(
end_time - start_time))
print('The size of database is {0} MB.'.format(
float(os.path.getsize(database_path + '/data.mdb') / 1000 / 1000)))
if __name__ == '__main__':
untar('data/cifar-10-python.tar.gz')
#untar('data/cifar-10-python.tar.gz')
extract_images()
#extract_images()
make_db('data/extract/JPEGImages',
'data/extract/ImageSets/train.txt',
......
......@@ -14,7 +14,7 @@ classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
# init
caffe.set_mode_gpu()
#caffe.set_mode_gpu()
# load net
net = caffe.Net("cifar10_quick_deploy.prototxt",
'snapshots/cifar10_quick_iter_5000.caffemodel', caffe.TEST)
......@@ -39,9 +39,9 @@ def run(filename):
pred = score.argmax(0)
# show
print classes[pred]
print(classes[pred])
if __name__ == '__main__':
run('data/demo/cat.jpg')
run('data/demo/cat.jpg')
\ No newline at end of file
......@@ -15,5 +15,5 @@ if __name__ == '__main__':
# solve
solver = caffe.SGDSolver('cifar10_full_solver.prototxt')
solver.step(5000)
solver.snapshot()
\ No newline at end of file
solver.step(70000)
solver.snapshot()