Commit 40e94d24 by Ting PAN

Export Workspace for PyModule

1 parent b35f9320
Showing with 1168 additions and 1496 deletions
------------------------------------------------------------------------
The list of most significant changes made over time in Dragon.
Dragon 0.3.0.0 (20190309)
Dragon 0.3.0.0 (20190402)
DRAGON_VERSION == 3000
Changes (w.r.t. Dragon 0.2.2.13):
......@@ -36,6 +36,8 @@ Preview Features:
- The behavior of accumulating gradients has been removed.
- The Python module is now in charge of the ``Workspace`` (see the sketch below).
Bugs fixed:
......
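The changelog entry above about the Python module taking charge of the ``Workspace`` maps onto the names imported later in this diff (``Workspace``, ``get_default_workspace``, ``reset_default_workspace``). A minimal, hedged usage sketch; anything beyond those three names is an assumption:

from dragon.core.workspace import (
    Workspace, get_default_workspace, reset_default_workspace)

ws = get_default_workspace()     # the workspace that ops and tensors go to
scratch = Workspace('scratch')   # assumed: a named, independent workspace
reset_default_workspace()        # assumed: drops and recreates the default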
......@@ -22,17 +22,15 @@ class GraphBase {
public:
/*! \brief Default constructor */
GraphBase(
const GraphDef& meta_graph,
const GraphDef& def,
Workspace* ws);
/*! \brief Default destructor */
virtual ~GraphBase() {}
GraphDef BuildUpdateOps(const GraphDef& input_def);
/*! \brief Create a graph from the optimized def */
virtual bool Create(
const GraphDef& optimized_graph,
const GraphDef& def,
Workspace* ws) = 0;
/*! \brief Run the graph once synchronously */
......@@ -58,14 +56,14 @@ class GraphBase {
class Graph : public GraphBase {
public:
/*! \brief Default constructor */
Graph(const GraphDef& meta_graph, Workspace* ws);
Graph(const GraphDef& def, Workspace* ws);
/*! \brief Default destructor */
virtual ~Graph() { for (auto* op : ops_) delete op; }
/*! \brief Create a graph from the optimized def */
bool Create(
const GraphDef& optimized_graph,
const GraphDef& def,
Workspace* ws) override;
/*! \brief Run the graph once synchronously */
......
......@@ -31,7 +31,7 @@ class GraphGradientMaker {
const GraphDef& forward_def,
GraphDef& backward_def);
void Share(GraphDef& graph);
GraphDef Share(const GraphDef& input_def);
void SetTerms(const Map<string, string>& terms) { terms_ = terms; }
void SetOperatorPrefix(const string& prefix) { op_prefix_ = prefix; }
......
......@@ -42,9 +42,9 @@ class Tensor {
d = dims[i]; strides_[i] = (int64_t)new_size;
CHECK_GE(d, 0);
if (d > 0) new_size *= d;
} if (own_mem_) {
if (size_ != new_size &&
capacity_ < new_size * meta_.itemsize()) {
}
if (own_mem_) {
if (capacity_ < new_size * meta_.itemsize()) {
memory_.reset();
capacity_ = 0;
}
......
......@@ -29,23 +29,28 @@ class Workspace {
typedef Map<string, unique_ptr<OperatorBase> > OperatorMap;
typedef Map<string, unique_ptr<GraphBase> > GraphMap;
typedef Map<string, Workspace*> WorkspaceMap;
/*! \brief Constructor */
Workspace(const string& name) : name_(name) { InitWorkspace(); }
Workspace(const string& name) : name_(name) { Initialize(); }
/*! \brief Return the name of this workspace */
const string& name() { return name_; }
/*! \brief Create some internal tensors */
void InitWorkspace();
/*! \brief Return the names of the stored tensors */
vector<string> tensors() const;
/*! \brief Return the names of the stored graphs */
vector<string> graphs() const;
/*! \brief Move an external workspace into this workspace */
Workspace* Move(Workspace* ws);
/*! \brief Create some internal tensors */
void Initialize();
/*! \brief Destroy all the tensors */
void Clear();
/*! \brief Merge from an external workspace */
void MergeFrom(Workspace* ws);
/*! \brief Query the real name of the specified tensor */
string GetTensorName(const string& name) const;
......@@ -66,14 +71,11 @@ class Workspace {
/*! \brief Reset the specified tensor */
void ResetTensor(const string& name);
/*! \brief Return all the stored tensor names */
vector<string> GetTensors() const;
/*! \brief Whether the specified filler is in this workspace */
bool HasFiller(const string& name, bool use_remote = true) const;
/*! \brief Create the specified filler */
void CreateFiller(const TensorFillerProto filler);
void CreateFiller(const TensorFillerProto& filler);
/*! \brief Return the specified filler */
const TensorFillerProto* GetFiller(const string& name) const;
......@@ -82,27 +84,26 @@ class Workspace {
template <class Context>
vector<void*> caches(const vector<size_t>& segments) {
int64_t nbytes = 0;
vector<void*> ret(segments.size());
for (auto& segment : segments) nbytes += (int64_t)segment;
Tensor* cache_t = CreateTensor("/share/cache");
cache_t->Reshape({ nbytes });
vector<void*> Bcaches(segments.size());
Bcaches[0] = cache_t->template mutable_data<uint8_t, Context>();
auto* T = CreateTensor("/share/cache")->Reshape({ nbytes });
ret[0] = T->template mutable_data<uint8_t, Context>();
for (int i = 1; i < segments.size(); i++)
Bcaches[i] = (uint8_t*)Bcaches[i - 1] + segments[i - 1];
return Bcaches;
ret[i] = (uint8_t*)ret[i - 1] + segments[i - 1];
return ret;
}
/*! \brief Create temporary cache segments with the specified type */
template <typename T, class Context>
vector<T*> caches(const vector<int64_t>& segments) {
vector<size_t> Tsegments;
for (auto& segment : segments)
Tsegments.emplace_back(segment * sizeof(T));
vector<void*> Bcaches = caches<Context>(Tsegments);
vector<T*> Tcaches(segments.size());
vector<size_t> segments_in_byte;
vector<T*> ret(segments.size());
for (const auto& e : segments)
segments_in_byte.emplace_back(e * sizeof(T));
auto ret_in_byte = caches<Context>(segments_in_byte);
for (int i = 0; i < segments.size(); i++)
Tcaches[i] = (T*)Bcaches[i];
return Tcaches;
ret[i] = (T*)ret_in_byte[i];
return ret;
}
/*! \brief Create an operator in this workspace */
......@@ -124,9 +125,6 @@ class Workspace {
const string& exclude,
int stream_id = 0);
/*! \brief Return all the stored graph names */
vector<string> GetGraphs() const;
/*! \brief Set an alias for the tensor */
bool SetTensorAlias(const string& name, const string& alias);
......@@ -160,7 +158,7 @@ class Workspace {
GraphMap graph_map_;
/*! \brief Store the remote workspaces */
WorkspaceMap workspace_map_;
vector<Workspace*> remote_workspaces_;
};
} // namespace dragon
......
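The two ``caches()`` overloads above carve a single flat ``/share/cache`` tensor into per-segment pointers by accumulating byte offsets. A short Python sketch of the same offset arithmetic (an illustration only, not the C++ API):

def segment_offsets(segments_in_bytes):
    # Return each segment's byte offset inside one flat buffer plus the total.
    offsets, cursor = [], 0
    for nbytes in segments_in_bytes:
        offsets.append(cursor)
        cursor += nbytes
    return offsets, cursor

# Three segments of 16, 8 and 32 bytes share one 56-byte buffer:
print(segment_offsets([16, 8, 32]))  # ([0, 16, 24], 56)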
......@@ -40,8 +40,11 @@ class GradientGatherOp final : public Operator<Context> {
public:
GradientGatherOp(const OperatorDef& def, Workspace* ws)
: Operator<Context>(def, ws) {
for (int i = 0; i < InputSize(); i++)
if (Input(i).name() != "NULL") indices.push_back(i);
for (int i = 0; i < InputSize(); i++) {
if (Input(i).name() != "NULL") {
indices.push_back(i);
}
}
}
USE_OPERATOR_FUNCTIONS;
......@@ -53,6 +56,16 @@ class GradientGatherOp final : public Operator<Context> {
};
template <class Context>
class GradientAddOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(GradientAddOp);
USE_OPERATOR_FUNCTIONS;
void RunOnDevice() override;
template <typename T> void RunWithType();
};
template <class Context>
class StopGradientOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(StopGradientOp);
......
......@@ -1033,7 +1033,6 @@ void MixedPrecisionUpdate(
template <typename T, class Context>
void BiasAdd(
const int count,
const int outer_dim,
const int dim,
const int inner_dim,
......
......@@ -38,7 +38,7 @@ Workspace* ResetWorkspace(const std::string& name) {
g_workspaces[name].reset(new Workspace(name));
for (auto& sub_workspace : sub_workspaces[name]) {
if (g_workspaces.count(sub_workspace) > 0)
g_workspaces[name]->Move(
g_workspaces[name]->MergeFrom(
g_workspaces[sub_workspace].get());
}
return g_workspaces[name].get();
......@@ -55,7 +55,7 @@ void MoveWorkspace(
std::unique_lock<std::mutex> lock(g_mutex);
CHECK(src) << "\nGiven source workspace is invalid.";
CHECK(dst) << "\nGiven destination workspace is invalid.";
dst->Move(src);
dst->MergeFrom(src);
sub_workspaces[dst->name()].push_back(src->name());
LOG(INFO) << "Move the Workspace(" << src->name() << ") "
<< "into the Workspace(" << dst->name() << ").";
......
......@@ -36,29 +36,6 @@ void AddGradientMethods(pybind11::module& m) {
vector<pybind11::bytes>, vector<string>, vector<float>
>(grad_ops, grad.g_inputs, grad.defaults);
});
m.def("FlowGradients", [](
const vector<OperatorDef*>& forward_ops,
const vector<string>& targets,
const vector<string>& input_grads,
const vector<string>& ignore_grads,
const bool is_sharing,
const bool verbose) {
// Make => Optimize => Run
GraphDef backward_ops;
GraphGradientMaker maker;
for (auto& grad : input_grads) maker.AddExternalGrad(grad);
for (auto& grad : ignore_grads) maker.AddIgnoreGrad(grad);
maker.Make(forward_ops, targets, backward_ops);
if (is_sharing) maker.Share(backward_ops);
pybind11::gil_scoped_release g;
for (auto& op : backward_ops.op()) {
if (op.type().empty()) continue;
if (verbose) std::cout << op.DebugString() << std::endl;
if (op.has_uid()) ws()->RunOperator(op);
else ws()->RunOperatorOnce(op);
}
});
}
} // namespace python
......
......@@ -16,15 +16,17 @@
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include "py_types.h"
#include "core/common.h"
#include "core/registry.h"
#include "core/context.h"
#include "core/context_cuda.h"
#include "core/operator.h"
#include "core/operator_gradient.h"
#include "core/graph_gradient.h"
#include "core/registry.h"
#include "core/workspace.h"
#include "core/context_cuda.h"
#include "core/graph_gradient.h"
#include "core/operator_gradient.h"
#include "utils/caffemodel.h"
#include "onnx/onnx_backend.h"
#include <pybind11/stl.h>
#include <pybind11/pybind11.h>
......@@ -136,8 +138,6 @@ class NumpyFeeder : public TensorFeederBase {
}
};
Workspace* ws();
} // namespace python
} // namespace dragon
......
/*!
* Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
*
* Licensed under the BSD 2-Clause License.
* You should have received a copy of the BSD 2-Clause License
* along with the software. If not, See,
*
* <https://opensource.org/licenses/BSD-2-Clause>
*
* ------------------------------------------------------------
*/
#ifndef DRAGON_PYTHON_PY_GRAPH_H_
#define DRAGON_PYTHON_PY_GRAPH_H_
#include "py_dragon.h"
namespace dragon {
namespace python {
void AddGraphMethods(pybind11::module& m) {
/*! \brief Create a graph from the serialized def */
m.def("CreateGraph", [](
const string& serialized,
const bool verbose) {
GraphDef graph_def;
if (!graph_def.ParseFromString(serialized))
LOG(FATAL) << "Failed to parse the GraphDef.";
auto* graph = ws()->CreateGraph(graph_def);
if (verbose) {
// It is not a good design to print the debug string
auto* graph_tensor = ws()->CreateTensor(
"/graph_def/optimized/" + graph->name());
if (graph_tensor->count() > 0) {
auto* data = graph_tensor->mutable_data<string, CPUContext>();
std::cout << data[0] << std::endl;
}
}
// The returned graph name may differ from the one in the def;
// a unique dummy name is generated when creating the graph
return graph->name();
});
/*! \brief Run an existing graph */
m.def("RunGraph", [](
const string& name,
const string& include,
const string& exclude) {
pybind11::gil_scoped_release g;
ws()->RunGraph(name, include, exclude);
});
/*! \brief List all of the existing graphs */
m.def("Graphs", []() { ws()->GetGraphs(); });
}
} // namespace python
} // namespace dragon
#endif // DRAGON_PYTHON_PY_GRAPH_H_
\ No newline at end of file
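``CreateGraph`` above may rename the graph, so callers must run it by the returned name rather than the name written into the def. A hedged Python-side sketch; the binding names come from this file, while the import path of the compiled module is an assumption:

import dragon.import_c_api as _C
from dragon.proto import dragon_pb2

graph_def = dragon_pb2.GraphDef(name='my_graph')   # illustrative, empty graph
graph_name = _C.CreateGraph(graph_def.SerializeToString(), False)
# The returned name may differ from 'my_graph'; always use it afterwards.
_C.RunGraph(graph_name, '', '')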
/*!
* Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
*
* Licensed under the BSD 2-Clause License.
* You should have received a copy of the BSD 2-Clause License
* along with the software. If not, See,
*
* <https://opensource.org/licenses/BSD-2-Clause>
*
* ------------------------------------------------------------
*/
#ifndef DRAGON_PYTHON_PY_IO_H_
#define DRAGON_PYTHON_PY_IO_H_
#include "py_dragon.h"
namespace dragon {
namespace python {
void AddIOMethods(pybind11::module& m) {
m.def("Snapshot", [](
const string& filename,
vector<string>& names,
const int format) {
vector<Tensor*> tensors;
switch (format) {
case 0: // Pickle
LOG(FATAL) << "Format depends on Pickle. "
"Can't be used in C++.";
break;
case 1: // CaffeModel
for (const auto& e : names)
tensors.emplace_back(ws()->GetTensor(e));
SavaCaffeModel(filename, tensors);
break;
default:
LOG(FATAL) << "Unknwon format, code: " << format;
}
});
m.def("Restore", [](
const string& filename,
const int format) {
switch (format) {
case 0: // Pickle
LOG(FATAL) << "Format depends on Pickle. "
"Can't be used in C++.";
break;
case 1: // CaffeModel
LoadCaffeModel(filename, ws());
break;
default:
LOG(FATAL) << "Unknwon format, code: " << format;
}
});
}
} // namespace python
} // namespace dragon
#endif // DRAGON_PYTHON_PY_IO_H_
\ No newline at end of file
/*!
* Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
*
* Licensed under the BSD 2-Clause License.
* You should have received a copy of the BSD 2-Clause License
* along with the software. If not, See,
*
* <https://opensource.org/licenses/BSD-2-Clause>
*
* ------------------------------------------------------------
*/
#ifndef DRAGON_PYTHON_PY_ONNX_H_
#define DRAGON_PYTHON_PY_ONNX_H_
#include "onnx/onnx_backend.h"
#include "py_dragon.h"
namespace dragon {
namespace python {
void AddONNXMethods(pybind11::module& m) {
m.def("ImportONNXModel", [](
const string& model_path) {
GraphDef init_graph, pred_graph;
onnx::ONNXBackend onnx_backend;
onnx_backend.Prepare(model_path, &init_graph, &pred_graph);
// Serializing the initializer back to Python is intractable,
// so we apply it immediately here
ws()->CreateGraph(init_graph);
ws()->RunGraph(init_graph.name(), "", "");
return pybind11::bytes(pred_graph.SerializeAsString());
});
}
} // namespace python
} // namespace dragon
#endif // DRAGON_PYTHON_PY_ONNX_H_
\ No newline at end of file
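``ImportONNXModel`` above applies the initializer graph immediately on the C++ side and hands back only the predict graph as serialized bytes. A hedged sketch of consuming it from Python; the import path and the file name are assumptions:

import dragon.import_c_api as _C
from dragon.proto import dragon_pb2

pred_bytes = _C.ImportONNXModel('/path/to/model.onnx')   # path is illustrative
pred_graph = dragon_pb2.GraphDef()
pred_graph.ParseFromString(pred_bytes)   # initializers were already applied
graph_name = _C.CreateGraph(pred_graph.SerializeToString(), False)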
......@@ -20,36 +20,14 @@ namespace dragon {
namespace python {
void AddOperatorMethods(pybind11::module& m) {
/*! \brief Return all the registered operators */
m.def("RegisteredOperators", []() { return CPUOperatorRegistry()->keys(); });
/*! \brief Return all the operators without gradients */
m.def("NoGradientOperators", []() { return NoGradientRegistry()->keys(); });
/*! \brief Run an operator from the def reference */
m.def("RunOperator", [](
OperatorDef* def,
const bool verbose) {
pybind11::gil_scoped_release g;
if (verbose) {
// It is not a good design to print the debug string
std::cout << def->DebugString() << std::endl;
}
ws()->RunOperator(*def);
/*! \brief Return the registered operators */
m.def("RegisteredOperators", []() {
return CPUOperatorRegistry()->keys();
});
/*! \brief Run an operator from the serialized def */
m.def("RunOperator", [](
const string& serialized,
const bool verbose) {
OperatorDef def;
CHECK(def.ParseFromString(serialized));
pybind11::gil_scoped_release g;
if (verbose) {
// It is not a good design to print the debug string
std::cout << def.DebugString() << std::endl;
}
ws()->RunOperatorOnce(def);
/*! \brief Return the non-gradient operators */
m.def("NoGradientOperators", []() {
return NoGradientRegistry()->keys();
});
}
......
......@@ -22,208 +22,51 @@ namespace python {
void AddTensorMethods(pybind11::module& m) {
/*! \brief Export the Tensor class */
pybind11::class_<Tensor>(m, "Tensor")
/*! \brief Return the number of dimensions */
.def_property_readonly("ndim", &Tensor::ndim)
/*! \brief Return all the dimensions */
.def_property_readonly("dims", &Tensor::dims)
/*! \brief Return the total number of elements */
.def_property_readonly("size", &Tensor::size)
/*! \brief Return the data type */
.def_property_readonly("dtype", [](Tensor* self) {
return TypeMetaToString(self->meta());
}).def_property_readonly("device", [](Tensor* self) {
})
/*! \brief Return the device information */
.def_property_readonly("device", [](Tensor* self) {
if (self->has_memory()) {
Map<string, string> mem_info = self->memory()->info();
auto mem_info = self->memory()->info();
return std::tuple<string, int>(
mem_info["device_type"], atoi(
mem_info["device_id"].c_str()));
} else {
return std::tuple<string, int>("Unknown", 0);
}
}).def("ToCPU", [](Tensor* self) {
CHECK(self->has_memory()) << "\nTensor(" << self->name()
<< ") is not initialized or has been reset.";
})
/*! \brief Switch the memory to the cpu context */
.def("ToCPU", [](Tensor* self) {
CHECK(self->has_memory())
<< "\nTensor(" << self->name() << ") "
<< "does not initialize or had been reset.";
self->memory()->ToCPU();
}).def("ToCUDA", [](Tensor* self, const int device_id) {
})
/*! \brief Switch the memory to the cuda context */
.def("ToCUDA", [](Tensor* self, int device_id) {
#ifdef WITH_CUDA
CHECK(self->has_memory()) << "\nTensor(" << self->name()
<< ") is not initialized or has been reset.";
CHECK(self->has_memory())
<< "\nTensor(" << self->name() << ") "
<< "does not initialize or had been reset.";
self->memory()->SwitchToCUDADevice(device_id);
#else
CUDA_NOT_COMPILED;
#endif
});
/*! \brief List all the existing tensors */
m.def("Tensors", []() { return ws()->GetTensors(); });
/*! \brief Return whether the given tensor exists */
m.def("HasTensor", [](
const string& name) -> bool {
return ws()->HasTensor(name);
});
/*! \brief Return the unique name of given tensor */
m.def("GetTensorName", [](
const string& name) -> string {
return ws()->GetTensorName(name);
});
/*! \brief Create a tensor with the given name */
m.def("CreateTensor", [](
const string& name) -> void {
ws()->CreateTensor(name);
});
/*! \brief Reset the tensor with the given name */
m.def("ResetTensor", [](
const string& name) -> void {
ws()->ResetTensor(name);
});
/*! \brief Create a tensor with the given shape */
m.def("TensorFromShape", [](
const string& name,
const vector<int64_t>& shape,
const string& dtype) {
const TypeMeta& meta = TypeStringToMeta(dtype);
if (meta.id() == 0) {
LOG(FATAL) << "Unsupported data type: " + dtype + ".";
}
Tensor* tensor = ws()->CreateTensor(name);
if (meta.id() != tensor->meta().id() && tensor->meta().id() != 0)
LOG(WARNING) << "Set Tensor(" << tensor->name() << ")"
<< " with different data type from original one.";
tensor->Reshape(shape);
tensor->raw_mutable_data<CPUContext>(meta);
});
/*! \brief Create a tensor with the given array */
m.def("TensorFromPyArray", [](
const string& name,
pybind11::object py_array) {
PyArrayObject* array = PyArray_GETCONTIGUOUS(
reinterpret_cast<PyArrayObject*>(py_array.ptr()));
const TypeMeta& meta = TypeNPYToMeta(PyArray_TYPE(array));
if (meta.id() == 0) LOG(FATAL) << "Unsupported data type.";
Tensor* tensor = ws()->CreateTensor(name);
tensor->SetMeta(meta);
int ndim = PyArray_NDIM(array);
npy_intp* npy_dims = PyArray_DIMS(array);
vector<int64_t> dims;
for (int i = 0; i < ndim; i++) dims.push_back(npy_dims[i]);
tensor->Reshape(dims);
auto* data = static_cast<void*>(PyArray_DATA(array));
if (!tensor->has_memory()) {
MixedMemory* memory(new MixedMemory());
memory->set_cpu_data(data, tensor->nbytes());
tensor->set_memory(memory);
} else {
if (tensor->DECREFPyArray) tensor->DECREFPyArray();
tensor->memory()->set_cpu_data(data, tensor->nbytes());
}
// Following the approach of PyTorch,
// we bind the DECREF to the Tensor,
// so that ResetTensor() or ResetWorkspace() can trigger it
tensor->DECREFPyArray = [array]()->void { Py_XDECREF(array); };
});
/*! \brief Create a tensor copied from an existing one */
m.def("TensorFromTensor", [](
const string& name,
const string& other,
const string& dev1,
const string& dev2) {
DeviceOption dst_ctx, src_ctx;
dst_ctx.ParseFromString(dev1);
src_ctx.ParseFromString(dev2);
Tensor* srcT = ws()->GetTensor(other);
Tensor* dstT = ws()->CreateTensor(name);
dstT->ReshapeLike(*srcT);
const TypeMeta& meta = srcT->meta();
if (dst_ctx.device_type() == PROTO_CUDA) {
if (src_ctx.device_type() == PROTO_CUDA) {
// CUDA <- CUDA
CUDAContext::MemcpyEx<CUDAContext, CUDAContext>(
srcT->nbytes(),
dstT->raw_mutable_data<CUDAContext>(meta),
srcT->raw_data<CUDAContext>(),
src_ctx.device_id());
} else {
// CUDA <- CPU
CUDAContext::MemcpyEx<CUDAContext, CPUContext>(
srcT->nbytes(),
dstT->raw_mutable_data<CUDAContext>(meta),
srcT->raw_data<CPUContext>(),
dst_ctx.device_id());
}
} else {
if (src_ctx.device_type() == PROTO_CUDA) {
// CPU <- CUDA
CUDAContext::MemcpyEx<CPUContext, CUDAContext>(
srcT->nbytes(),
dstT->raw_mutable_data<CPUContext>(meta),
srcT->raw_data<CUDAContext>(),
src_ctx.device_id());
} else {
// CPU <- CPU
CPUContext::Memcpy<CPUContext, CPUContext>(
srcT->nbytes(),
dstT->raw_mutable_data<CPUContext>(meta),
srcT->raw_data<CPUContext>());
}
}
});
/*! \brief Return an array zero-copied from an existing tensor */
m.def("TensorToPyArray", [](
const string& name,
const bool readonly) {
Tensor* tensor = ws()->GetTensor(name);
CHECK_GT(tensor->count(), 0);
vector<npy_intp> dims;
for (const auto dim : tensor->dims()) dims.push_back(dim);
int npy_type = TypeMetaToNPY(tensor->meta());
if (npy_type == -1) {
LOG(FATAL) << "Tensor(" + tensor->name() + ") "
"with dtype." + TypeMetaToString(tensor->meta()) +
" is not supported by numpy.";
}
auto* data = readonly ?
const_cast<void*>(tensor->raw_data<CPUContext>()) :
tensor->raw_mutable_data<CPUContext>();
PyObject* array = PyArray_SimpleNewFromData(
tensor->ndim(), dims.data(), npy_type, data);
return pybind11::reinterpret_steal<pybind11::object>(array);
});
/*! \brief Create a tensor from the specified filler */
m.def("CreateFiller", [](
const string& serialized) {
TensorFillerProto filler_proto;
if (!filler_proto.ParseFromString(serialized))
LOG(FATAL) << "Failed to parse the TensorFiller.";
ws()->CreateFiller(filler_proto);
ws()->CreateTensor(filler_proto.tensor());
});
/*! \brief Return the filler type of a tensor */
m.def("GetFillerType", [](const string& name) {
return ws()->GetFiller(name)->type();
});
/*! \brief Set an alias for the tensor */
m.def("SetTensorAlias", [](
const string& name,
const string& alias) {
if (!ws()->HasTensor(name)) {
LOG(FATAL) << "Tensor(" + name << ") has not "
"been registered in the current workspace.";
}
ws()->SetTensorAlias(name, alias);
});
/*! \brief Return the CXX Tensor reference */
m.def("GetTensor", [](
const string& name) {
return ws()->GetTensor(name);
}, pybind11::return_value_policy::reference_internal);
}
} // namespace python
......
......@@ -22,6 +22,9 @@ import dragon.config as config
# Core
from dragon.core.tensor import Tensor
import dragon.core.workspace as workspace
from dragon.core.workspace import Workspace
from dragon.core.workspace import get_default_workspace
from dragon.core.workspace import reset_default_workspace
import dragon.core.tensor_utils as tensor_utils
import dragon.core.mpi as mpi
import dragon.core.cuda as cuda
......@@ -41,7 +44,6 @@ from dragon.vm.theano.tensor import grad as grad
from dragon.core.scope import name_scope, get_default_name_scope
from dragon.core.scope import phase_scope, get_default_phase
from dragon.core.scope import device_scope, get_default_device
from dragon.core.scope import WorkspaceScope as ws_scope
# Version
from dragon.version import version
......
......@@ -15,8 +15,9 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon.import_c_api as C
import dragon.core.logging as logging
from dragon import import_c_api as _C
from dragon.core import logging as _logging
option = {}
......@@ -290,12 +291,12 @@ def SetLoggingLevel(level):
The default level is *INFO*.
"""
C.SetLoggingLevel(level)
logging.set_verbosity({
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARN,
'ERROR': logging.ERROR,
'FATAL': logging.FATAL,
_C.SetLoggingLevel(level)
_logging.set_verbosity({
'DEBUG': _logging.DEBUG,
'INFO': _logging.INFO,
'WARNING': _logging.WARN,
'ERROR': _logging.ERROR,
'FATAL': _logging.FATAL,
}[level]
)
\ No newline at end of file
......@@ -15,7 +15,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon.import_c_api as _C
from dragon import import_c_api as _C
def IsCUDADriverSufficient():
......
......@@ -30,11 +30,10 @@ from __future__ import print_function
from collections import defaultdict
import dragon.proto.dragon_pb2 as pb
import dragon.import_c_api as C
from dragon.core.helper import OperatorHelper
from dragon.core.proto_utils import MakeOperatorDef
from dragon import import_c_api as _C
from dragon.core import helper as _helper
from dragon.proto import dragon_pb2 as _proto_def
from dragon.core import proto_utils as _proto_utils
class GraphGradientMaker(object):
......@@ -62,16 +61,22 @@ class GraphGradientMaker(object):
The OpDef, outputs and defaults of ``BackwardOp``.
"""
g_ops, g_inputs, defaults = C.CreateGradientDefs(
g_ops, g_inputs, defaults = _C.CreateGradientDefs(
forward_op.SerializeToString(), g_outputs)
for idx, g_op in enumerate(g_ops):
new_def = pb.OperatorDef()
new_def = _proto_def.OperatorDef()
new_def.ParseFromString(g_op)
g_ops[idx] = new_def
return g_ops, g_inputs, defaults
@classmethod
def CheckGrad(cls, forward_op, inputs_to_grads, blacklist, targets):
def CheckGrad(
cls,
forward_op,
inputs_to_grads,
blacklist,
targets,
):
"""Check if missing Grads. If True, skip this Op.
Parameters
......@@ -91,7 +96,7 @@ class GraphGradientMaker(object):
The result of checking and generated filling grads.
"""
if forward_op.type in C.NO_GRADIENT_OPERATORS:
if forward_op.type in _C.NO_GRADIENT_OPERATORS:
for input in forward_op.input: blacklist.add(input)
return True, None
......@@ -114,7 +119,13 @@ class GraphGradientMaker(object):
return False, gen_grads
@classmethod
def Make(cls, forward_ops, targets, input_grads=None, auto_names=True):
def Make(
cls,
forward_ops,
targets,
input_grads=None,
auto_names=True,
):
"""Make ``BackwardOps`` based on ``ForwardOps``.
Parameters
......@@ -149,7 +160,7 @@ class GraphGradientMaker(object):
# PLAY for the forward
for forward_op in forward_ops:
if forward_op.type in C.NO_GRADIENT_OPERATORS: continue
if forward_op.type in _C.NO_GRADIENT_OPERATORS: continue
outputs = [o for o in forward_op.output]
for input in forward_op.input:
if input not in outputs:
......@@ -176,14 +187,17 @@ class GraphGradientMaker(object):
op_inputs.append(item[0])
op_outputs.append(item[0] + '_grad')
values.append(defaults[item[1]])
gen_op = MakeOperatorDef('GradientGenerate', op_inputs, op_outputs, defaults=values)
gen_op.name = OperatorHelper.get_name() if auto_names else 'runtime'
gen_op = _proto_utils.MakeOperatorDef(
'GradientGenerate', op_inputs, op_outputs, defaults=values)
gen_op.name = _helper.OperatorHelper. \
get_name() if auto_names else 'runtime'
if forward_op.HasField('device_option'):
gen_op.device_option.CopyFrom(forward_op.device_option)
backward_ops.append(gen_op)
# GradientOp
for g_op in g_ops:
g_op.name = OperatorHelper.get_name() if auto_names else 'runtime'
g_op.name = _helper.OperatorHelper. \
get_name() if auto_names else 'runtime'
backward_ops.append(g_op)
# Split & Gather grads for multi-used input
......@@ -208,10 +222,12 @@ class GraphGradientMaker(object):
for idx in range(grads_count[g_output]):
if '%s_autosplit_%d' % (g_output, idx) in all_split_grads:
split_inputs.append('%s_autosplit_%d' % (g_output, idx))
gather_op = MakeOperatorDef('GradientGather', split_inputs, [g_output])
gather_op = _proto_utils.MakeOperatorDef(
'GradientGather', split_inputs, [g_output])
if g_op.HasField('device_option'):
gather_op.device_option.CopyFrom(g_op.device_option)
gather_op.name = OperatorHelper.get_name() if auto_names else 'runtime'
gather_op.name = _helper.OperatorHelper. \
get_name() if auto_names else 'runtime'
backward_ops.append(gather_op)
g_op.output[g_output_idx] = split_name
......
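``Make()`` above walks the forward ops, asks the C backend for per-op gradient defs, and appends ``GradientGenerate``/``GradientGather`` ops where needed. A hedged sketch of driving it for one op; ``MakeOperatorDef`` and the ``Make`` signature come from this diff, while the module path of ``GraphGradientMaker`` is an assumption:

from dragon.core.proto_utils import MakeOperatorDef
from dragon.core.gradient_maker import GraphGradientMaker  # path assumed

fwd = MakeOperatorDef('Add', inputs=['a', 'b'], outputs=['c'], name='add_1')
result = GraphGradientMaker.Make(
    forward_ops=[fwd], targets=['c'], input_grads=None, auto_names=True)
# 'result' bundles the generated backward operator defs (the per-op gradient
# defs plus GradientGenerate/GradientGather where an input is reused).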
......@@ -17,7 +17,8 @@ from __future__ import print_function
import math
import numpy
import dragon
from dragon.core import workspace as _workspace
class OperatorHelper(object):
......@@ -39,11 +40,11 @@ class OperatorHelper(object):
@classmethod
def get_index_and_name(cls, prefix='Op'):
name = dragon.workspace.GetDummyName(prefix, domain='Operator')
name = _workspace.GetDummyName(prefix, domain='Operator')
try:
_, op_idx = name.split('_')
except:
name = dragon.workspace.GetDummyName(prefix, domain='Operator')
name = _workspace.GetDummyName(prefix, domain='Operator')
_, op_idx = name.split('_')
return int(op_idx), name
......
......@@ -15,7 +15,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon.import_c_api as _C
from dragon import import_c_api as _C
_GLOBAL_MPI_IS_INIT = False
......
......@@ -9,7 +9,7 @@
#
# ------------------------------------------------------------
"""Define some helpful protobuf makers here."""
"""Define some helpful protocol buffer makers here."""
from __future__ import absolute_import
from __future__ import division
......@@ -17,28 +17,28 @@ from __future__ import print_function
import sys
import copy
import numpy as np
from google.protobuf.message import Message
import numpy
import dragon.config as cfg
import dragon.import_c_api as _C
from dragon.proto import dragon_pb2 as pb
from dragon.core.scope import get_default_device
from dragon import config as _cfg
from dragon import import_c_api as _C
from dragon.core import scope as _scope
from dragon.proto import dragon_pb2 as _proto_def
from google.protobuf.message import Message as _Message
if sys.version_info >= (3,0):
def MakeArgument(key, value):
argument = pb.Argument()
argument = _proto_def.Argument()
argument.name = key
if type(value) is float: argument.f = value
elif type(value) in (bool, int, np.int64) : argument.i = value
elif type(value) in (bool, int, numpy.int64) : argument.i = value
elif type(value) is bytes: argument.s = value
elif type(value) is str: argument.s = str.encode(value)
elif isinstance(value, Message): argument.s = value.SerializeToString()
elif isinstance(value, _Message): argument.s = value.SerializeToString()
elif all(type(v) is float for v in value): argument.floats.extend(value)
elif all(type(v) is int for v in value): argument.ints.extend(value)
elif all(type(v) is str for v in value): argument.strings.extend([str.encode(v) for v in value])
elif all(isinstance(v, Message) for v in value):
elif all(isinstance(v, _Message) for v in value):
argument.strings.extend([v.SerializeToString() for v in value])
else:
raise ValueError(
......@@ -47,20 +47,20 @@ if sys.version_info >= (3,0):
return argument
else:
def MakeArgument(key, value):
argument = pb.Argument()
argument = _proto_def.Argument()
argument.name = key
if type(value) is float: argument.f = value
elif type(value) in (bool, int, long, np.int64) : argument.i = value
elif type(value) in (bool, int, long, numpy.int64) : argument.i = value
elif type(value) is str: argument.s = value
elif type(value) is unicode: argument.s = str(value)
elif isinstance(value, Message): argument.s = value.SerializeToString()
elif isinstance(value, _Message): argument.s = value.SerializeToString()
elif all(type(v) is float for v in value): argument.floats.extend(value)
elif all(type(v) is int for v in value): argument.ints.extend(value)
elif all(type(v) is long for v in value): argument.ints.extend(value)
elif all(type(v) is str for v in value): argument.strings.extend(value)
elif all(type(v) is unicode for v in value):
argument.strings.extend([str(v) for v in value])
elif all(isinstance(v, Message) for v in value):
elif all(isinstance(v, _Message) for v in value):
argument.strings.extend([v.SerializeToString() for v in value])
else:
raise ValueError(
......@@ -70,10 +70,16 @@ else:
def MakeOperatorDef(
op_type, inputs=(), outputs=(),
name='', uid=None, device_option=None,
arg=None, **kwargs):
operator = pb.OperatorDef()
op_type,
inputs=(),
outputs=(),
name='',
uid=None,
device_option=None,
arg=None,
**kwargs
):
operator = _proto_def.OperatorDef()
operator.type = op_type
operator.name = name
operator.input.extend([str(tensor) for tensor in inputs])
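``MakeOperatorDef`` fills an ``OperatorDef`` proto from plain Python values; judging from the ``**kwargs`` in the signature and the ``MakeArgument`` helper above, extra keyword arguments become ``Argument`` messages (the body is truncated here, so treat that as an assumption). A small example; the ``axis`` argument name is purely illustrative:

from dragon.core.proto_utils import MakeOperatorDef

op = MakeOperatorDef(
    'Softmax',
    inputs=['logits'],
    outputs=['prob'],
    name='softmax_1',
    axis=1,                      # assumed to be routed through MakeArgument
)
print(op.type, list(op.input), list(op.output))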
......@@ -92,9 +98,15 @@ def MakeOperatorDef(
def MakeCXXOperatorDef(
op_type, inputs=(), outputs=(),
name='', uid=None, device_option=None,
arg=None, **kwargs):
op_type,
inputs=(),
outputs=(),
name='',
uid=None,
device_option=None,
arg=None,
**kwargs
):
c_def = _C.OperatorDef()
py_def = MakeOperatorDef(
op_type, inputs, outputs, name, uid,
......@@ -104,7 +116,7 @@ def MakeCXXOperatorDef(
def MakeDeviceOption(device_type, device_id, rng_seed=None):
option = pb.DeviceOption()
option = _proto_def.DeviceOption()
option.device_type = device_type
option.device_id = device_id
if rng_seed is not None: option.random_seed = rng_seed
......@@ -133,7 +145,7 @@ def GetDeviceOption(device_type, device_id=0, rng_seed=None):
def GetDefaultDeviceOption():
device_info = get_default_device()
device_info = _scope.get_default_device()
if device_info is not None:
return GetDeviceOption(
device_info['device_type'],
......@@ -142,10 +154,10 @@ def GetDefaultDeviceOption():
def GetGlobalDeviceOption():
option = cfg.GetGlobalOptions()
options = _cfg.GetGlobalOptions()
return GetDeviceOption(
option['device'],
option['device_id'])
options['device'],
options['device_id'])
# Fix the python stdout
......@@ -159,6 +171,5 @@ class Unbuffered(object):
return getattr(self.stream, attr)
# Clear the stdout buffer for mpi(C++ && Python)
import sys
# Clear the stdout buffer for mpi
sys.stdout = Unbuffered(sys.stdout)
\ No newline at end of file
......@@ -13,92 +13,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import dragon.import_c_api as _C
from contextlib import contextmanager
__all__ = [
'name_scope',
'phase_scope',
'device_scope',
'get_default_phase',
'get_default_device',
'get_default_name_scope',
'WorkspaceScope',
]
class _ThreadLocalStack(threading.local):
def __init__(self):
super(_ThreadLocalStack, self).__init__()
self._enforce_nesting = True
self.stack = []
def get_default(self):
return self.stack[-1] if len(self.stack) >= 1 else None
def is_cleared(self):
return not self.stack
@property
def enforce_nesting(self):
return self._enforce_nesting
@enforce_nesting.setter
def enforce_nesting(self, value):
self._enforce_nesting = value
@contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
if self._enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects" %
type(default))
self.stack.pop()
else:
self.stack.remove(default)
class WorkspaceScope(object):
"""WorkspaceScope is a auxiliary to assign the specific workspace.
Examples
--------
>>> import dragon as dg
>>> with WorkspaceScope('session1'): pass
>>> with dg.ws_scope('session2'): pass
"""
def __init__(self, ws_name):
assert isinstance(ws_name, type('str')), \
'WorkspaceScope takes in a string as its argument.'
assert ws_name != '', \
'The workspace name should not be empty.'
self.ws = ws_name
self.prev = 'default'
def __enter__(self):
self.prev = _C.CurrentWorkspace()
_C.SwitchWorkspace(self.ws, True)
def __exit__(self, type, value, traceback):
_C.SwitchWorkspace(self.prev, True)
_GLOBAL_TENSOR_STACK = _ThreadLocalStack()
_GLOBAL_PHASE_STACK = _ThreadLocalStack()
_GLOBAL_DEVICE_STACK = _ThreadLocalStack()
_PREDEFINED_SCOPE_SEPARATOR = '/'
from dragon.core import tls as _tls
def name_scope(name):
......@@ -140,7 +55,7 @@ def device_scope(device_type, device_id=0):
"""
device_type, device_id = device_type.lower(), device_id
assert device_type in ['cpu', 'gpu', 'cuda', 'cnml']
assert device_type in ('cpu', 'gpu', 'cuda', 'cnml')
# Default names
if device_type == 'gpu': device_type = 'cuda'
return _GLOBAL_DEVICE_STACK.get_controller({
......@@ -213,3 +128,9 @@ def get_default_device():
"""
return _GLOBAL_DEVICE_STACK.get_default()
_GLOBAL_TENSOR_STACK = _tls.Stack()
_GLOBAL_PHASE_STACK = _tls.Stack()
_GLOBAL_DEVICE_STACK = _tls.Stack()
_PREDEFINED_SCOPE_SEPARATOR = '/'
\ No newline at end of file
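``device_scope`` above lower-cases the device type, accepts 'cpu', 'gpu', 'cuda' or 'cnml', and maps 'gpu' to 'cuda' before pushing onto the thread-local device stack. A short usage sketch; the keys of the pushed dict are inferred from how ``GetDefaultDeviceOption`` reads them:

from dragon.core.scope import device_scope, get_default_device

with device_scope('gpu', device_id=0):       # 'gpu' is normalized to 'cuda'
    info = get_default_device()
    print(info['device_type'], info['device_id'])   # cuda 0
print(get_default_device())                  # None outside any device scope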
......@@ -16,10 +16,10 @@ from __future__ import division
from __future__ import print_function
import numpy
import dragon
from dragon.core.tensor import Tensor
from dragon.core.proto_utils import GetDeviceOption
from dragon.core import workspace as _workspace
from dragon.core import proto_utils as _proto_utils
from dragon.core.tensor import Tensor as _Tensor
def FromShape(shape, dtype='float32', name=None):
......@@ -47,9 +47,8 @@ def FromShape(shape, dtype='float32', name=None):
tensor.shape = list(shape)
if not isinstance(shape, (tuple, list)):
raise TypeError('The shape should be a tuple or list.')
dragon.C.TensorFromShape(
_stringify_tensor(tensor),
list(shape), dtype)
_get_workspace().TensorFromShape(
_stringify_tensor(tensor), list(shape), dtype)
return tensor
......@@ -70,7 +69,8 @@ def SetShape(tensor, shape, dtype='float32'):
None
"""
dragon.C.TensorFromShape(_stringify_tensor(tensor), shape, dtype)
_get_workspace().TensorFromShape(
_stringify_tensor(tensor), shape, dtype)
def FromTensor(src, src_ctx=None, name=None, ctx=None):
......@@ -97,15 +97,17 @@ def FromTensor(src, src_ctx=None, name=None, ctx=None):
"""
tensor = _try_get_tensor(name)
if src_ctx is None: src_ctx = GetDeviceOption('cpu')
if ctx is None: ctx = GetDeviceOption('cpu')
dragon.C.TensorFromTensor(
_stringify_tensor(tensor), _stringify_tensor(src),
_stringify_proto(ctx), _stringify_proto(src_ctx))
if src_ctx is None: src_ctx = _proto_utils.GetDeviceOption('cpu')
if ctx is None: ctx = _proto_utils.GetDeviceOption('cpu')
_get_workspace().TensorFromTensor(
_stringify_tensor(tensor),
_stringify_tensor(src),
_stringify_proto(ctx),
_stringify_proto(src_ctx))
return tensor
def FromPyArray(array, name=None):
def FromArray(array, name=None):
"""Create a Tensor from a existing Array.
Note that memory of Tensor are ``zero-copied``.
......@@ -128,12 +130,13 @@ def FromPyArray(array, name=None):
"""
tensor = _try_get_tensor(name)
if not isinstance(array, numpy.ndarray):
raise TypeError('The given nd-array should be numpy.ndarray.')
dragon.C.TensorFromPyArray(_stringify_tensor(tensor), array)
raise TypeError('Expected a numpy.ndarray.')
_get_workspace().TensorFromArray(
_stringify_tensor(tensor), array)
return tensor
def SetPyArray(tensor, array):
def SetArray(tensor, array):
"""Set a Tensor from a existing Array.
Note that memory of Tensor are ``zero-copied``.
......@@ -149,15 +152,12 @@ def SetPyArray(tensor, array):
-------
None
References
----------
The wrapper of ``TensorFromPyArrayCC``.
"""
dragon.C.TensorFromPyArray(_stringify_tensor(tensor), array)
_get_workspace().TensorFromArray(
_stringify_tensor(tensor), array)
def ToPyArray(tensor, readonly=False):
def ToArray(tensor, readonly=False):
"""Create a Array from a existing Tensor.
Note that memory of Array are *zero-copied*.
......@@ -175,7 +175,8 @@ def ToPyArray(tensor, readonly=False):
The array sharing the memory with the original tensor.
"""
return dragon.C.TensorToPyArray(_stringify_tensor(tensor), readonly)
return _get_workspace().TensorToArray(
_stringify_tensor(tensor), readonly)
def GetStorage(tensor):
......@@ -193,8 +194,8 @@ def GetStorage(tensor):
"""
tensor = _stringify_tensor(tensor)
if not dragon.workspace.HasTensor(tensor): return None
return dragon.C.GetTensor(tensor)
if not _get_workspace().HasTensor(tensor): return None
return _get_workspace().GetTensor(tensor)
def _stringify_proto(obj):
......@@ -210,5 +211,10 @@ def _stringify_tensor(obj):
def _try_get_tensor(name=None):
"""Try to create or get a tensor"""
if name is None or name == '': return Tensor()
else: return Tensor.Ref(name)
\ No newline at end of file
if name is None or name == '': return _Tensor()
else: return _Tensor.Ref(name)
def _get_workspace():
"""Get the current default workspace."""
return _workspace.get_default_workspace()
\ No newline at end of file
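``FromArray`` and ``ToArray`` above are zero-copy wrappers over the default workspace. A hedged round-trip sketch; the shape and dtype are arbitrary:

import numpy
from dragon.core import tensor_utils

a = numpy.ones((2, 3), dtype='float32')
t = tensor_utils.FromArray(a, name='x')   # the tensor shares a's memory
b = tensor_utils.ToArray(t)               # the array shares the same memory
b[0, 0] = 5.0
print(a[0, 0])                            # 5.0: both views see the write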
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
"""Define the common thread local structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import contextlib
class Constant(threading.local):
def __init__(self, **attrs):
super(Constant, self).__init__()
self.__dict__.update(attrs)
class Stack(threading.local):
def __init__(self):
super(Stack, self).__init__()
self._enforce_nesting = True
self.stack = []
def get_default(self):
return self.stack[-1] if len(self.stack) >= 1 else None
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@property
def enforce_nesting(self):
return self._enforce_nesting
@enforce_nesting.setter
def enforce_nesting(self, value):
self._enforce_nesting = value
@contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
if self._enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects" %
type(default))
self.stack.pop()
else:
self.stack.remove(default)
\ No newline at end of file
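``Stack.get_controller`` above is what the scope helpers wrap: it pushes a default onto a thread-local stack for the lifetime of a with-block and enforces proper nesting. A minimal standalone sketch using only the class defined here:

from dragon.core.tls import Stack

stack = Stack()
print(stack.get_default())                     # None: nothing pushed yet
with stack.get_controller({'phase': 'TRAIN'}) as default:
    assert stack.get_default() is default
    with stack.get_controller({'phase': 'TEST'}):
        print(stack.get_default()['phase'])    # TEST
    print(stack.get_default()['phase'])        # TRAIN
print(stack.get_default())                     # None again after the block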
......@@ -23,7 +23,6 @@ from __future__ import print_function
import sys
import logging as _logging
import atexit
try:
from dragon.libdragon import *
......@@ -32,9 +31,5 @@ except ImportError as e:
'Cannot import dragon. Error: {0}'.format(str(e)))
sys.exit(1)
REGISTERED_OPERATORS = set(s for s in RegisteredOperators())
NO_GRADIENT_OPERATORS = set(s for s in NoGradientOperators())
atexit.register(OnModuleExit)
\ No newline at end of file
......@@ -15,6 +15,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dragon import config as _cfg
def ShareGrads(enabled=True):
"""Enable gradients sharing globally.
......@@ -34,8 +36,8 @@ def ShareGrads(enabled=True):
>>> opt.ShareGrads()
"""
from dragon.config import option
option['share_grads'] = enabled
options = _cfg.GetGlobalOptions()
options['share_grads'] = enabled
def IsGradsShared():
......@@ -47,8 +49,8 @@ def IsGradsShared():
``True`` if sharing grads else ``False``.
"""
from dragon.config import option
return option['share_grads']
options = _cfg.GetGlobalOptions()
return options['share_grads']
def Drop(op_func, *args, **kwargs):
......
......@@ -13,8 +13,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
import dragon.utils.vision
from dragon.utils import vision as _vision
from dragon.core import workspace as _workspace
class MiniBatchOp(object):
......@@ -36,7 +36,7 @@ class MiniBatchOp(object):
"""
kwargs = eval(self.param_str)
self._data_batch = dragon.utils.vision.DataBatch(**kwargs)
self._data_batch = _vision.DataBatch(**kwargs)
def run(self, inputs, outputs):
"""Run method, i.e., forward pass.
......@@ -55,4 +55,4 @@ class MiniBatchOp(object):
"""
blobs = self._data_batch.get()
for idx, blob in enumerate(blobs):
dragon.workspace.FeedTensor(outputs[idx], blob)
\ No newline at end of file
_workspace.FeedTensor(outputs[idx], blob)
\ No newline at end of file
......@@ -15,149 +15,149 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .operators import initializer as init_ops
from .operators import vision as vision_ops
from .operators import loss as loss_ops
from .operators import data as data_ops
from .operators import activation as active_ops
from .operators import arithmetic as math_ops
from .operators import control_flow as control_flow_ops
from .operators import misc as misc_ops
from .operators import mpi as mpi_ops
from .operators import array as array_ops
from .operators import norm as norm_ops
from .operators import recurrent as recurrent_ops
from .operators import contrib as contrib_ops
from .operators import initializer as _init_ops
from .operators import vision as _vision_ops
from .operators import loss as _loss_ops
from .operators import data as _data_ops
from .operators import activation as _active_ops
from .operators import arithmetic as _math_ops
from .operators import control_flow as _control_flow_ops
from .operators import misc as _misc_ops
from .operators import mpi as _mpi_ops
from .operators import array as _array_ops
from .operators import norm as _norm_ops
from .operators import recurrent as _recurrent_ops
from .operators import contrib as _contrib_ops
# Data
LMDBData = data_ops.LMDBData
ImageData = data_ops.ImageData
LMDBData = _data_ops.LMDBData
ImageData = _data_ops.ImageData
# Initializer
Fill = init_ops.Fill
RandomUniform = init_ops.RandomUniform
RandomNormal = init_ops.RandomNormal
TruncatedNormal = init_ops.TruncatedNormal
GlorotUniform = init_ops.GlorotUniform
GlorotNormal = init_ops.GlorotNormal
Fill = _init_ops.Fill
RandomUniform = _init_ops.RandomUniform
RandomNormal = _init_ops.RandomNormal
TruncatedNormal = _init_ops.TruncatedNormal
GlorotUniform = _init_ops.GlorotUniform
GlorotNormal = _init_ops.GlorotNormal
# Vision
Conv2d = vision_ops.Conv2d
DepthwiseConv2d = vision_ops.DepthwiseConv2d
ConvTranspose2d = DeConv2d = Conv2dTranspose = vision_ops.ConvTranspose2d
Pool2d = vision_ops.Pool2d
ROIPool = vision_ops.ROIPool
ROIAlign = vision_ops.ROIAlign
LRN = vision_ops.LRN
NNResize = vision_ops.NNResize
BilinearResize = vision_ops.BilinearResize
BiasAdd = vision_ops.BiasAdd
DropBlock2d = vision_ops.DropBlock2d
Conv2d = _vision_ops.Conv2d
DepthwiseConv2d = _vision_ops.DepthwiseConv2d
ConvTranspose2d = DeConv2d = Conv2dTranspose = _vision_ops.ConvTranspose2d
Pool2d = _vision_ops.Pool2d
ROIPool = _vision_ops.ROIPool
ROIAlign = _vision_ops.ROIAlign
LRN = _vision_ops.LRN
NNResize = _vision_ops.NNResize
BilinearResize = _vision_ops.BilinearResize
BiasAdd = _vision_ops.BiasAdd
DropBlock2d = _vision_ops.DropBlock2d
# Recurrent
LSTMCell = recurrent_ops.LSTMCell
RNN = recurrent_ops.RNN
LSTM = recurrent_ops.LSTM
GRU = recurrent_ops.GRU
LSTMCell = _recurrent_ops.LSTMCell
RNN = _recurrent_ops.RNN
LSTM = _recurrent_ops.LSTM
GRU = _recurrent_ops.GRU
# Activation
Sigmoid = active_ops.Sigmoid
Tanh = active_ops.Tanh
Relu = active_ops.Relu
LRelu = active_ops.LRelu
PRelu = active_ops.PRelu
Elu = active_ops.Elu
SElu = active_ops.SElu
Softmax = active_ops.Softmax
Dropout = active_ops.Dropout
Sigmoid = _active_ops.Sigmoid
Tanh = _active_ops.Tanh
Relu = _active_ops.Relu
LRelu = _active_ops.LRelu
PRelu = _active_ops.PRelu
Elu = _active_ops.Elu
SElu = _active_ops.SElu
Softmax = _active_ops.Softmax
Dropout = _active_ops.Dropout
# Loss
NLLLoss = loss_ops.NLLLoss
SparseSoftmaxCrossEntropy = loss_ops.SparseSoftmaxCrossEntropy
SigmoidCrossEntropy = loss_ops.SigmoidCrossEntropy
SoftmaxCrossEntropy = loss_ops.SoftmaxCrossEntropy
SmoothL1Loss = loss_ops.SmoothL1Loss
L1Loss = loss_ops.L1Loss
L2Loss = loss_ops.L2Loss
SigmoidFocalLoss = loss_ops.SigmoidFocalLoss
SoftmaxFocalLoss = loss_ops.SoftmaxFocalLoss
CTCLoss = loss_ops.CTCLoss
NLLLoss = _loss_ops.NLLLoss
SparseSoftmaxCrossEntropy = _loss_ops.SparseSoftmaxCrossEntropy
SigmoidCrossEntropy = _loss_ops.SigmoidCrossEntropy
SoftmaxCrossEntropy = _loss_ops.SoftmaxCrossEntropy
SmoothL1Loss = _loss_ops.SmoothL1Loss
L1Loss = _loss_ops.L1Loss
L2Loss = _loss_ops.L2Loss
SigmoidFocalLoss = _loss_ops.SigmoidFocalLoss
SoftmaxFocalLoss = _loss_ops.SoftmaxFocalLoss
CTCLoss = _loss_ops.CTCLoss
# Arithmetic
Add = math_ops.Add
Sub = math_ops.Sub
Mul = math_ops.Mul
Div = math_ops.Div
Maximum = math_ops.Maximum
Minimum = math_ops.Minimum
Moments = math_ops.Moments
Clip = math_ops.Clip
Matmul = math_ops.Matmul
Pow = math_ops.Pow
Dot = math_ops.Dot
Log = math_ops.Log
Exp = math_ops.Exp
Square = math_ops.Square
Sqrt = math_ops.Sqrt
FullyConnected = math_ops.FullyConnected
Eltwise = math_ops.Eltwise
Affine = math_ops.Affine
GramMatrix = math_ops.GramMatrix
Accumulate = math_ops.Accumulate
MovingAverage = math_ops.MovingAverage
Add = _math_ops.Add
Sub = _math_ops.Sub
Mul = _math_ops.Mul
Div = _math_ops.Div
Maximum = _math_ops.Maximum
Minimum = _math_ops.Minimum
Moments = _math_ops.Moments
Clip = _math_ops.Clip
Matmul = _math_ops.Matmul
Pow = _math_ops.Pow
Dot = _math_ops.Dot
Log = _math_ops.Log
Exp = _math_ops.Exp
Square = _math_ops.Square
Sqrt = _math_ops.Sqrt
FullyConnected = _math_ops.FullyConnected
Eltwise = _math_ops.Eltwise
Affine = _math_ops.Affine
GramMatrix = _math_ops.GramMatrix
Accumulate = _math_ops.Accumulate
MovingAverage = _math_ops.MovingAverage
# Normalization
BatchNorm = norm_ops.BatchNorm
GroupNorm = norm_ops.GroupNorm
LayerNorm = norm_ops.LayerNorm
InstanceNorm = norm_ops.InstanceNorm
L2Norm = norm_ops.L2Norm
BatchNorm = _norm_ops.BatchNorm
GroupNorm = _norm_ops.GroupNorm
LayerNorm = _norm_ops.LayerNorm
InstanceNorm = _norm_ops.InstanceNorm
L2Norm = _norm_ops.L2Norm
# NDArray
Gather = array_ops.Gather
Crop = array_ops.Crop
Reduce = array_ops.Reduce
Sum = array_ops.Sum
Mean = array_ops.Mean
Max = array_ops.Max
ArgMax = array_ops.ArgMax
Min = array_ops.Min
ArgMin = array_ops.ArgMin
Slice = array_ops.Slice
Stack = array_ops.Stack
Concat = array_ops.Concat
Transpose = array_ops.Transpose
Repeat = array_ops.Repeat
Tile = array_ops.Tile
Pad = array_ops.Pad
OneHot = array_ops.OneHot
Flatten = array_ops.Flatten
Reshape = array_ops.Reshape
ExpandDims = array_ops.ExpandDims
Squeeze = array_ops.Squeeze
Shape = array_ops.Shape
Arange = array_ops.Arange
Multinomial = array_ops.Multinomial
Gather = _array_ops.Gather
Crop = _array_ops.Crop
Reduce = _array_ops.Reduce
Sum = _array_ops.Sum
Mean = _array_ops.Mean
Max = _array_ops.Max
ArgMax = _array_ops.ArgMax
Min = _array_ops.Min
ArgMin = _array_ops.ArgMin
Slice = _array_ops.Slice
Stack = _array_ops.Stack
Concat = _array_ops.Concat
Transpose = _array_ops.Transpose
Repeat = _array_ops.Repeat
Tile = _array_ops.Tile
Pad = _array_ops.Pad
OneHot = _array_ops.OneHot
Flatten = _array_ops.Flatten
Reshape = _array_ops.Reshape
ExpandDims = _array_ops.ExpandDims
Squeeze = _array_ops.Squeeze
Shape = _array_ops.Shape
Arange = _array_ops.Arange
Multinomial = _array_ops.Multinomial
# Control Flow
Copy = control_flow_ops.Copy
Assign = control_flow_ops.Assign
Equal = control_flow_ops.Equal
Less = control_flow_ops.Less
LessEqual = control_flow_ops.LessEqual
Greater = control_flow_ops.Greater
GreaterEqual = control_flow_ops.GreaterEqual
Copy = _control_flow_ops.Copy
Assign = _control_flow_ops.Assign
Equal = _control_flow_ops.Equal
Less = _control_flow_ops.Less
LessEqual = _control_flow_ops.LessEqual
Greater = _control_flow_ops.Greater
GreaterEqual = _control_flow_ops.GreaterEqual
# Misc
Cast = AsType = misc_ops.Cast
Run = misc_ops.Run
Template = misc_ops.Template
Accuracy = misc_ops.Accuracy
StopGradient = misc_ops.StopGradient
Cast = AsType = _misc_ops.Cast
Run = _misc_ops.Run
Template = _misc_ops.Template
Accuracy = _misc_ops.Accuracy
StopGradient = _misc_ops.StopGradient
# MPI
MPIBroadcast = mpi_ops.MPIBroadcast
MPIGather = mpi_ops.MPIGather
MPIBroadcast = _mpi_ops.MPIBroadcast
MPIGather = _mpi_ops.MPIGather
# Contrib
Proposal = contrib_ops.Proposal # R-CNN
\ No newline at end of file
Proposal = _contrib_ops.Proposal # R-CNN
\ No newline at end of file
......@@ -145,18 +145,6 @@ message GradientProto {
optional string external = 3;
}
// Record the updater information
message UpdaterProto {
// The operator name to use.
optional string name = 1;
// The operator type.
optional string type = 2;
// The tensor to update.
repeated string tensor = 3;
// The arguments.
repeated Argument arg = 4;
}
// Graph Definition
message GraphDef {
// The graph name.
......@@ -181,6 +169,4 @@ message GraphDef {
// The gradients information.
repeated GradientProto gradient = 9;
// The updaters information.
repeated UpdaterProto updater = 10;
}
\ No newline at end of file
......@@ -22,8 +22,8 @@ from __future__ import print_function
import pprint
from dragon.core import workspace
from dragon.core.tensor import Tensor
from dragon.core import workspace as _workspace
from dragon.core.tensor import Tensor as _Tensor
class BaseUpdater(object):
......@@ -32,12 +32,14 @@ class BaseUpdater(object):
# Store the global unique slot index
_DEFAULT_UNIQUE_SLOT_ID = 0
def __init__(self,
def __init__(
self,
scale_gradient=1.0,
clip_gradient=-1.0,
l2_decay=-1.0,
slot=None,
verbose=True):
verbose=True,
):
"""Construct a Updater to optimize the objectives.
Parameters
......@@ -84,7 +86,7 @@ class BaseUpdater(object):
None
"""
pair = (tensor.name if isinstance(tensor, Tensor) \
pair = (tensor.name if isinstance(tensor, _Tensor) \
else tensor for tensor in pair)
self._param_group.append((pair,
{'lr_mult': lr_mult, 'decay_mult': decay_mult}))
......@@ -93,7 +95,8 @@ class BaseUpdater(object):
defaults = self.__dict__.get('_defaults')
if item in defaults:
if self._registered:
return workspace.FetchTensor(self._slot + '/' + item)
return _workspace.FetchTensor(
self._slot + '/' + item)
else: return defaults[item]
return self.__dict__[item]
......@@ -101,7 +104,8 @@ class BaseUpdater(object):
defaults = self.__dict__.get('_defaults')
if defaults is not None and key in defaults:
if self._registered:
workspace.FeedTensor(self._slot + '/' + key, value,
_workspace.FeedTensor(
self._slot + '/' + key, value,
dtype='float32', force_cpu=True)
else:
self._defaults[key] = value
......@@ -111,7 +115,8 @@ class BaseUpdater(object):
def register_in_workspace(self):
if not self._registered:
for k, v in self._defaults.items():
workspace.FeedTensor(self._slot + "/" + k, v,
_workspace.FeedTensor(
self._slot + "/" + k, v,
dtype='float32', force_cpu=True)
self._registered = True
if self._verbose:
......@@ -206,8 +211,14 @@ class AdamUpdater(BaseUpdater):
Introduced by `[Kingma & Ba, 2014] <https://arxiv.org/abs/1412.6980>`_.
"""
def __init__(self, base_lr=0.01, beta1=0.9,
beta2=0.999, eps=1e-8, **kwargs):
def __init__(
self,
base_lr=0.01,
beta1=0.9,
beta2=0.999,
eps=1e-8,
**kwargs
):
"""Construct a Adam Updater to optimize the objectives.
Parameters
......@@ -222,7 +233,7 @@ class AdamUpdater(BaseUpdater):
The eps.
"""
super(AdamUpdater, self).__init__(**kwargs )
super(AdamUpdater, self).__init__(**kwargs)
self._defaults = dict({
'base_lr': base_lr,
'beta1': beta1,
......
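The updaters above keep their hyper-parameters as workspace tensors under a per-updater slot once registered. A hedged construction sketch; the ``AdamUpdater`` signature comes from the hunk above, the module path is an assumption, and the training wiring is omitted:

from dragon.updaters import AdamUpdater   # module path assumed

updater = AdamUpdater(base_lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8)
# Each (parameter, gradient) pair goes through append(); per-pair
# lr_mult / decay_mult scale the global settings (call shape assumed):
# updater.append(('fc_w', 'fc_w_grad'), lr_mult=1.0, decay_mult=1.0)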
......@@ -13,11 +13,11 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from multiprocessing import Process
import numpy
import multiprocessing
class BlobFetcher(Process):
class BlobFetcher(multiprocessing.Process):
"""BlobFetcher is deployed to queue blobs from `DataTransformer`_.
It supports forming *NHWC* image blobs and *1d* label blobs.
......@@ -37,10 +37,9 @@ class BlobFetcher(Process):
"""
super(BlobFetcher, self).__init__()
self._batch_size = kwargs.get('batch_size', 100)
self._batch_size = kwargs.get('batch_size', 128)
self._partition = kwargs.get('partition', False)
if self._partition:
self._batch_size = self._batch_size // kwargs['group_size']
if self._partition: self._batch_size //= kwargs['group_size']
self.Q_in = self.Q_out = None
self.daemon = True
......@@ -54,9 +53,9 @@ class BlobFetcher(Process):
"""
im, labels = self.Q_in.get()
im_blob = np.zeros(shape=([self._batch_size] + list(im.shape)), dtype=np.uint8)
label_blob = np.zeros((self._batch_size, len(labels)), dtype=np.int64)
for ix in range(0, self._batch_size):
im_blob = numpy.zeros(shape=([self._batch_size] + list(im.shape)), dtype='uint8')
label_blob = numpy.zeros((self._batch_size, len(labels)), dtype='int64')
for ix in range(self._batch_size):
im_blob[ix, :, :, :], label_blob[ix, :] = im, labels
if ix != self._batch_size - 1: im, labels = self.Q_in.get()
return im_blob, label_blob
......
......@@ -14,11 +14,10 @@ from __future__ import division
from __future__ import print_function
import time
import pprint
from multiprocessing import Queue
import multiprocessing
import dragon.core.mpi as mpi
import dragon.core.logging as logging
from dragon.core import mpi as _mpi
from dragon.core import logging as _logging
from .data_reader import DataReader
from .data_transformer import DataTransformer
......@@ -77,10 +76,11 @@ class DataBatch(object):
super(DataBatch, self).__init__()
# Init mpi
global_rank = 0; local_rank = 0; group_size = 1
if mpi.Is_Init():
idx, group = mpi.AllowParallel()
if idx != -1: # DataParallel
global_rank = mpi.Rank()
if _mpi.Is_Init() and kwargs.get(
'phase', 'TRAIN') == 'TRAIN':
rank, group = _mpi.AllowParallel()
if rank != -1: # DataParallel
global_rank = _mpi.Rank()
group_size = len(group)
for i, node in enumerate(group):
if global_rank == node: local_rank = i
......@@ -105,7 +105,7 @@ class DataBatch(object):
self._num_transformers += 1
# Add 1 transformer for random crop
if kwargs.get('crop_size', 0) > 0 and \
kwargs.get('phase', 'TEST') == 'TRAIN':
kwargs.get('phase', 'TRAIN') == 'TRAIN':
self._num_transformers += 1
self._num_transformers = min(self._num_transformers, self._max_transformers)
......@@ -115,9 +115,12 @@ class DataBatch(object):
self._batch_size = int(self._batch_size / kwargs['group_size'])
# Init queues
self.Q_level_1 = Queue(self._prefetch * self._num_readers * self._batch_size)
self.Q_level_2 = Queue(self._prefetch * self._num_readers * self._batch_size)
self.Q_level_3 = Queue(self._prefetch * self._num_readers)
self.Q_level_1 = multiprocessing.Queue(
self._prefetch * self._num_readers * self._batch_size)
self.Q_level_2 = multiprocessing.Queue(
self._prefetch * self._num_readers * self._batch_size)
self.Q_level_3 = multiprocessing.Queue(
self._prefetch * self._num_readers)
# Init readers
self._readers = []
......@@ -167,11 +170,11 @@ class DataBatch(object):
process.terminate()
process.join()
terminate(self._fetchers)
if local_rank == 0: logging.info('Terminating BlobFetcher ......')
if local_rank == 0: _logging.info('Terminate BlobFetcher.')
terminate(self._transformers)
if local_rank == 0: logging.info('Terminating DataTransformer ......')
if local_rank == 0: _logging.info('Terminate DataTransformer.')
terminate(self._readers)
if local_rank == 0: logging.info('Terminating DataReader......')
if local_rank == 0: _logging.info('Terminate DataReader.')
import atexit
atexit.register(cleanup)
......
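The queues created above wire a three-stage prefetch pipeline (DataReader -> DataTransformer -> BlobFetcher). A stripped-down sketch of just that wiring, with illustrative capacities and variable names rather than the module's own:

import multiprocessing

prefetch, num_readers, batch_size = 5, 1, 128

# Stage 1 -> 2: serialized records produced by the readers.
q_records = multiprocessing.Queue(prefetch * num_readers * batch_size)
# Stage 2 -> 3: transformed (image, labels) pairs produced by the transformers.
q_samples = multiprocessing.Queue(prefetch * num_readers * batch_size)
# Stage 3 -> consumer: assembled (im_blob, label_blob) batches from the fetchers.
q_blobs = multiprocessing.Queue(prefetch * num_readers)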
......@@ -14,15 +14,14 @@ from __future__ import division
from __future__ import print_function
import math
import numpy as np
import numpy.random as npr
from multiprocessing import Process
import numpy
import multiprocessing
import dragon.config as config
from dragon.tools.db import LMDB
from dragon import config as _cfg
from dragon.tools import db as _db
class DataReader(Process):
class DataReader(multiprocessing.Process):
"""DataReader is deployed to queue encoded str from `LMDB`_.
It adaptively partitions and shuffles records across all distributed nodes.
......@@ -55,7 +54,7 @@ class DataReader(Process):
self._part_idx, self._num_parts = 0, 1
self._cur_idx, self._cur_chunk_idx = 0, 0
self._random_seed = config.GetRandomSeed()
self._random_seed = _cfg.GetRandomSeed()
self.Q_out = None
self.daemon = True
......@@ -106,7 +105,9 @@ class DataReader(Process):
"""
if self._multiple_nodes or self._use_shuffle:
if self._use_shuffle: self._perm = npr.permutation(self._num_shuffle_parts)
if self._use_shuffle:
self._perm = numpy.random.permutation(
self._num_shuffle_parts)
self._cur_chunk_idx = 0
self._start_idx = int(self._part_idx * self._num_shuffle_parts + self._perm[self._cur_chunk_idx])
self._start_idx = int(self._start_idx * self._chunk_size)
......@@ -158,23 +159,23 @@ class DataReader(Process):
"""
# fix seed
npr.seed(self._random_seed)
numpy.random.seed(self._random_seed)
# init db
self._db = LMDB()
self._db = _db.LMDB()
self._db.open(self._source)
self._zfill = self._db.zfill()
self._num_entries = self._db.num_entries()
self._epoch_size = int(self._num_entries/ self._num_parts + 1)
self._epoch_size = int(self._num_entries / self._num_parts + 1)
if self._use_shuffle:
if self._chunk_size == 1:
# Each chunk has at most 1 record [For Fully Shuffle]
# Each chunk has at most 1 record (Naive Shuffle)
self._chunk_size, self._num_shuffle_parts = \
1, int(self._num_entries / self._num_parts) + 1
else:
if self._use_shuffle and self._chunk_size == -1:
# Search an optimal chunk size by chunks [For Chunk Shuffle]
# Search an optimal chunk size by chunks (Chunk Shuffle)
max_chunk_size = self._db._total_size / ((self._num_chunks * (1 << 20)))
min_chunk_size = 1
while min_chunk_size * 2 < max_chunk_size: min_chunk_size *= 2
......@@ -184,17 +185,17 @@ class DataReader(Process):
self._chunk_size = int(self._num_entries / self._num_shuffle_parts / self._num_parts + 1)
limit = (self._num_parts - 0.5) * self._num_shuffle_parts * self._chunk_size
if self._num_entries <= limit:
# Roll back to fully shuffle
# Roll back to naive shuffle
self._chunk_size, self._num_shuffle_parts = \
1, int(self._num_entries / self._num_parts) + 1
else:
# Each chunk has at most K records [For Multiple Nodes]
# Note that if ``shuffle`` and ``multiple_nodes`` are both ``False``,
# Each chunk has at most K records
# Note that if ``shuffle`` and ``multiple_nodes`` are both *False*,
# ``chunk_size`` and ``num_shuffle_parts`` are meaningless
self._chunk_size = int(self._num_entries / self._num_parts) + 1
self._num_shuffle_parts = 1
self._perm = np.arange(self._num_shuffle_parts)
self._perm = numpy.arange(self._num_shuffle_parts)
# Init env
self.reset()
......
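To make the chunk-shuffle sizing above concrete, a worked calculation with made-up inputs (none of these numbers come from the source):

# Made-up inputs: 100000 entries, 4 distributed parts, 16 shuffle chunks per part.
num_entries, num_parts, num_shuffle_parts = 100000, 4, 16

chunk_size = int(num_entries / num_shuffle_parts / num_parts + 1)  # -> 1563 records per chunk
limit = (num_parts - 0.5) * num_shuffle_parts * chunk_size         # -> 87528.0
roll_back = num_entries <= limit                                   # -> False: keep chunk shuffle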
......@@ -13,12 +13,11 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.random as npr
from multiprocessing import Process
import numpy
import multiprocessing
import dragon.config as config
import dragon.vm.caffe.proto.caffe_pb2 as pb
from dragon import config as _cfg
from dragon.vm.caffe.proto import caffe_pb2 as _proto_def
try:
import cv2
......@@ -31,7 +30,7 @@ except ImportError as e:
print("Failed to import PIL. \nIt's OK if disabling color augmentation.".format(str(e)))
class DataTransformer(Process):
class DataTransformer(multiprocessing.Process):
"""DataTransformer is deployed to queue transformed images from `DataReader`_.
Nearly all common image augmentation methods are supported.
......@@ -72,7 +71,7 @@ class DataTransformer(Process):
self._max_random_scale = kwargs.get('max_random_scale', 1.0)
self._force_color = kwargs.get('force_color', False)
self._phase = kwargs.get('phase', 'TRAIN')
self._random_seed = config.GetRandomSeed()
self._random_seed = _cfg.GetRandomSeed()
self.Q_in = self.Q_out = None
self.daemon = True
......@@ -91,16 +90,16 @@ class DataTransformer(Process):
"""
# decode
datum = pb.Datum()
datum = _proto_def.Datum()
datum.ParseFromString(serialized)
im = np.fromstring(datum.data, np.uint8)
im = numpy.fromstring(datum.data, numpy.uint8)
if datum.encoded is True:
im = cv2.imdecode(im, -1)
else:
im = im.reshape((datum.height, datum.width, datum.channels))
# Random scale
random_scale = npr.uniform() * (
random_scale = numpy.random.uniform() * (
self._max_random_scale - self._min_random_scale) \
+ self._min_random_scale
if random_scale != 1.0:
......@@ -109,7 +108,7 @@ class DataTransformer(Process):
# Padding
if self._padding > 0:
pad_img = np.empty((
pad_img = numpy.empty((
im.shape[0] + 2 * self._padding,
im.shape[1] + 2 * self._padding, im.shape[2]), dtype=im.dtype)
pad_img.fill(self._fill_value)
......@@ -120,8 +119,8 @@ class DataTransformer(Process):
# Random crop
if self._crop_size > 0:
if self._phase == 'TRAIN':
h_off = npr.randint(im.shape[0] - self._crop_size + 1)
w_off = npr.randint(im.shape[1] - self._crop_size + 1)
h_off = numpy.random.randint(im.shape[0] - self._crop_size + 1)
w_off = numpy.random.randint(im.shape[1] - self._crop_size + 1)
else:
h_off = int((im.shape[0] - self._crop_size) / 2)
w_off = int((im.shape[1] - self._crop_size) / 2)
......@@ -130,28 +129,28 @@ class DataTransformer(Process):
# Random mirror
if self._mirror:
if npr.randint(0, 2) > 0:
if numpy.random.randint(0, 2) > 0:
im = im[:, ::-1, :]
# Gray Transformation
if self._force_color:
if im.shape[2] == 1:
# duplicate to 3 channels
im = np.concatenate([im, im, im], axis=2)
im = numpy.concatenate([im, im, im], axis=2)
# Color Augmentation
if self._color_aug:
im = PIL.Image.fromarray(im)
delta_brightness = npr.uniform(-0.4, 0.4) + 1.0
delta_contrast = npr.uniform(-0.4, 0.4) + 1.0
delta_saturation = npr.uniform(-0.4, 0.4) + 1.0
delta_brightness = numpy.random.uniform(-0.4, 0.4) + 1.0
delta_contrast = numpy.random.uniform(-0.4, 0.4) + 1.0
delta_saturation = numpy.random.uniform(-0.4, 0.4) + 1.0
im = PIL.ImageEnhance.Brightness(im)
im = im.enhance(delta_brightness)
im = PIL.ImageEnhance.Contrast(im)
im = im.enhance(delta_contrast)
im = PIL.ImageEnhance.Color(im)
im = im.enhance(delta_saturation)
im = np.array(im)
im = numpy.array(im)
# Extract Labels
labels = []
......@@ -169,7 +168,7 @@ class DataTransformer(Process):
"""
# Fix the random seed
npr.seed(self._random_seed)
numpy.random.seed(self._random_seed)
# Run!
while True:
......
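A compressed, library-free sketch of the crop and mirror steps above (decoding, scaling, padding and the PIL color augmentation are omitted); the image size, crop size, and seed are arbitrary:

import numpy

numpy.random.seed(1337)
im = numpy.zeros((256, 256, 3), dtype='uint8')  # stand-in for a decoded HWC image
crop_size, phase, mirror = 224, 'TRAIN', True

if crop_size > 0:
    if phase == 'TRAIN':  # random crop at training time
        h_off = numpy.random.randint(im.shape[0] - crop_size + 1)
        w_off = numpy.random.randint(im.shape[1] - crop_size + 1)
    else:                 # center crop otherwise
        h_off = (im.shape[0] - crop_size) // 2
        w_off = (im.shape[1] - crop_size) // 2
    im = im[h_off:h_off + crop_size, w_off:w_off + crop_size, :]

if mirror and numpy.random.randint(0, 2) > 0:
    im = im[:, ::-1, :]   # horizontal flip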
......@@ -16,8 +16,8 @@ import shutil
import argparse
import cv2
from dragon.tools.db import LMDB
from dragon.vm.caffe.proto import caffe_pb2
from dragon.tools import db as _db
from dragon.vm.caffe.proto import caffe_pb2 as _proto_def
def resize_image(im, resize):
......@@ -37,11 +37,10 @@ def resize_image(im, resize):
"""
if im.shape[0] > im.shape[1]:
newsize = (resize, im.shape[0] * resize / im.shape[1])
new_size = (resize, im.shape[0] * resize // im.shape[1])
else:
newsize = (im.shape[1] * resize / im.shape[0], resize)
im = cv2.resize(im, newsize)
return im
new_size = (im.shape[1] * resize // im.shape[0], resize)
return cv2.resize(im, new_size, interpolation=cv2.INTER_LINEAR)
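To illustrate the aspect-preserving arithmetic, a worked example with made-up dimensions (note that cv2.resize expects (width, height)):

# Made-up input: 480 (height) x 640 (width), shorter side resized to 256.
h, w, resize = 480, 640, 256
if h > w:
    new_size = (resize, h * resize // w)
else:
    new_size = (w * resize // h, resize)   # -> (341, 256): width 341, height 256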
def make_db(args):
......@@ -72,7 +71,7 @@ def make_db(args):
print('start time: ', time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime()))
db = LMDB(max_commit=10000)
db = _db.LMDB(max_commit=10000)
db.open(args.database, mode='w')
total_line = sum(1 for line in open(args.list))
......@@ -106,7 +105,7 @@ def make_db(args):
img = resize_image(img, args.resize)
result, imgencode = cv2.imencode('.jpg', img, encode_param)
datum = caffe_pb2.Datum()
datum = _proto_def.Datum()
datum.height, datum.width, datum.channels = img.shape
datum.label = int(label)
datum.encoded = True
......
......@@ -15,7 +15,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
from dragon.core import scope as _scope
from dragon.core.tensor import Tensor as _Tensor
class Layer(object):
......@@ -74,12 +75,12 @@ class Layer(object):
# Note that a non-empty tensor scope will make it
# impossible to load/save caffe models. You should use
# a new workspace instead of the terrible name scope
scoped_name = dragon.get_default_name_scope() + self._name
scoped_name = _scope.get_default_name_scope() + self._name
param_name = scoped_name + '/param:{}'.format(len(self._blobs))
# Set the name explicitly
variable = dragon.Tensor.Ref(param_name)
variable_grad = dragon.Tensor.Ref(param_name + '_grad')
variable = _Tensor.Ref(param_name)
variable_grad = _Tensor.Ref(param_name + '_grad')
if filler is not None:
variable.Fill(**filler)
......
......@@ -15,13 +15,12 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
from ..layer import Layer
from dragon import ops as _ops
from ..layer import Layer as _Layer
class DataLayer(Layer):
"""
The implementation of ``DataLayer``.
class DataLayer(_Layer):
"""The implementation of ``DataLayer``.
Different from ``Caffe``, we force the use of the `LMDB`_ backend.
......@@ -33,7 +32,7 @@ class DataLayer(Layer):
The prefetch count. Refer `DataParameter.prefetch`_.
batch_size : int
The size of a mini-batch. Refer `DataParameter.batch_size`_.
phase : caffe_pb2.Phase
phase : Phase
The phase of layer. Refer `LayerParameter.phase`_.
mirror : boolean
Whether to randomly mirror. Refer `TransformationParameter.mirror`_.
......@@ -49,9 +48,9 @@ class DataLayer(Layer):
The min scale of the images. Extension of `TransformationParameter`_.
max_random_scale : float
The max scale of the images. Extension of `TransformationParameter`_.
dtype : caffe_pb2.MemoryDataParameter.DataType
The output data type. ``FLOAT32`` or ``FLOAT16``.
mean_value : list of float
dtype : MemoryDataParameter.DataType
The output data type. *FLOAT32* or *FLOAT16*.
mean_value : sequence of float
The mean of each channel. Refer `TransformationParameter.mean_value`_.
scale : float
The scaling factor. Refer `TransformationParameter.scale`_.
......@@ -93,20 +92,20 @@ class DataLayer(Layer):
[1. / transform_param.scale] * 3
def LayerSetup(self, bottom):
data, label = dragon.ops.LMDBData(**self.arguments)
return dragon.ops.ImageData(data, **self.arguments), label
data, label = _ops.LMDBData(**self.arguments)
return _ops.ImageData(data, **self.arguments), label
class MemoryDataLayer(Layer):
class MemoryDataLayer(_Layer):
"""The implementation of ``MemoryDataLayer``.
We extend it with ``FP16`` and ``NHWC => NCHW``.
Parameters
----------
dtype : caffe_pb2.MemoryDataParameter.DataType
dtype : MemoryDataParameter.DataType
The output data type. ``FLOAT32`` or ``FLOAT16``.
mean_value : list of float
mean_value : sequence of float
The mean of each channel. Refer `TransformationParameter.mean_value`_.
scale : float
The scaling factor. Refer `TransformationParameter.scale`_.
......@@ -131,4 +130,4 @@ class MemoryDataLayer(Layer):
[1. / transform_param.scale] * 3
def LayerSetup(self, bottom):
return dragon.ops.ImageData(bottom, **self.arguments)
\ No newline at end of file
return _ops.ImageData(bottom, **self.arguments)
\ No newline at end of file
......@@ -15,11 +15,11 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
from ..layer import Layer
from dragon import ops as _ops
from ..layer import Layer as _Layer
class SoftmaxWithLossLayer(Layer):
class SoftmaxWithLossLayer(_Layer):
"""The implementation of ``SoftmaxWithLossLayer``.
Parameters
......@@ -52,12 +52,12 @@ class SoftmaxWithLossLayer(Layer):
}
def LayerSetup(self, bottom):
loss = dragon.ops.SparseSoftmaxCrossEntropy(bottom, **self.arguments)
loss = _ops.SparseSoftmaxCrossEntropy(bottom, **self.arguments)
if self._loss_weight is not None: loss *= self._loss_weight
return loss
class SigmoidCrossEntropyLossLayer(Layer):
class SigmoidCrossEntropyLossLayer(_Layer):
"""The implementation of ``SigmoidCrossEntropyLossLayer``.
Parameters
......@@ -79,12 +79,12 @@ class SigmoidCrossEntropyLossLayer(Layer):
self.arguments = {'normalization': normalization}
def LayerSetup(self, bottom):
loss = dragon.ops.SigmoidCrossEntropy(bottom, **self.arguments)
loss = _ops.SigmoidCrossEntropy(bottom, **self.arguments)
if self._loss_weight is not None: loss *= self._loss_weight
return loss
class L2LossLayer(Layer):
class L2LossLayer(_Layer):
"""The implementation of ``L2LossLayer``.
Parameters
......@@ -106,12 +106,12 @@ class L2LossLayer(Layer):
self.arguments = {'normalization': normalization}
def LayerSetup(self, bottom):
loss = dragon.ops.L2Loss(bottom, **self.arguments)
loss = _ops.L2Loss(bottom, **self.arguments)
if self._loss_weight is not None: loss *= self._loss_weight
return loss
class SmoothL1LossLayer(Layer):
class SmoothL1LossLayer(_Layer):
"""The implementation of ``SmoothL1LossLayer``.
Parameters
......@@ -140,12 +140,12 @@ class SmoothL1LossLayer(Layer):
}
def LayerSetup(self, bottom):
loss = dragon.ops.SmoothL1Loss(bottom, **self.arguments)
loss = _ops.SmoothL1Loss(bottom, **self.arguments)
if self._loss_weight is not None: loss *= self._loss_weight
return loss
class SigmoidWithFocalLossLayer(Layer):
class SigmoidWithFocalLossLayer(_Layer):
"""The implementation of ``SigmoidWithFocalLossLayer``.
Parameters
......@@ -183,12 +183,12 @@ class SigmoidWithFocalLossLayer(Layer):
}
def LayerSetup(self, bottom):
loss = dragon.ops.SigmoidFocalLoss(bottom, **self.arguments)
loss = _ops.SigmoidFocalLoss(bottom, **self.arguments)
if self._loss_weight is not None: loss *= self._loss_weight
return loss
class SoftmaxWithFocalLossLayer(Layer):
class SoftmaxWithFocalLossLayer(_Layer):
"""The implementation of ``SoftmaxWithFocalLossLayer``.
Parameters
......@@ -227,6 +227,6 @@ class SoftmaxWithFocalLossLayer(Layer):
}
def LayerSetup(self, bottom):
loss = dragon.ops.SoftmaxFocalLoss(bottom, **self.arguments)
loss = _ops.SoftmaxFocalLoss(bottom, **self.arguments)
if self._loss_weight is not None: loss *= self._loss_weight
return loss
\ No newline at end of file
......@@ -15,11 +15,11 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
from ..layer import Layer
from dragon import ops as _ops
from ..layer import Layer as _Layer
class MPIBroadcastLayer(Layer):
class MPIBroadcastLayer(_Layer):
"""The implementation of ``MPIBroadcastLayer``.
Parameters
......@@ -33,10 +33,10 @@ class MPIBroadcastLayer(Layer):
self.arguments = {'root': LayerParameter.mpi_param.root}
def LayerSetup(self, bottom):
return dragon.ops.MPIBroadcast(bottom, **self.arguments)
return _ops.MPIBroadcast(bottom, **self.arguments)
class MPIGatherLayer(Layer):
class MPIGatherLayer(_Layer):
"""The implementation of ``MPIGatherLayer``.
Parameters
......@@ -53,4 +53,4 @@ class MPIGatherLayer(Layer):
}
def LayerSetup(self, bottom):
return dragon.ops.MPIGather(bottom, **self.arguments)
\ No newline at end of file
return _ops.MPIGather(bottom, **self.arguments)
\ No newline at end of file
......@@ -15,11 +15,11 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
from ..layer import Layer
from dragon import ops as _ops
from ..layer import Layer as _Layer
class ReLULayer(Layer):
class ReLULayer(_Layer):
"""The implementation of ``ReLULayer``.
Parameters
......@@ -35,10 +35,10 @@ class ReLULayer(Layer):
self.arguments = {'slope': param.negative_slope}
def LayerSetup(self, bottom):
return dragon.ops.Relu(bottom, **self.arguments)
return _ops.Relu(bottom, **self.arguments)
class PReLULayer(Layer):
class PReLULayer(_Layer):
"""The implementation of ``PReLULayer``.
Parameters
......@@ -61,10 +61,10 @@ class PReLULayer(Layer):
def LayerSetup(self, bottom):
inputs = [bottom] + [blob['data'] for blob in self._blobs]
return dragon.ops.PRelu(inputs, **self.arguments)
return _ops.PRelu(inputs, **self.arguments)
class ELULayer(Layer):
class ELULayer(_Layer):
"""The implementation of ``ELULayer``.
Parameters
......@@ -78,40 +78,40 @@ class ELULayer(Layer):
self.arguments = {'alpha': float(LayerParameter.elu_param.alpha)}
def LayerSetup(self, bottom):
return dragon.ops.Elu(bottom, **self.arguments)
return _ops.Elu(bottom, **self.arguments)
class SELULayer(Layer):
class SELULayer(_Layer):
"""The implementation of ``SELULayer``."""
def __init__(self, LayerParameter):
super(SELULayer, self).__init__(LayerParameter)
def LayerSetup(self, bottom):
return dragon.ops.SElu(bottom, **self.arguments)
return _ops.SElu(bottom, **self.arguments)
class SigmoidLayer(Layer):
class SigmoidLayer(_Layer):
"""The implementation of ``SigmoidLayer``."""
def __init__(self, LayerParameter):
super(SigmoidLayer, self).__init__(LayerParameter)
def LayerSetup(self, bottom):
return dragon.ops.Sigmoid(bottom, **self.arguments)
return _ops.Sigmoid(bottom, **self.arguments)
class TanHLayer(Layer):
class TanHLayer(_Layer):
"""The implementation of ``TanHLayer``."""
def __init__(self, LayerParameter):
super(TanHLayer, self).__init__(LayerParameter)
def LayerSetup(self, bottom):
return dragon.ops.Tanh(bottom, **self.arguments)
return _ops.Tanh(bottom, **self.arguments)
class DropoutLayer(Layer):
class DropoutLayer(_Layer):
"""The implementation of ``DropoutLayer``.
Parameters
......@@ -132,10 +132,10 @@ class DropoutLayer(Layer):
}
def LayerSetup(self, bottom):
return dragon.ops.Dropout(bottom, **self.arguments)
return _ops.Dropout(bottom, **self.arguments)
class PowerLayer(Layer):
class PowerLayer(_Layer):
"""The implementation of ``PowerLayer``.
Parameters
......@@ -158,4 +158,4 @@ class PowerLayer(Layer):
}
def LayerSetup(self, bottom):
return dragon.ops.Pow(bottom, **self.arguments)
\ No newline at end of file
return _ops.Pow(bottom, **self.arguments)
\ No newline at end of file
......@@ -15,11 +15,11 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
from ..layer import Layer
from dragon import ops as _ops
from ..layer import Layer as _Layer
class ConvolutionLayer(Layer):
class ConvolutionLayer(_Layer):
"""The implementation of ``ConvolutionLayer``.
Parameters
......@@ -28,19 +28,19 @@ class ConvolutionLayer(Layer):
The output channels. Refer `ConvolutionParameter.num_output`_.
bias_term : boolean
Whether to use bias. Refer `ConvolutionParameter.bias_term`_.
pad : list of int
pad : sequence of int
The zero padding size(s). Refer `ConvolutionParameter.pad`_.
kernel_size : list of int
The kernel size(s). Refer `ConvolutionParameter.kernel_size`_.
stride : list of int
stride : sequence of int
The stride(s). Refer `ConvolutionParameter.stride`_.
dilation : list of int
dilation : sequence of int
The dilation(s). Refer `ConvolutionParameter.dilation`_.
group : int
The group size. Refer `ConvolutionParameter.group`_.
weight_filler : FillerParameter
The filler of weights. Refer `ConvolutionParameter.weight_filler`_.
bias_filler : FillerParameters
bias_filler : FillerParameter
The filler of bias. Refer `ConvolutionParameter.bias_filler`_.
"""
......@@ -76,10 +76,10 @@ class ConvolutionLayer(Layer):
def LayerSetup(self, bottom):
inputs = [bottom] + [blob['data'] for blob in self._blobs]
return dragon.ops.Conv2d(inputs, **self.arguments)
return _ops.Conv2d(inputs, **self.arguments)
class DepthwiseConvolutionLayer(Layer):
class DepthwiseConvolutionLayer(_Layer):
"""The implementation of ``DepthwiseConvolutionLayer``.
Parameters
......@@ -88,15 +88,15 @@ class DepthwiseConvolutionLayer(Layer):
The output channels. Refer `ConvolutionParameter.num_output`_.
bias_term : boolean
Whether to use bias. Refer `ConvolutionParameter.bias_term`_.
pad : list of int
pad : sequence of int
The zero padding size(s). Refer `ConvolutionParameter.pad`_.
kernel_size : list of int
kernel_size : sequence of int
The kernel size(s). Refer `ConvolutionParameter.kernel_size`_.
stride : list of int
stride : sequence of int
The stride(s). Refer `ConvolutionParameter.stride`_.
weight_filler : FillerParameter
The filler of weights. Refer `ConvolutionParameter.weight_filler`_.
bias_filler : FillerParameters
bias_filler : FillerParameter
The filler of bias. Refer `ConvolutionParameter.bias_filler`_.
"""
......@@ -130,7 +130,7 @@ class DepthwiseConvolutionLayer(Layer):
def LayerSetup(self, bottom):
inputs = [bottom] + [blob['data'] for blob in self._blobs]
return dragon.ops.DepthwiseConv2d(inputs, **self.arguments)
return _ops.DepthwiseConv2d(inputs, **self.arguments)
class DeconvolutionLayer(ConvolutionLayer):
......@@ -142,19 +142,19 @@ class DeconvolutionLayer(ConvolutionLayer):
The output channels. Refer `ConvolutionParameter.num_output`_.
bias_term : boolean
Whether to use bias. Refer `ConvolutionParameter.bias_term`_.
pad : list of int
pad : sequence of int
The zero padding size(s). Refer `ConvolutionParameter.pad`_.
kernel_size : list of int
kernel_size : sequence of int
The kernel size(s). Refer `ConvolutionParameter.kernel_size`_.
stride : list of int
stride : sequence of int
The stride(s). Refer `ConvolutionParameter.stride`_.
dilation : list of int
dilation : sequence of int
The dilation(s). Refer `ConvolutionParameter.dilation`_.
group : int
The group size. Refer `ConvolutionParameter.group`_.
weight_filler : FillerParameter
The filler of weights. Refer `ConvolutionParameter.weight_filler`_.
bias_filler : FillerParameters
bias_filler : FillerParameter
The filler of bias. Refer `ConvolutionParameter.bias_filler`_.
"""
......@@ -163,29 +163,29 @@ class DeconvolutionLayer(ConvolutionLayer):
def LayerSetup(self, bottom):
inputs = [bottom] + [blob['data'] for blob in self._blobs]
return dragon.ops.ConvTranspose2d(inputs, **self.arguments)
return _ops.ConvTranspose2d(inputs, **self.arguments)
class PoolingLayer(Layer):
class PoolingLayer(_Layer):
"""The implementation of ``PoolingLayer``.
Parameters
----------
pool : PoolMethod
The method. Refer `PoolingParameter.pool`_.
pad : list of int
pad : sequence of int
The zero padding size(s). Refer `PoolingParameter.pad`_.
pad_h : int
The padding size of height. Refer `PoolingParameter.pad_h`_.
pad_w : int
The padding size of width. Refer `PoolingParameter.pad_w`_.
kernel_size : list of int
kernel_size : sequence of int
The kernel size(s). Refer `PoolingParameter.kernel_size`_.
kernel_h : int
The kernel size of height. Refer `PoolingParameter.kernel_h`_.
kernel_w : int
The kernel size of width. Refer `PoolingParameter.kernel_w`_.
stride : list of int
stride : sequence of int
The strides. Refer `PoolingParameter.stride`_.
stride_h : int
The stride of height. Refer `PoolingParameter.stride_h`_.
......@@ -212,10 +212,10 @@ class PoolingLayer(Layer):
else: self.arguments['strides'] = [param.stride_h, param.stride_w]
def LayerSetup(self, bottom):
return dragon.ops.Pool2d(bottom, **self.arguments)
return _ops.Pool2d(bottom, **self.arguments)
class ROIPoolingLayer(Layer):
class ROIPoolingLayer(_Layer):
"""The implementation of ``ROIPoolingLayer``.
Parameters
......@@ -238,10 +238,10 @@ class ROIPoolingLayer(Layer):
}
def LayerSetup(self, bottom):
return dragon.ops.ROIPool(bottom, **self.arguments)
return _ops.ROIPool(bottom, **self.arguments)
class ROIAlignLayer(Layer):
class ROIAlignLayer(_Layer):
"""The implementation of ``ROIAlignLayer``.
Parameters
......@@ -264,10 +264,10 @@ class ROIAlignLayer(Layer):
}
def LayerSetup(self, bottom):
return dragon.ops.ROIAlign(bottom, **self.arguments)
return _ops.ROIAlign(bottom, **self.arguments)
class LRNLayer(Layer):
class LRNLayer(_Layer):
"""The implementation of ``LRNLayer``.
Parameters
......@@ -296,15 +296,15 @@ class LRNLayer(Layer):
}
def LayerSetup(self, bottom):
return dragon.ops.LRN(bottom, **self.arguments)
return _ops.LRN(bottom, **self.arguments)
class NNResizeLayer(Layer):
class NNResizeLayer(_Layer):
"""The implementation of ``NNResizeLayer``.
Parameters
----------
shape : caffe_pb2.BlobShape
shape : BlobShape
The output shape. Refer `ResizeParameter.shape`_.
fx : float
The scale factor of height. Refer `ResizeParameter.fx`_.
......@@ -330,15 +330,15 @@ class NNResizeLayer(Layer):
raise ValueError('The second bottom should be provided to determine the shape.')
self.arguments['shape_like'] = bottom[1]
bottom = bottom[0]
return dragon.ops.NNResize(bottom, **self.arguments)
return _ops.NNResize(bottom, **self.arguments)
class BilinearResizeLayer(Layer):
class BilinearResizeLayer(_Layer):
"""The implementation of ``BilinearResizeLayer``.
Parameters
----------
shape : caffe_pb2.BlobShape
shape : BlobShape
The output shape. Refer `ResizeParameter.shape`_.
fx : float
The scale factor of height. Refer `ResizeParameter.fx`_.
......@@ -364,10 +364,10 @@ class BilinearResizeLayer(Layer):
raise ValueError('The second bottom should be provided to determine the shape.')
self.arguments['shape_like'] = bottom[1]
bottom = bottom[0]
return dragon.ops.BilinearResize(bottom, **self.arguments)
return _ops.BilinearResize(bottom, **self.arguments)
class DropBlockLayer(Layer):
class DropBlockLayer(_Layer):
"""The implementation of ``DropBlock2dLayer``.
Parameters
......@@ -394,4 +394,4 @@ class DropBlockLayer(Layer):
}
def LayerSetup(self, bottom):
return dragon.ops.DropBlock2d(bottom, **self.arguments)
\ No newline at end of file
return _ops.DropBlock2d(bottom, **self.arguments)
\ No newline at end of file
......@@ -15,10 +15,10 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
from dragon import config as _cfg
_GLOBAL_ROOT_CAFFE_SOLVER = True
_GLOBAL_CAFFE_ROOT_SOLVER = True
def set_mode_cpu():
......@@ -33,7 +33,7 @@ def set_mode_cpu():
The implementation of `set_mode_cpu(_caffe.cpp, L51)`_.
"""
dragon.config.EnableCPU()
_cfg.EnableCPU()
def set_mode_gpu():
......@@ -48,7 +48,7 @@ def set_mode_gpu():
The implementation of `set_mode_gpu(_caffe.cpp, L52)`_.
"""
dragon.config.EnableCUDA()
_cfg.EnableCUDA()
def set_device(device):
......@@ -63,7 +63,7 @@ def set_device(device):
The implementation of `SetDevice(common.cpp, L65)`_.
"""
dragon.config.SetGPU(device)
_cfg.SetGPU(device)
def set_random_seed(seed):
......@@ -83,7 +83,7 @@ def set_random_seed(seed):
The implementation of `set_random_seed(_caffe.cpp, L71)`_.
"""
dragon.config.SetRandomSeed(seed)
_cfg.SetRandomSeed(seed)
def root_solver():
......@@ -99,7 +99,7 @@ def root_solver():
The implementation of `root_solver(common.hpp, L164)`_.
"""
return _GLOBAL_ROOT_CAFFE_SOLVER
return _GLOBAL_CAFFE_ROOT_SOLVER
def set_root_solver(val):
......@@ -115,5 +115,5 @@ def set_root_solver(val):
The implementation of `set_root_solver(common.hpp, L165)`_.
"""
global _GLOBAL_ROOT_CAFFE_SOLVER
_GLOBAL_ROOT_CAFFE_SOLVER = val
\ No newline at end of file
global _GLOBAL_CAFFE_ROOT_SOLVER
_GLOBAL_CAFFE_ROOT_SOLVER = val
\ No newline at end of file
......@@ -15,12 +15,16 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
from collections import OrderedDict
from google.protobuf.text_format import Parse as parse_text_proto
from dragon.vm.caffe import layers as layer_factory
from dragon.vm.caffe.proto import caffe_pb2 as pb
from google.protobuf.text_format import Parse as _parse_text_proto
from dragon.core.tensor import Tensor as _Tensor
from dragon.core import workspace as _workspace
from dragon.vm.theano.gradient import grad as _Grad
from dragon.vm.theano.compile.function import function as _Function
from dragon.vm.caffe import layers as _layer_factory
from dragon.vm.caffe.proto import caffe_pb2 as _proto_def
class Blob(object):
......@@ -89,8 +93,8 @@ class Net(object):
The implementation of `Net_Init(_caffe.cpp, L109)`_.
"""
self._net = pb.NetParameter()
parse_text_proto(open(proto_txt,'r').read(), self._net)
self._net = _proto_def.NetParameter()
_parse_text_proto(open(proto_txt,'r').read(), self._net)
self._phase = phase
self._layers = []
self._inputs_to_tensors = {}
......@@ -100,16 +104,17 @@ class Net(object):
if len(self._net.input) > 0:
for input in self._net.input:
if not input in self._blobs:
variable = dragon.Tensor(input).Variable()
variable = _Tensor(input).Variable()
self._blobs[input] = {
'data': variable,
'diff': dragon.Tensor.Ref(variable.name + '_grad'),
'diff': _Tensor.Ref(variable.name + '_grad'),
}
self._inputs_to_tensors[input] = self._blobs[input]['data']
for layer in self._net.layer:
if not self.FilterLayer(layer): continue
self._layers.append(getattr(layer_factory, layer.type + 'Layer')(layer))
self._layers.append(getattr(
_layer_factory, layer.type + 'Layer')(layer))
self.Setup()
......@@ -199,7 +204,7 @@ class Net(object):
for idx, top in enumerate(layer._top):
self._blobs[top] = {
'data': outputs[idx],
'diff': dragon.Tensor.Ref(outputs[idx].name + '_grad'),
'diff': _Tensor.Ref(outputs[idx].name + '_grad'),
}
self._net_outputs.add(top)
......@@ -271,14 +276,14 @@ class Net(object):
for loss in self.losses:
for var in self.trainable_variables:
dragon.grad(loss, var)
_Grad(loss, var)
self._function = dragon.function(
self._function = _Function(
outputs=[self.blobs[key].data
for key in self.outputs])
if hasattr(self, '_model'):
dragon.workspace.Restore(self._model, format='caffe')
_workspace.Restore(self._model, format='caffe')
return self._function
......@@ -299,7 +304,7 @@ class Net(object):
The implementation of `CopyTrainedLayersFromBinaryProto(net.cpp, L780)`_.
"""
dragon.workspace.Restore(model, format='caffe')
_workspace.Restore(model, format='caffe')
def forward(self, **kwargs):
"""Forward pass. [**PyCaffe Style**]
......@@ -322,11 +327,11 @@ class Net(object):
def GetOutputs(net, net_outputs):
ret = {}
for output in net_outputs:
ret[output] = dragon.workspace.FetchTensor(net.blobs[output].data)
ret[output] = net.blobs[output].data.get_value()
return ret
for name, blob in kwargs.items():
dragon.workspace.FeedTensor(self._inputs_to_tensors[name], blob)
_workspace.FeedTensor(self._inputs_to_tensors[name], blob)
self.function()(return_outputs=False, stage='forward')
......@@ -347,7 +352,7 @@ class Net(object):
"""
for name, blob in kwargs.items():
dragon.workspace.FeedTensor(self._inputs_to_tensors[name], blob)
_workspace.FeedTensor(self._inputs_to_tensors[name], blob)
self.function()(return_outputs=False, stage='forward')
def backward(self, **kwargs):
......@@ -368,7 +373,7 @@ class Net(object):
"""
for name, blob in kwargs.items():
dragon.workspace.FeedTensor(self.blobs[name].diff, blob)
_workspace.FeedTensor(self.blobs[name].diff, blob)
self.function()(return_outputs=False, stage='backward')
def save(self, filename):
......@@ -399,7 +404,7 @@ class Net(object):
if param.data.name not in keys:
tensors.append(param.data)
keys.add(param.data.name)
dragon.workspace.Snapshot(tensors, filename, suffix='', format='caffe')
_workspace.Snapshot(tensors, filename, suffix='', format='caffe')
@property
def blobs(self):
......
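For orientation, a hedged usage sketch of the PyCaffe-style surface shown above; the prototxt path, the input blob name 'data', and the output blob name 'prob' are placeholders rather than names taken from this repository:

import numpy
from dragon.vm.caffe.net import Net

net = Net('net.prototxt', 'TEST')                            # parse the NetParameter and build layers
net.forward(data=numpy.zeros((1, 3, 224, 224), 'float32'))   # feed named inputs and run the forward stage
prob = net.blobs['prob'].data.get_value()                    # fetch an output blob as a numpy array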
......@@ -16,12 +16,16 @@ from __future__ import division
from __future__ import print_function
import time
import dragon
from google.protobuf.text_format import Parse as parse_text_proto
from dragon.vm.caffe.misc import root_solver
from dragon.vm.caffe.net import Net
from dragon.vm.caffe.proto import caffe_pb2 as pb
from dragon import updaters as _updaters
from dragon.core import mpi as _mpi
from dragon.core import workspace as _workspace
from google.protobuf.text_format import Parse as _parse_text_proto
from dragon.vm.caffe.net import Net as _Net
from dragon.vm.caffe.proto import caffe_pb2 as _proto_def
from dragon.vm.caffe.misc import root_solver as _root_solver
from dragon.vm.theano.compile.function import function as _Function
class Solver(object):
......@@ -48,8 +52,8 @@ class Solver(object):
>>> solver = Solver('solver.prototxt')
"""
self._param = pb.SolverParameter()
parse_text_proto(open(proto_txt, 'r').read(), self._param)
self._param = _proto_def.SolverParameter()
_parse_text_proto(open(proto_txt, 'r').read(), self._param)
if self._param.iter_size > 1:
raise NotImplementedError('Gradients accumulating is deprecated.')
self._net = None
......@@ -75,12 +79,12 @@ class Solver(object):
"""
if self._param.HasField('net'):
self._net = Net(self._param.net, "TRAIN")
self._net = _Net(self._param.net, "TRAIN")
if self._param.HasField('train_net'):
if self._net is not None:
raise RuntimeError('net and train_net can not both be specified.')
self._net = Net(self._param.train_net, "TRAIN")
self._net = _Net(self._param.train_net, "TRAIN")
def InitTestNets(self):
"""Initialize the test nets.
......@@ -94,10 +98,10 @@ class Solver(object):
The implementation of `InitTestNets(solver.cpp, L104)`_.
"""
if dragon.mpi.Is_Init():
idx, group = dragon.mpi.AllowParallel()
if _mpi.Is_Init():
rank, group = _mpi.AllowParallel()
# Only the root in a parallel group can test
if idx != -1 and dragon.mpi.Rank() != group[0]: return
if rank != -1 and _mpi.Rank() != group[0]: return
num_test_net = len(self._param.test_iter)
if num_test_net > 0:
......@@ -106,12 +110,12 @@ class Solver(object):
if len(self._param.test_net) > 0:
for test_net in self._param.test_net:
self._test_nets.append(Net(test_net, "TEST"))
self._test_nets.append(_Net(test_net, "TEST"))
num_test_net -= len(self._param.test_net)
# Consider generic_net
if num_test_net > 0:
self._test_nets.append(Net(self._param.net, "TEST"))
self._test_nets.append(_Net(self._param.net, "TEST"))
def BuildNets(self):
"""Build the nets.
......@@ -164,7 +168,7 @@ class Solver(object):
blob.decay_multiplier)
# Compile
self.update = dragon.function(updater=self.optimizer)
self.update = _Function(updater=self.optimizer)
def GetLearningRate(self):
"""Get learning rate based on the preset policy.
......@@ -244,7 +248,7 @@ class Solver(object):
for iter in range(test_iter):
self.tests[test_idx](return_outputs=False)
if not root_solver(): continue
if not _root_solver(): continue
if iter == 0:
for key in net.outputs:
values = net.blobs[key].data.get_value().flatten()
......@@ -259,7 +263,7 @@ class Solver(object):
test_score[i] += value
i += 1
if not root_solver(): return
if not _root_solver(): return
print('Iteration {}, Test net #{}'.format(self.iter, test_idx))
for idx, score in enumerate(test_score):
......@@ -299,12 +303,12 @@ class Solver(object):
loss = 0.0
for i in range(self._param.iter_size):
self.train(return_outputs=False)
if root_solver():
if _root_solver():
for e in self.net.losses:
values = e.get_value().flatten()
for v in values: loss += v
if root_solver():
if _root_solver():
loss /= self._param.iter_size
if len(loss_vec) < self._param.average_loss:
loss_vec.append(loss)
......@@ -319,7 +323,7 @@ class Solver(object):
self.update()
# Display
if root_solver() and self._param.display:
if _root_solver() and self._param.display:
if self.iter % self._param.display == 0:
base_lr = self.optimizer.base_lr
print('Iteration %d, lr = %s, loss = %f, time = %.2fs' % \
......@@ -410,7 +414,7 @@ class Solver(object):
"""
tensors = [blob.data for blob in self._layer_blobs]
filename = "_iter_" + str(self.iter)
dragon.workspace.Snapshot(tensors, filename,
_workspace.Snapshot(tensors, filename,
prefix=self._param.snapshot_prefix,
suffix='.caffemodel', format='caffe')
......@@ -492,7 +496,7 @@ class SGDSolver(Solver):
"""
def __init__(self, proto_txt):
super(SGDSolver, self).__init__(proto_txt=proto_txt)
self.optimizer = dragon.updaters.SGDUpdater(**self._optimizer_arguments)
self.optimizer = _updaters.SGDUpdater(**self._optimizer_arguments)
self.BuildOptimizer()
def ParseOptimizerArguments(self):
......@@ -514,7 +518,7 @@ class NesterovSolver(Solver):
"""
def __init__(self, proto_txt):
super(NesterovSolver, self).__init__(proto_txt=proto_txt)
self.optimizer = dragon.updaters.NesterovUpdater(**self._optimizer_arguments)
self.optimizer = _updaters.NesterovUpdater(**self._optimizer_arguments)
self.BuildOptimizer()
def ParseOptimizerArguments(self):
......@@ -538,7 +542,7 @@ class RMSPropSolver(Solver):
"""
def __init__(self, proto_txt):
super(RMSPropSolver, self).__init__(proto_txt=proto_txt)
self.optimizer = dragon.updaters.RMSPropUpdater(**self._optimizer_arguments)
self.optimizer = _updaters.RMSPropUpdater(**self._optimizer_arguments)
self.BuildOptimizer()
def ParseOptimizerArguments(self):
......@@ -565,7 +569,7 @@ class AdamSolver(Solver):
"""
def __init__(self, proto_txt):
super(AdamSolver, self).__init__(proto_txt=proto_txt)
self.optimizer = dragon.updaters.AdamUpdater(**self._optimizer_arguments)
self.optimizer = _updaters.AdamUpdater(**self._optimizer_arguments)
self.BuildOptimizer()
def ParseOptimizerArguments(self):
......
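And a similarly hedged sketch for the solver side; 'solver.prototxt' is a placeholder, the module path is assumed, and step()/snapshot() follow the usual PyCaffe conventions rather than anything confirmed by these hunks:

from dragon.vm.caffe.solver import SGDSolver   # assumed module path

solver = SGDSolver('solver.prototxt')   # parses SolverParameter, builds the nets and the updater
solver.step(100)                        # assumed PyCaffe-style call: run 100 training iterations
solver.snapshot()                       # writes <snapshot_prefix>_iter_<N>.caffemodel via the workspace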
......@@ -17,17 +17,18 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
import itertools
import numpy as np
from collections import defaultdict
from onnx import (checker, mapping, numpy_helper, GraphProto, OperatorSetIdProto)
from onnx import checker, mapping, numpy_helper, GraphProto, OperatorSetIdProto
from onnx.helper import make_tensor_value_info, make_model, printable_graph
from dragon.vm.onnx.helper import \
(extract_initializer, extract_leaf_tensors,
native_run_graph, fetch_initializer,)
from dragon.core import workspace as _workspace
from dragon.vm.onnx.helper import native_run_graph
from dragon.vm.onnx.helper import fetch_initializer
from dragon.vm.onnx.helper import extract_initializer
from dragon.vm.onnx.helper import extract_leaf_tensors
from dragon.vm.onnx.nodes.factory import get_nodes_def
......@@ -104,15 +105,22 @@ class DragonFrontend(object):
if run_native_graph and not enforce_no_running:
inputs = {}
for name, (elem_type, shape) in value_info.items():
inputs[name] = np.random.randn(*shape).astype(
inputs[name] = numpy.random.randn(*shape).astype(
mapping.TENSOR_TYPE_TO_NP_TYPE[elem_type])
ws, outputs, initializer = native_run_graph(
graph_def, inputs, initializer, init_func)
for name in graph_def.output:
output = outputs[name]
elem_type = mapping.NP_TYPE_TO_TENSOR_TYPE[output.dtype]
shape = output.shape
value_info[name] = (elem_type, shape)
if enforce_no_running:
# In some cases (e.g. PyTorch), the graph has already been run,
# so the outputs are already in ``value_info``
import dragon.core.workspace as ws
ws = _workspace.get_default_workspace()
initializer = fetch_initializer(initializer)
# Prepare to make the graph
......
......@@ -21,8 +21,8 @@ import sys
from onnx.backend.base import namedtupledict
from onnx import numpy_helper
import dragon as dg
from dragon.vm.onnx.workspace import Workspace
from dragon.core import workspace as _workspace
from dragon.core.tensor import Tensor as _Tensor
INITIALIZER_TAG = {
......@@ -65,7 +65,7 @@ def fetch_initializer(initializer):
# Fetch the initializer
return [
numpy_helper.from_array(
dg.workspace.FetchTensor(name), name=name)
_workspace.FetchTensor(name), name=name)
for name in initializer
]
......@@ -87,32 +87,32 @@ def native_run_graph(graph_def, inputs, initializer, init_func=None):
graph_def.arg[i].i = 0
# Create an anonymous workspace
ws = Workspace()
ws = _workspace.Workspace()
with dg.ws_scope(ws.name):
with ws.as_default():
# Register all the initializer before feeding them
for name in initializer:
dg.Tensor(name=name).Variable()
_Tensor(name=name).Variable()
# Feed the given values if necessary
if init_func: init_func()
# Feed the external inputs
for name, blob in inputs.items():
dg.workspace.FeedTensor(name, blob)
_workspace.FeedTensor(name, blob)
# Create and Run the graph
graph_name = dg.workspace.CreateGraph(graph_def)
dg.workspace.RunGraph(graph_name, return_outputs=False)
graph_name = _workspace.CreateGraph(graph_def)
_workspace.RunGraph(graph_name, return_outputs=False)
# Fetch the outputs
output_names = graph_def.output
output_values = [dg.workspace.FetchTensor(name) for name in output_names]
output_values = [_workspace.FetchTensor(name) for name in output_names]
# Fetch the initializer
initializer = [
numpy_helper.from_array(
dg.workspace.FetchTensor(name), name=name)
_workspace.FetchTensor(name), name=name)
for name in initializer
]
......
......@@ -16,12 +16,12 @@ from __future__ import division
from __future__ import print_function
import os
import numpy as np
from onnx import mapping
from google.protobuf.text_format import Parse as parse_text_proto
import numpy
import dragon.proto.dragon_pb2 as pb
import dragon.import_c_api as C
from onnx import mapping as _mapping
from dragon.core import workspace as _workspace
from dragon.proto import dragon_pb2 as _proto_def
from google.protobuf.text_format import Parse as _parse_text_proto
from dragon.vm.theano.compile.function import Function
from dragon.vm.onnx.frontend import graph_def_to_onnx_model
......@@ -119,8 +119,8 @@ def export_from_graph_text(
"""
with open(text_file, 'r') as rf:
graph_def = pb.GraphDef()
parse_text_proto(rf.read(), graph_def)
graph_def = _proto_def.GraphDef()
_parse_text_proto(rf.read(), graph_def)
export_from_graph_def(
graph_def=graph_def,
......@@ -148,8 +148,10 @@ def import_to_graph_def(model_path):
"""
if not os.path.exists(model_path):
raise ValueError('Given model ({}) does not exist.'.format(model_path))
graph_def = pb.GraphDef()
serialized_proto = C.ImportONNXModel(model_path)
graph_def = _proto_def.GraphDef()
serialized_proto = _workspace \
.get_default_workspace() \
.ImportONNXModel(model_path)
graph_def.ParseFromString(serialized_proto)
return graph_def
......@@ -238,4 +240,4 @@ def surgery_on_graph_def(
def make_value_info(shape, dtype='float32'):
return mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], shape
\ No newline at end of file
return _mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(dtype)], shape
\ No newline at end of file
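A tiny worked call of the helper above (the shape is arbitrary): the numpy dtype is mapped to its ONNX TensorProto enum and the shape passes through unchanged.

import numpy
from onnx import mapping

elem_type = mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype('float32')]  # -> TensorProto.FLOAT (1)
value_info = (elem_type, [1, 3, 224, 224])                          # what make_value_info returns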
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# Codes are based on:
#
# <https://github.com/pytorch/pytorch/blob/master/caffe2/python/onnx/workspace.py>
#
# ------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid
import dragon as dg
class Workspace(object):
def __init__(self):
self.name = 'onnx/' + str(uuid.uuid4())
def __getattr__(self, attr):
def f(*args, **kwargs):
with dg.ws_scope(self.name, ):
return getattr(dg.workspace, attr)(*args, **kwargs)
return f
def __del__(self):
self.ResetWorkspace(self.name)
\ No newline at end of file
......@@ -16,45 +16,42 @@ from __future__ import print_function
import warnings
from collections import defaultdict
import dragon
from dragon.core import workspace as _workspace
from dragon.core.tensor import Tensor as _Tensor
from dragon.vm.theano.compile import function as _Function
from dragon.vm.tensorflow.protobuf import config_pb2
from dragon.vm.tensorflow.training.optimizer import Optimizer
from dragon.vm.tensorflow.ops.variables import VariablesInitializer
from dragon.vm.tensorflow.framework import ops
_GLOBAL_DATA_FLOW_KEYS = defaultdict(dict)
class _DataFlow(object):
"""DataFlow takes a group of expressions and
the specified output tensors.
We store the flows that require the same output names,
i.e., those flows can be reused and should not to create a new graph.
i.e., those flows can be reused and should not be created again.
"""
def __init__(self, functions):
self.functions = functions
def run(self, feed_dict=None):
for i, function in enumerate(self.functions):
for i, func in enumerate(self.functions):
if i == 0 and feed_dict is not None:
for tensor, value in feed_dict.items():
dragon.workspace.FeedTensor(tensor, value)
function(return_outputs=False)
_workspace.FeedTensor(tensor, value)
func(return_outputs=False)
@classmethod
def try_get(cls, workspace, flow_key):
global _GLOBAL_DATA_FLOW_KEYS
if flow_key in _GLOBAL_DATA_FLOW_KEYS[workspace]:
return _GLOBAL_DATA_FLOW_KEYS[workspace][flow_key]
def try_get(cls, graph_id, flow_key):
if flow_key in _GLOBAL_DATA_FLOWS[graph_id]:
return _GLOBAL_DATA_FLOWS[graph_id][flow_key]
@classmethod
def try_add(cls, workspace, flow_key, flow):
global _GLOBAL_DATA_FLOW_KEYS
_GLOBAL_DATA_FLOW_KEYS[workspace][flow_key] = flow
def try_add(cls, graph_id, flow_key, flow):
global _GLOBAL_DATA_FLOWS
_GLOBAL_DATA_FLOWS[graph_id][flow_key] = flow
class BaseSession(object):
......@@ -115,7 +112,7 @@ class BaseSession(object):
for e in fetches:
if isinstance(e, Optimizer): optimizers.append(e)
elif isinstance(e, VariablesInitializer): tensors.extend(e.var_list)
elif isinstance(e, dragon.Tensor): tensors.append(e)
elif isinstance(e, _Tensor): tensors.append(e)
# Find minimum solving targets
targets = set()
......@@ -124,24 +121,23 @@ class BaseSession(object):
for t in optimizer._targets: targets.add(t)
targets = list(targets)
gen_flow_key = tuple(e.name for e in targets)
flow_key = tuple(e.name for e in targets)
# Exist this data flow before?
data_flow = _DataFlow.try_get(
self._graph._workspace, gen_flow_key)
flow = _DataFlow.try_get(id(self._graph), flow_key)
# Run by feeding
if feed_dict is not None:
# Check the feed dict
for key, value in feed_dict.items():
if not isinstance(key, dragon.Tensor):
raise TypeError('The key of feed_dict key should be a Tensor.')
if not isinstance(key, _Tensor):
raise TypeError('The key of ``feed_dict`` should be a Tensor.')
if key.shape is not None:
# Align the number of dimensions
if len(key.shape) != len(value.shape):
raise RuntimeError(
'The Tensor({}) was limited to {} dimensions, \
while feed a value with {} dimensions.'
'The Tensor({}) was limited to {} dimensions, '\
'while feed a value with {} dimensions.'
.format(key.name, len(key.shape), len(value.shape)))
# Verify for the each dimension
for i in range(len(key.shape)):
......@@ -150,19 +146,20 @@ class BaseSession(object):
raise RuntimeError(
'The shape of Tensor({}) was limited as ('.format(key.name) +
','.join([str(dim) for dim in key.shape]) + '), ' +
'while feed a value with (' + ','.join([str(dim) for dim in value.shape]) + ').')
'while feed a value with (' +
','.join([str(dim) for dim in value.shape]) + ').')
# Create a new data flow if necessary
if data_flow is None:
functions = [dragon.function(outputs=targets)]
if flow is None:
functions = [_Function(outputs=targets)]
for optimizer in optimizers:
functions.append(dragon.function(
functions.append(_Function(
updater=optimizer.updater))
data_flow = _DataFlow(functions)
_DataFlow.try_add(self.graph._workspace, gen_flow_key, data_flow)
flow = _DataFlow(functions)
_DataFlow.try_add(id(self._graph), flow_key, flow)
# Run this data flow
data_flow.run(feed_dict)
flow.run(feed_dict)
# Fetch after running
returns = []
......@@ -234,3 +231,8 @@ class InteractiveSession(BaseSession):
@staticmethod
def reset(target, containers=None, config=None):
pass
# Store the flows for different graphs
# ThreadLocal is not necessary
_GLOBAL_DATA_FLOWS = defaultdict(dict)
\ No newline at end of file
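The caching above reduces to a per-graph dictionary keyed by the tuple of target names; a stripped-down sketch of just that lookup (no Dragon calls, all names illustrative):

from collections import defaultdict

_flows = defaultdict(dict)  # id(graph) -> {flow_key: flow}

def get_or_build_flow(graph_id, targets, build):
    flow_key = tuple(t.name for t in targets)    # e.g. ('loss:0', 'accuracy:0')
    flow = _flows[graph_id].get(flow_key)
    if flow is None:                             # compile once per (graph, outputs) pair
        flow = build(targets)
        _flows[graph_id][flow_key] = flow
    return flow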
......@@ -13,8 +13,11 @@ from dragon.vm.tensorflow.framework import ops
from dragon.vm.tensorflow.ops import var_scope as variable_scope
def get_variables(scope=None, suffix=None,
collection=ops.GraphKeys.GLOBAL_VARIABLES):
def get_variables(
scope=None,
suffix=None,
collection=ops.GraphKeys.GLOBAL_VARIABLES,
):
if isinstance(scope, variable_scope.VariableScope):
scope = scope.name
if suffix is not None:
......
......@@ -19,25 +19,31 @@ from __future__ import print_function
import math
from dragon.vm.tensorflow.framework import dtypes
from dragon.vm.tensorflow.ops import random_ops
from dragon.vm.tensorflow.framework import dtypes
__all__ = ['xavier_initializer',
'xavier_initializer_conv2d',
'variance_scaling_initializer']
def xavier_initializer(uniform=True, seed=None, dtype=dtypes.float32):
return variance_scaling_initializer(factor=1.0, mode='FAN_AVG',
uniform=uniform, seed=seed, dtype=dtype)
xavier_initializer_conv2d = xavier_initializer
def xavier_initializer(
uniform=True,
seed=None,
dtype=dtypes.float32,
):
return variance_scaling_initializer(
factor=1.0,
mode='FAN_AVG',
uniform=uniform,
seed=seed,
dtype=dtype,
)
def variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False,
seed=None, dtype=dtypes.float32):
def variance_scaling_initializer(
factor=2.0,
mode='FAN_IN',
uniform=False,
seed=None,
dtype=dtypes.float32,
):
if not dtype.is_floating:
raise TypeError('Cannot create initializer for non-floating point type.')
if mode not in ['FAN_IN', 'FAN_OUT', 'FAN_AVG']:
......@@ -79,3 +85,7 @@ def variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False,
seed=seed)
return _initializer
# Alias
xavier_initializer_conv2d = xavier_initializer
\ No newline at end of file
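The body of _initializer is elided in the hunk above, so for reference only: the standard tf.contrib variance-scaling scheme (which this port presumably follows; treat the formulas as an assumption) samples uniformly from [-limit, limit] with limit = sqrt(3 * factor / n), or from a truncated normal with stddev = sqrt(1.3 * factor / n), where n is fan_in, fan_out, or their average depending on mode.

import math

factor, fan_in, fan_out = 1.0, 256, 128     # xavier_initializer uses factor=1.0, mode='FAN_AVG'
n = (fan_in + fan_out) / 2.0                # FAN_AVG
limit = math.sqrt(3.0 * factor / n)         # uniform: U(-limit, limit)
stddev = math.sqrt(1.3 * factor / n)        # truncated normal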
......@@ -17,20 +17,14 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import dragon.ops as op_lib
import dragon.vm.tensorflow.framework.ops as ops
from dragon.vm.tensorflow.framework import ops
from dragon.vm.tensorflow.contrib.layers import initializers
from dragon.vm.tensorflow.ops import init_ops
from dragon.vm.tensorflow.ops import nn
from dragon.vm.tensorflow.ops import var_scope as vs
from dragon.vm.tensorflow.layers import layers
from dragon.ops import Flatten as _FlattenOp
__all__ = ['flatten']
_LAYERS_UID_DICT = defaultdict(int)
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
......@@ -38,53 +32,52 @@ DATA_FORMAT_NCDHW = 'NCDHW'
DATA_FORMAT_NDHWC = 'NDHWC'
def _default_scope(scope, key, indicator):
if scope is None:
return indicator
# global _LAYERS_UID_DICT
# _LAYERS_UID_DICT[key] += 1
# return '{}{}'.format(indicator, _LAYERS_UID_DICT[key])
else:
return scope
def avg_pool2d(inputs,
def avg_pool2d(
inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
scope=None,
):
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
return layers.average_pooling2d(inputs=inputs,
return layers.average_pooling2d(
inputs=inputs,
pool_size=kernel_size,
strides=stride,
padding=padding,
data_format=df)
data_format=df,
)
def max_pool2d(inputs,
def max_pool2d(
inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
scope=None,
):
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
return layers.max_pooling2d(inputs=inputs,
return layers.max_pooling2d(
inputs=inputs,
pool_size=kernel_size,
strides=stride,
padding=padding,
data_format=df)
data_format=df,
)
def convolution(inputs,
def convolution(
inputs,
num_outputs,
kernel_size,
stride=1,
......@@ -102,8 +95,9 @@ def convolution(inputs,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
scope = _default_scope(scope, 'CONVOLUTION', 'Conv')
scope=None,
):
scope = _default_scope(scope, 'Conv')
if data_format not in [None, 'NHWC', 'NCHW']:
raise ValueError('Invalid data_format: %r' % (data_format,))
data_format = 'channels_first' if data_format == 'NCHW' else 'channels_last'
......@@ -126,7 +120,8 @@ def convolution(inputs,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
reuse=reuse)
reuse=reuse,
)
# Simple alias.
......@@ -134,7 +129,8 @@ convolution2d = convolution
conv2d = convolution2d
def fully_connected(inputs,
def fully_connected(
inputs,
num_outputs,
activation_fn=nn.relu,
normalizer_fn=None,
......@@ -147,8 +143,9 @@ def fully_connected(inputs,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
scope = _default_scope(scope, 'FULLY_CONNECTED', 'fully_connected')
scope=None,
):
scope = _default_scope(scope, 'fully_connected')
with vs.variable_scope(scope, reuse=reuse) as sc:
return layers.dense(
inputs=inputs,
......@@ -160,10 +157,12 @@ def fully_connected(inputs,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
reuse=reuse)
reuse=reuse,
)
def batch_norm(inputs,
def batch_norm(
inputs,
decay=0.999,
center=True,
scale=False,
......@@ -184,8 +183,9 @@ def batch_norm(inputs,
scope=None,
renorm=False,
renorm_clipping=None,
renorm_decay=0.99):
scope = _default_scope(scope, 'BATCH_NORM', 'BatchNorm')
renorm_decay=0.99,
):
scope = _default_scope(scope, 'BatchNorm')
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
axis = 1 if data_format == DATA_FORMAT_NCHW else -1
......@@ -193,10 +193,14 @@ def batch_norm(inputs,
with vs.variable_scope(scope, reuse=reuse) as sc:
if not param_initializers:
param_initializers = {}
beta_initializer = param_initializers.get('beta', init_ops.zeros_initializer())
gamma_initializer = param_initializers.get('gamma', init_ops.ones_initializer())
moving_mean_initializer = param_initializers.get('moving_mean', init_ops.zeros_initializer())
moving_variance_initializer = param_initializers.get('moving_variance', init_ops.ones_initializer())
beta_initializer = param_initializers.get(
'beta', init_ops.zeros_initializer())
gamma_initializer = param_initializers.get(
'gamma', init_ops.ones_initializer())
moving_mean_initializer = param_initializers.get(
'moving_mean', init_ops.zeros_initializer())
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer())
if not param_regularizers:
param_regularizers = {}
......@@ -222,11 +226,19 @@ def batch_norm(inputs,
renorm_clipping=renorm_clipping,
renorm_momentum=renorm_decay,
fused=fused,
training=is_training)
training=is_training,
)
def flatten(inputs,
def flatten(
inputs,
outputs_collections=None,
scope=None):
return op_lib.Flatten(inputs, axis=0, keep_axes=2)
scope=None,
):
return _FlattenOp(inputs, axis=0, keep_axes=2)
def _default_scope(scope, indicator):
"""Return the default scope."""
if scope is None: return indicator
else: return scope
\ No newline at end of file
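On the flatten call above: assuming keep_axes counts the axes retained, Flatten(axis=0, keep_axes=2) collapses everything after the batch dimension, i.e. (N, C, H, W) -> (N, C*H*W). A NumPy equivalent of that reshape (shapes are arbitrary):

import numpy

x = numpy.zeros((8, 64, 7, 7), dtype='float32')
y = x.reshape(x.shape[0], -1)   # -> (8, 3136)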
......@@ -13,60 +13,68 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
import numpy as np
import numpy
from dragon.core import scope as _scope
from dragon.core import workspace as _workspace
from dragon.core.tensor import Tensor as _Tensor
def constant(value, dtype=None, shape=None, name=None, verify_shape=False):
def constant(
value,
dtype=None,
shape=None,
name=None,
verify_shape=False,
):
if dtype is not None:
if isinstance(value, np.ndarray):
feed = value.astype(dtype.as_numpy_dtype)
elif isinstance(value, list):
feed = np.array(value, dtype.as_numpy_dtype)
else:
feed = np.array([value], dtype.as_numpy_dtype)
if isinstance(value, numpy.ndarray):
value = value.astype(dtype.as_numpy_dtype)
else:
if isinstance(value, np.ndarray): feed = value
value = numpy.array(value, dtype.as_numpy_dtype)
else:
feed = np.array(value)
if not isinstance(value, numpy.ndarray):
value = numpy.array(value)
# Discard the default float64
if feed.dtype == np.float64:
feed = feed.astype(np.float32)
if value.dtype == numpy.float64:
value = value.astype(numpy.float32)
# Determine the shape
if shape is not None:
if feed.size == 1:
if value.size == 1:
# Case 1: Broadcast with scalar value
c = feed.flatten()[0]
feed = np.zeros(shape, feed.dtype)
feed.fill(c)
scalar = value.flatten()[0]
value = numpy.empty(shape, value.dtype)
value.fill(scalar)
else:
# Case 2: Reshape directly
if verify_shape:
if shape is not None:
if len(shape) != len(value.shape):
raise RuntimeError(
'The constant was limited to {} dimensions, \
while feed a value with {} dimensions.'.
format(len(shape), len(value.shape)))
'The constant was limited to {} dimensions, ' \
'while feed a value with {} dimensions.'
.format(len(shape), len(value.shape)))
for i in range(len(shape)):
if shape[i] is None: continue
if shape[i] != value.shape[i]:
raise RuntimeError(
'The shape of constant was limited as (' +
','.join([str(dim) for dim in shape]) + '), ' +
'while feed a value with (' + ','.join([str(dim) for dim in value.shape]) + ').')
feed = feed.reshape(shape)
'while fed a value with (' +
','.join([str(dim) for dim in value.shape]) + ').')
value = value.reshape(shape)
defined_name = dragon.workspace.GetDummyName(
dragon.get_default_name_scope() +
# Get an available name
defined_name = \
_workspace.GetDummyName(
basename=_scope.get_default_name_scope() +
(name if name else 'Const'),
suffix=':0', domain='Tensor')
# Feed into the workspace
tensor = dragon.Tensor.Ref(
return _Tensor.Ref(
name=defined_name,
shape=list(feed.shape),
dtype=str(feed.dtype))
tensor.set_value(feed)
return tensor
\ No newline at end of file
shape=list(value.shape),
dtype=str(value.dtype)
).set_value(value)
\ No newline at end of file
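A minimal usage sketch of the constant() helper above; it assumes an active default workspace and uses the module's numpy import, with hypothetical tensor names:

# Case 1: a scalar broadcast to the requested shape (filled with 1.0).
a = constant(1.0, shape=[2, 3], name='a')
# Case 2: an existing array reshaped to the requested shape.
b = constant(numpy.arange(6), shape=[2, 3], name='b')
# Without an explicit dtype, float64 inputs are downcast to float32.
c = constant(numpy.zeros((4,), dtype=numpy.float64), name='c')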
......@@ -13,9 +13,10 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dragon.vm.tensorflow.framework.ops import Graph
# The Graph (Workspace:))
from dragon.core.workspace import Workspace as Graph
# Utilities used when building a Graph.
# Utilities used when building a Graph
from dragon.vm.tensorflow.framework.ops import device
from dragon.vm.tensorflow.framework.ops import name_scope
from dragon.vm.tensorflow.framework.ops import get_default_graph
......@@ -27,5 +28,6 @@ from dragon.vm.tensorflow.framework.ops import GraphKeys
from dragon.vm.tensorflow.framework.constant_op import *
from dragon.vm.tensorflow.framework.dtypes import *
# Utilities used to represent a Tensor
from dragon.vm.tensorflow.framework.tensor_shape import Dimension
from dragon.vm.tensorflow.framework.tensor_shape import TensorShape
\ No newline at end of file
......@@ -17,16 +17,20 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import uuid
import threading
import dragon
from dragon.core import tls as _tls
from dragon.core import scope as _scope
from dragon.core import workspace as _workspace
from dragon.core.tensor import Tensor as _Tensor
from dragon.vm.tensorflow.framework import constant_op
from dragon.vm.tensorflow.util import tf_contextlib
def convert_to_tensor(value, dtype=None, name=None, preferred_dtype=None):
def convert_to_tensor(
value,
dtype=None,
name=None,
preferred_dtype=None,
):
"""Converts the given value to a Tensor.
Parameters
......@@ -46,73 +50,10 @@ def convert_to_tensor(value, dtype=None, name=None, preferred_dtype=None):
The output tensor.
"""
if isinstance(value, dragon.Tensor): return value
if isinstance(value, _Tensor): return value
return constant_op.constant(value, dtype=dtype, name=name)
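A brief usage sketch of convert_to_tensor(); the inputs are illustrative only:

x = convert_to_tensor([[1., 2.], [3., 4.]], name='x')  # wrapped via constant()
y = convert_to_tensor(x)                               # already a Tensor, returned as-is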
class Graph(object):
"""A wrapper to connect ``Function`` to ``Workspace``.
Note that official TensorFlow traces the expressions explicitly
in this class, while we do it in the virtual stack.
Besides, organizing a ``Flow``, i.e., expressions with specified
outputs, should also be done here.
"""
def __init__(self):
self._collections = {}
self._workspace = 'tf/graph/' + str(uuid.uuid4())
def get_collection_ref(self, name):
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
coll_list = self._collections.get(name, None)
if coll_list is None:
return []
if scope is None:
return list(coll_list)
else:
filter_coll_list = []
regex = re.compile(scope)
for item in coll_list:
if hasattr(item, "name") and regex.match(item.name):
filter_coll_list.append(item)
return filter_coll_list
def add_to_collection(self, name, value):
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
for name in names:
self.add_to_collection(name, value)
def device(self, device_name_or_function):
if not isinstance(device_name_or_function, str):
raise TypeError('The device function should be a str.')
device_and_id = device_name_or_function.split('/')[1]
device, id = device_and_id.split(':')
if device not in ['cpu', 'gpu']:
raise ValueError('The device should either be cpu or gpu.')
try:
id = int(id)
except Exception as e:
raise ValueError('The device id should be a integer.')
return dragon.device_scope(device, device_id=id)
def as_default(self):
return _default_graph_stack.get_controller(self)
class GraphKeys(object):
GLOBAL_VARIABLES = "variables"
# Key to collect local variables that are local to the machine and are not
......@@ -202,112 +143,15 @@ def add_to_collections(names, value):
def name_scope(name, default_name=None, values=None):
name = default_name if name is None else name
name = '' if name is None else name
return dragon.name_scope(name)
##############################################
# #
# Default Stack #
# #
##############################################
class _DefaultStack(threading.local):
"""A thread-local stack of objects for providing implicit defaults."""
def __init__(self):
super(_DefaultStack, self).__init__()
self._enforce_nesting = True
self.stack = []
def get_default(self):
return self.stack[-1] if len(self.stack) >= 1 else None
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@property
def enforce_nesting(self):
return self._enforce_nesting
@enforce_nesting.setter
def enforce_nesting(self, value):
self._enforce_nesting = value
@tf_contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
if self._enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects" %
type(default))
self.stack.pop()
else:
self.stack.remove(default)
class _DefaultGraphStack(_DefaultStack):
"""A thread-local stack of objects for providing an implicit default graph."""
def __init__(self):
super(_DefaultGraphStack, self).__init__()
self._global_default_graph = None
def get_default(self):
"""Override that returns a global default if the stack is empty."""
ret = super(_DefaultGraphStack, self).get_default()
if ret is None:
ret = self._GetGlobalDefaultGraph()
return ret
def _GetGlobalDefaultGraph(self):
if self._global_default_graph is None:
# TODO(mrry): Perhaps log that the default graph is being used, or
# provide some other feedback to prevent confusion when a mixture of
# the global default graph and an explicit graph are combined in the
# same process.
self._global_default_graph = Graph()
# Override the random workspace name
self._global_default_graph._workspace = 'default'
return self._global_default_graph
def reset(self):
super(_DefaultGraphStack, self).reset()
# Call the Dragon API to reset the workspace
dragon.workspace.ResetWorkspace(self._global_default_graph._workspace)
self._global_default_graph = None
@tf_contextlib.contextmanager
def get_controller(self, default):
with super(_DefaultGraphStack, self).get_controller(default) as g:
with dragon.ws_scope(g._workspace):
yield g
_default_graph_stack = _DefaultGraphStack()
_default_session_stack = _DefaultStack()
return _scope.name_scope(name)
def get_default_graph():
return _default_graph_stack.get_default()
return _workspace.get_default_workspace()
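A hedged sketch of the new behavior, in which the "default graph" is simply the default Dragon workspace:

g = get_default_graph()  # a dragon Workspace, no longer a local Graph wrapper
# Tensors and graphs created afterwards are registered in this workspace.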
def reset_default_graph():
if not _default_graph_stack.is_cleared():
raise AssertionError("Do not use tf.reset_default_graph() to clear "
"nested graphs. If you need a cleared graph, "
"exit the nesting and create a new graph.")
_default_graph_stack.reset()
_workspace.reset_default_workspace()
def default_session(session):
......@@ -319,7 +163,17 @@ def get_default_session():
def device(device_name_or_function):
return get_default_graph().device(device_name_or_function)
if not isinstance(device_name_or_function, str):
raise TypeError('The device function should be a str.')
device_and_id = device_name_or_function.split('/')[1]
device, id = device_and_id.split(':')
if device not in ['cpu', 'gpu']:
raise ValueError('The device should either be cpu or gpu.')
try:
id = int(id)
except Exception as _:
raise ValueError('The device id should be an integer.')
return _scope.device_scope(device, device_id=id)
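A hypothetical example of the device specification parsed above:

with device('/gpu:0'):
    # Ops created in this block run under dragon's device scope for GPU 0.
    pass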
def _eval_using_default_session(tensors, feed_dict, session=None):
......@@ -333,6 +187,10 @@ def _eval_using_default_session(tensors, feed_dict, session=None):
return session.run(tensors, feed_dict)
_default_session_stack = _tls.Stack()
# The Monkey Patching
# Requires "import dragon.vm.tensorflow"
dragon.Tensor.eval = lambda self, feed_dict=None, session=None : \
_Tensor.eval = lambda self, feed_dict=None, session=None : \
_eval_using_default_session(self, feed_dict, session)
\ No newline at end of file
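A hedged sketch of the patched eval(); it assumes a default session is installed on the session stack, and the names are illustrative:

# y = some_tensor.eval(feed_dict={x: data})
# is equivalent to:
# get_default_session().run(some_tensor, feed_dict={x: data})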
......@@ -13,7 +13,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dragon.core.tensor import Tensor
from dragon.core.tensor import Tensor as _Tensor
class Dimension(object):
......@@ -114,4 +114,5 @@ def get_shape(self):
return TensorShape(self.shape)
Tensor.get_shape = get_shape
\ No newline at end of file
# The Monkey Patching
_Tensor.get_shape = get_shape
\ No newline at end of file
......@@ -28,7 +28,13 @@ from dragon.vm.tensorflow.util import nest
class Layer(object):
def __init__(self, trainable=True, name=None, dtype=dtypes.float32, **kwargs):
def __init__(
self,
trainable=True,
name=None,
dtype=dtypes.float32,
**kwargs
):
allowed_kwargs = {'_scope', '_reuse'}
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
......@@ -79,13 +85,15 @@ class Layer(object):
_add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
return outputs
def add_variable(self,
def add_variable(
self,
name,
shape,
dtype=None,
trainable=True,
initializer=None,
regularizer=None):
regularizer=None,
):
if dtype is None: dtype = self.dtype
variable = vs.get_variable(
name,
......@@ -93,7 +101,8 @@ class Layer(object):
initializer=initializer,
regularizer=regularizer,
dtype=dtypes.as_dtype(dtype),
trainable=trainable and self.trainable)
trainable=trainable and self.trainable,
)
if trainable:
self._trainable_weights.append(variable)
else:
......@@ -105,9 +114,14 @@ class Layer(object):
class InputSpec(object):
def __init__(self,
dtype=None, shape=None, ndim=None,
max_ndim=None, min_ndim=None, axes=None
def __init__(
self,
dtype=None,
shape=None,
ndim=None,
max_ndim=None,
min_ndim=None,
axes=None,
):
self.dtype = dtype
self.shape = shape
......@@ -125,9 +139,6 @@ def _to_snake_case(name):
return 'private' + insecure
PER_GRAPH_LAYER_NAME_UIDS = weakref.WeakKeyDictionary()
def _unique_layer_name(name):
global PER_GRAPH_LAYER_NAME_UIDS
graph = ops.get_default_graph()
......@@ -153,3 +164,6 @@ def _add_elements_to_collection(elements, collection_list):
for element in elements:
if element not in collection_set:
collection.append(element)
PER_GRAPH_LAYER_NAME_UIDS = weakref.WeakKeyDictionary()
\ No newline at end of file
......@@ -20,7 +20,8 @@ from dragon.vm.tensorflow.ops import nn
class _Conv(base.Layer):
def __init__(self,
def __init__(
self,
rank,
filters,
kernel_size,
......@@ -37,7 +38,8 @@ class _Conv(base.Layer):
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
**kwargs
):
super(_Conv, self).__init__(trainable=trainable, name=name, **kwargs)
self.rank = rank
self.filters = filters
......@@ -82,7 +84,8 @@ class _Conv(base.Layer):
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
dtype=self.dtype)
dtype=self.dtype,
)
if self.use_bias:
self.bias = self.add_variable(
......@@ -90,7 +93,8 @@ class _Conv(base.Layer):
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
dtype=self.dtype)
dtype=self.dtype,
)
else:
self.bias = None
......@@ -108,10 +112,15 @@ class _Conv(base.Layer):
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=self.padding.upper(),
data_format=tf_data_format)
data_format=tf_data_format,
)
if self.bias is not None:
outputs = nn.bias_add(outputs, self.bias, data_format=tf_data_format)
outputs = nn.bias_add(
outputs,
self.bias,
data_format=tf_data_format,
)
if self.activation is not None:
return self.activation(outputs)
......@@ -119,7 +128,9 @@ class _Conv(base.Layer):
class Conv2D(_Conv):
def __init__(self, filters,
def __init__(
self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
......@@ -134,7 +145,8 @@ class Conv2D(_Conv):
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
**kwargs
):
super(Conv2D, self).__init__(
rank=2,
filters=filters,
......@@ -154,7 +166,8 @@ class Conv2D(_Conv):
name=name, **kwargs)
def conv2d(inputs,
def conv2d(
inputs,
filters,
kernel_size,
strides=(1, 1),
......@@ -170,7 +183,8 @@ def conv2d(inputs,
activity_regularizer=None,
trainable=True,
name=None,
reuse=None):
reuse=None,
):
return Conv2D(
filters=filters,
kernel_size=kernel_size,
......@@ -188,4 +202,5 @@ def conv2d(inputs,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name).apply(inputs)
\ No newline at end of file
_scope=name,
).apply(inputs)
\ No newline at end of file
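A minimal, hypothetical call to the functional wrapper above; x is assumed to be a 4-D image tensor:

y = conv2d(x, filters=32, kernel_size=(3, 3), strides=(1, 1), name='conv1')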
......@@ -24,7 +24,8 @@ from dragon.vm.tensorflow.ops import standard_ops
class Dense(base.Layer):
def __init__(self,
def __init__(
self,
units,
activation=None,
use_bias=True,
......@@ -35,7 +36,8 @@ class Dense(base.Layer):
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
**kwargs
):
super(Dense, self).__init__(trainable=trainable, name=name, **kwargs)
self.units = units
self.activation = activation
......@@ -61,7 +63,8 @@ class Dense(base.Layer):
shape=[input_shape[-1].value, self.units],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
dtype=self.dtype)
dtype=self.dtype,
)
if self.use_bias:
self.bias = self.add_variable(
......@@ -69,7 +72,8 @@ class Dense(base.Layer):
shape=[self.units,],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
dtype=self.dtype)
dtype=self.dtype,
)
else:
self.bias = None
self.built = True
......@@ -83,7 +87,8 @@ class Dense(base.Layer):
return outputs
def dense(inputs,
def dense(
inputs,
units,
activation=None,
use_bias=True,
......@@ -94,7 +99,8 @@ def dense(inputs,
activity_regularizer=None,
trainable=True,
name=None,
reuse=None):
reuse=None,
):
return Dense(
units,
activation=activation,
......@@ -107,4 +113,5 @@ def dense(inputs,
trainable=trainable,
name=name,
_scope=name,
_reuse=reuse).apply(inputs)
\ No newline at end of file
_reuse=reuse,
).apply(inputs)
\ No newline at end of file
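A minimal, hypothetical call to the functional wrapper above; x is assumed to be a 2-D tensor of shape (batch, in_features):

h = dense(x, units=128, activation=None, name='fc1')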
......@@ -13,20 +13,20 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dragon.vm.tensorflow.layers.convolutional import (
from .convolutional import (
conv2d, Conv2D,
)
from dragon.vm.tensorflow.layers.core import (
from .core import (
dense, Dense,
)
from dragon.vm.tensorflow.layers.normalization import (
from .normalization import (
batch_normalization, BatchNormalization,
batch_norm, BatchNorm,
)
from dragon.vm.tensorflow.layers.pooling import (
from .pooling import (
average_pooling2d, AveragePooling2D,
max_pooling2d, MaxPooling2D,
)
......@@ -17,7 +17,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
from dragon.ops import BatchNorm as _BatchNormOp
from dragon.vm.tensorflow.framework import tensor_shape
from dragon.vm.tensorflow.layers import base
......@@ -25,7 +25,8 @@ from dragon.vm.tensorflow.ops import init_ops
class BatchNormalization(base.Layer):
def __init__(self,
def __init__(
self,
axis=-1,
momentum=0.99,
epsilon=1e-3,
......@@ -43,8 +44,10 @@ class BatchNormalization(base.Layer):
fused=None,
trainable=True,
name=None,
**kwargs):
super(BatchNormalization, self).__init__(trainable=trainable, name=name, **kwargs)
**kwargs
):
super(BatchNormalization, self).__init__(
trainable=trainable, name=name, **kwargs)
self.axis = axis
self.momentum = momentum
self.epsilon = epsilon
......@@ -92,33 +95,37 @@ class BatchNormalization(base.Layer):
name='moving_mean',
shape=(param_dim.value,),
initializer=self.moving_mean_initializer,
trainable=False)
trainable=False,
)
self.moving_variance = self.add_variable(
name='moving_variance',
shape=(param_dim.value,),
initializer=self.moving_variance_initializer,
trainable=False)
trainable=False,
)
self.gamma = self.add_variable(
name='gamma',
shape=(param_dim.value,),
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
trainable=self.scale)
trainable=self.scale,
)
self.beta = self.add_variable(
name='beta',
shape=(param_dim.value,),
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
trainable=self.center)
trainable=self.center,
)
self.built = True
def call(self, inputs, training=False, *args, **kwargs):
use_stats = 0 if training else 1
return dragon.ops.BatchNorm([
return _BatchNormOp([
inputs,
self.moving_mean,
self.moving_variance,
......@@ -127,7 +134,8 @@ class BatchNormalization(base.Layer):
axis=self.axis,
momentum=self.momentum,
eps=self.epsilon,
use_stats=use_stats)
use_stats=use_stats,
)
def batch_normalization(
......@@ -170,7 +178,8 @@ def batch_normalization(
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name).apply(inputs, training=training)
_scope=name,
).apply(inputs, training=training)
# Aliases
......
......@@ -22,9 +22,16 @@ from dragon.vm.tensorflow.layers import base, utils
class _Pooling2D(base.Layer):
def __init__(self, pool_function, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
def __init__(
self,
pool_function,
pool_size,
strides,
padding='valid',
data_format='channels_last',
name=None,
**kwargs
):
super(_Pooling2D, self).__init__(name=name, **kwargs)
self.pool_function = pool_function
self.pool_size = utils.normalize_tuple(pool_size, 2, 'pool_size')
......@@ -40,19 +47,25 @@ class _Pooling2D(base.Layer):
else:
pool_shape = (1, 1) + self.pool_size
strides = (1, 1) + self.strides
outputs = self.pool_function(
return self.pool_function(
inputs,
ksize=pool_shape,
strides=strides,
padding=self.padding.upper(),
data_format=utils.convert_data_format(self.data_format, 4))
return outputs
data_format=utils.convert_data_format(self.data_format, 4),
)
class MaxPooling2D(_Pooling2D):
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
def __init__(
self,
pool_size,
strides,
padding='valid',
data_format='channels_last',
name=None,
**kwargs
):
super(MaxPooling2D, self).__init__(
nn.max_pool,
pool_size=pool_size,
......@@ -63,9 +76,15 @@ class MaxPooling2D(_Pooling2D):
class AveragePooling2D(_Pooling2D):
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
def __init__(
self,
pool_size,
strides,
padding='valid',
data_format='channels_last',
name=None,
**kwargs
):
super(AveragePooling2D, self).__init__(
nn.avg_pool,
pool_size=pool_size,
......@@ -76,22 +95,34 @@ class AveragePooling2D(_Pooling2D):
def max_pooling2d(
inputs, pool_size, strides, padding='valid',
data_format='channels_last', name=None):
inputs,
pool_size,
strides,
padding='valid',
data_format='channels_last',
name=None,
):
return MaxPooling2D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name).apply(inputs)
name=name,
).apply(inputs)
def average_pooling2d(
inputs, pool_size, strides, padding='valid',
data_format='channels_last', name=None):
inputs,
pool_size,
strides,
padding='valid',
data_format='channels_last',
name=None,
):
return AveragePooling2D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name).apply(inputs)
\ No newline at end of file
name=name,
).apply(inputs)
\ No newline at end of file
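A short, hypothetical usage of the two functional wrappers above; x is assumed to be a 4-D channels_last tensor:

p1 = max_pooling2d(x, pool_size=(2, 2), strides=(2, 2), padding='same')
p2 = average_pooling2d(x, pool_size=(3, 3), strides=(2, 2))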
......@@ -13,8 +13,10 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
from dragon import ops as _ops
from dragon.core import scope as _scope
from dragon.core import workspace as _workspace
from dragon.core.tensor import Tensor as _Tensor
from dragon.vm.tensorflow.framework import dtypes
......@@ -23,19 +25,19 @@ def expand_dims(input, axis=None, name=None, dim=None):
if axis is not None:
raise ValueError("cannot specify both 'axis' and 'dim'.")
axis = dim
return dragon.ops.ExpandDims(input, axis=axis, name=name)
return _ops.ExpandDims(input, axis=axis, name=name)
def shape(input, name=None, out_type=dtypes.float32):
return dragon.ops.Shape(input, name=name)
def shape(input, name=None, out_type=dtypes.int64):
return _ops.Shape(input, name=name)
def zeros(shape, dtype=dtypes.float32, name=None):
return dragon.ops.Fill(shape, value=0.0, dtype=dtype.name, name=name)
return _ops.Fill(shape, value=0.0, dtype=dtype.name, name=name)
def ones(shape, dtype=dtypes.float32, name=None):
return dragon.ops.Fill(shape, value=1.0, dtype=dtype.name, name=name)
return _ops.Fill(shape, value=1.0, dtype=dtype.name, name=name)
def placeholder(dtype, shape=None, name=None):
......@@ -45,29 +47,41 @@ def placeholder(dtype, shape=None, name=None):
raise TypeError('The dtype should be a valid tensorflow data type.')
# Construct a tensor from the explicit name
return dragon.Tensor.Ref(
dragon.workspace.GetDummyName(
dragon.get_default_name_scope() + name
return _Tensor.Ref(
_workspace.GetDummyName(
_scope.get_default_name_scope() + name
if name else 'Placeholder',
suffix=':0', domain='Tensor'),
dtype=dtype.name, shape=shape).Placeholder()
def concat(values, axis, name=None):
return dragon.ops.Concat(values, axis=axis, name=name)
return _ops.Concat(values, axis=axis, name=name)
def transpose(a, perm=None, name=None):
return dragon.ops.Transpose(a, perm=perm, name=name)
return _ops.Transpose(a, perm=perm, name=name)
def tile(input, multiples, name=None):
return dragon.ops.Tile(input, multiples=multiples, name=name)
return _ops.Tile(input, multiples=multiples, name=name)
def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0):
return dragon.ops.Pad(tensor, paddings, mode=mode, name=name, value=constant_values)
def pad(
tensor,
paddings,
mode="CONSTANT",
name=None,
constant_values=0,
):
return _ops.Pad(
tensor,
paddings,
mode=mode,
name=name,
value=constant_values,
)
def reshape(tensor, shape, name=None):
return dragon.ops.Reshape(tensor, shape=shape, name=name)
return _ops.Reshape(tensor, shape=shape, name=name)
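A short sketch of how the wrappers above compose; the values are illustrative only:

x = ones([2, 3])                                 # float32 tensor of ones
y = pad(x, [[1, 1], [0, 0]], constant_values=0)  # zero-pad the first axis -> (4, 3)
z = reshape(y, [12])                             # flatten to a vector
s = shape(z)                                     # an int64 shape tensor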
......@@ -13,8 +13,17 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
from dragon import ops as _ops
def equal(a, b, name=None):
return dragon.ops.Equal([a, b], name=name)
\ No newline at end of file
return _ops.Equal([a, b], name=name)
def greater(a, b, name=None):
return _ops.Greater([a, b], name=name)
def less(a, b, name=None):
return _ops.Less([a, b], name=name)
......@@ -13,7 +13,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
from dragon.vm.theano.gradient import grad as _Grad
def gradients(ys, xs, **kwargs):
......@@ -34,5 +34,5 @@ def gradients(ys, xs, **kwargs):
"""
dxs = []
if not isinstance(ys, list): ys = [ys]
for y in ys: dxs.append(dragon.grad(y, xs))
for y in ys: dxs.append(_Grad(y, xs))
if len(dxs) == 1: return dxs[0]
\ No newline at end of file
......@@ -13,8 +13,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
from dragon import ops as _ops
from dragon.vm.tensorflow.framework import dtypes
......@@ -59,7 +58,7 @@ class Zeros(Initializer):
def __call__(self, shape, dtype=None, **kwargs):
if dtype is None: dtype = self.dtype
return dragon.ops.Fill(shape, value=0, dtype=dtype.name)
return _ops.Fill(shape, value=0, dtype=dtype.name)
class Ones(Initializer):
......@@ -83,7 +82,7 @@ class Ones(Initializer):
def __call__(self, shape, dtype=None, **kwargs):
if dtype is None: dtype = self.dtype
return dragon.ops.Fill(shape, value=1, dtype=dtype.name)
return _ops.Fill(shape, value=1, dtype=dtype.name)
class Constant(Initializer):
......@@ -93,7 +92,7 @@ class Constant(Initializer):
def __call__(self, shape, dtype=None, **kwargs):
if dtype is None: dtype = self.dtype
return dragon.ops.Fill(shape, value=self.value, dtype=dtype.name)
return _ops.Fill(shape, value=self.value, dtype=dtype.name)
class RandomUniform(Initializer):
......@@ -104,8 +103,12 @@ class RandomUniform(Initializer):
def __call__(self, shape, dtype=None, **kwargs):
if dtype is None: dtype = self.dtype
return dragon.ops.RandomUniform(
shape, self.minval, self.maxval, dtype=dtype.name)
return _ops.RandomUniform(
shape=shape,
low=self.minval,
high=self.maxval,
dtype=dtype.name,
)
class RandomNormal(Initializer):
......@@ -117,8 +120,12 @@ class RandomNormal(Initializer):
def __call__(self, shape, dtype=None, **kwargs):
if dtype is None: dtype = self.dtype
return dragon.ops.RandomNormal(
shape, self.mean, self.stddev, dtype=dtype.name)
return _ops.RandomNormal(
shape=shape,
mean=self.mean,
std=self.stddev,
dtype=dtype.name,
)
class TruncatedNormal(Initializer):
......@@ -130,15 +137,21 @@ class TruncatedNormal(Initializer):
def __call__(self, shape, dtype=None, **kwargs):
if dtype is None: dtype = self.dtype
return dragon.ops.TruncatedNormal(
shape, self.mean, self.stddev, dtype=dtype.name)
return _ops.TruncatedNormal(
shape=shape,
mean=self.mean,
std=self.stddev,
dtype=dtype.name,
)
class VarianceScaling(Initializer):
def __init__(self,
scale=1.0, mode="fan_in",
def __init__(
self,
scale=1.0,
mode="fan_in",
distribution="normal",
dtype=dtypes.float32
dtype=dtypes.float32,
):
if scale <= 0.:
raise ValueError("`scale` must be positive float.")
......@@ -159,13 +172,40 @@ class VarianceScaling(Initializer):
def __call__(self, shape, dtype=None, **kwargs):
if dtype is None: dtype = self.dtype
if self.distribution == "normal":
return dragon.ops.GlorotNormal(shape=shape, scale=self.scale * 2.,
mode=self.mode, dtype=dtype.name)
return _ops.GlorotNormal(
shape=shape,
scale=self.scale * 2.,
mode=self.mode,
dtype=dtype.name,
)
else:
return dragon.ops.GlorotUniform(shape=shape, scale=self.scale * 3.,
mode=self.mode, dtype=dtype.name)
return _ops.GlorotUniform(
shape=shape,
scale=self.scale * 3.,
mode=self.mode,
dtype=dtype.name,
)
def glorot_uniform_initializer(dtype=dtypes.float32):
return variance_scaling_initializer(
scale=1.0,
mode='fan_avg',
distribution='uniform',
dtype=dtype,
)
def glorot_normal_initializer(dtype=dtypes.float32):
return variance_scaling_initializer(
scale=1.0,
mode='fan_avg',
distribution='normal',
dtype=dtype,
)
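A small, hypothetical example of the Glorot helpers defined above:

init = glorot_uniform_initializer()   # variance scaling with fan_avg / uniform
w = init(shape=[64, 128])             # drawn via GlorotUniform with scale 3.0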
# Aliases
zeros_initializer = Zeros
ones_initializer = Ones
constant_initializer = Constant
......@@ -173,13 +213,3 @@ random_uniform_initializer = RandomUniform
random_normal_initializer = RandomNormal
truncated_normal_initializer = TruncatedNormal
variance_scaling_initializer = VarianceScaling
\ No newline at end of file
def glorot_uniform_initializer(dtype=dtypes.float32):
return variance_scaling_initializer(scale=1.0,
mode='fan_avg', distribution='uniform', dtype=dtype)
def glorot_normal_initializer(dtype=dtypes.float32):
return variance_scaling_initializer(scale=1.0,
mode='fan_avg', distribution='normal', dtype=dtype)
\ No newline at end of file