Commit 18b664b1 by Ting PAN

TensorBoard Support

1 parent b179dc12
@@ -175,6 +175,7 @@ execute_process(COMMAND protoc -I=${PROTOS_DIR} --cpp_out=${PROTOS_DIR} ${PROTOS
 # ---[ Subdirectories
 add_subdirectory(modules/python)
+#add_subdirectory(modules/cc) # Compile CC module if necessary
 # ---[ Utils
 file(MAKE_DIRECTORY ${PROJECT_BINARY_DIR}/../lib)
\ No newline at end of file
@@ -57,7 +57,7 @@ inline void LoadCaffeModel(string file, Workspace* ws) {
     for (int i = 0; i < net_param.layer_size(); i++) {
         const LayerParameter& layer = net_param.layer(i);
         const string& layer_name = layer.name();
-        string prefix = layer_name + "@param";
+        string prefix = layer_name + "/param:";
         for (int j = 0; j < layer.blobs_size(); j++) {
             string tensor_name = prefix + dragon_cast<string, int>(j);
             if (!ws->HasTensor(tensor_name))
@@ -97,7 +97,7 @@ inline void SavaCaffeModel(string file, const vector<Tensor*>& tensors) {
     int layer_idx = -1;
     for (int i = 0; i < tensors.size(); i++) {
         if (tensors[i]->count() <= 0) continue;
-        vector<string> splits = SplitString(tensors[i]->name(), "@");
+        vector<string> splits = SplitString(tensors[i]->name(), "/param:");
        if (layer_hash.count(splits[0]) == 0) {
            layer_hash[splits[0]] = ++layer_idx;
            LayerParameter* layer = net_param.add_layer();
......
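The two hunks above switch the parameter-tensor naming scheme from ``@param<idx>`` to the TensorFlow-style ``/param:<idx>`` scope separator. A minimal Python sketch of the rename, using a hypothetical layer name:

layer_name = 'conv1'  # hypothetical layer, for illustration only

old_name = layer_name + '@param' + str(0)    # 'conv1@param0'  (before this commit)
new_name = layer_name + '/param:' + str(0)   # 'conv1/param:0' (after this commit)

# SavaCaffeModel recovers the layer name by splitting on the new separator:
assert new_name.split('/param:')[0] == layer_name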
message(STATUS "Found CC Module: ${CMAKE_CURRENT_LIST_DIR}")
FILE(GLOB_RECURSE MODULE_FILES *.h *.hpp *.c *.cpp *.cu *.cc)
FILE(GLOB_RECURSE SRC_FILES ../../src/*.c ../../src/*.cpp ../../src/*.cu ../../src/*.cc)
FILE(GLOB_RECURSE REMOVE_FILES ../../src/python*)
LIST(REMOVE_ITEM SRC_FILES ${REMOVE_FILES})
# ---[ compiler
if (WITH_CUDA)
CUDA_ADD_LIBRARY(${PROJECT_NAME}_cc SHARED ${MODULE_FILES} ${SRC_FILES})
TARGET_LINK_LIBRARIES(${PROJECT_NAME}_cc ${CUDA_LIBRARIES} ${CUDA_cublas_LIBRARY} ${CUDA_curand_LIBRARY})
else ()
ADD_LIBRARY(${PROJECT_NAME}_cc SHARED ${MODULE_FILES} ${SRC_FILES})
endif()
# ---[ link basics
FILE(GLOB targets ${3RDPARTY_LIBS}/*.so ${3RDPARTY_LIBS}/*.lib)
foreach(target ${targets})
TARGET_LINK_LIBRARIES(${PROJECT_NAME}_cc ${target})
endforeach()
# ---[ link optional libs
if (UNIX AND WITH_CUDNN)
TARGET_LINK_LIBRARIES(${PROJECT_NAME}_cc cudnn)
endif()
if (UNIX AND WITH_BLAS)
TARGET_LINK_LIBRARIES(${PROJECT_NAME}_cc openblas)
endif()
if (UNIX AND WITH_MPI_NCCL)
TARGET_LINK_LIBRARIES(${PROJECT_NAME}_cc nccl)
endif()
# ---[ link platforms
if(UNIX)
TARGET_LINK_LIBRARIES(${PROJECT_NAME}_cc protobuf pthread)
endif()
if(WIN32)
TARGET_LINK_LIBRARIES(${PROJECT_NAME}_cc shlwapi.lib)
endif()
set_target_properties(${PROJECT_NAME}_cc PROPERTIES OUTPUT_NAME dragon_cc)
# ---[ install
install (TARGETS ${PROJECT_NAME}_cc DESTINATION ${PROJECT_BINARY_DIR}/../lib)
\ No newline at end of file
#include "dragon.h"
#include "core/common.h"
namespace dragon {
int type_from_string(std::string type) {
if (type == "CPU") return 0;
else if (type == "GPU") return 1;
else if (type == "CUDA") return 1;
LOG(FATAL) << "Unknown device type: " << type << ", "
<< "known device types: "
<< "CPU, "
<< "GPU, "
<< "CUDA";
return -1;
}
Device::Device()
: device_type_(CPU), device_id_(0) {}
Device::Device(std::string device_type, int device_id)
: device_type_((DeviceType)type_from_string(device_type)), device_id_(device_id) {}
Device::Device(std::string device_type)
: device_type_((DeviceType)type_from_string(device_type)), device_id_(0) {}
} // namespace dragon
\ No newline at end of file
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_MODULES_CC_DRAGON_H_
#define DRAGON_MODULES_CC_DRAGON_H_

#include <string>
#include <cstdint>
#include <vector>

#ifdef WIN32
#define EXPORT __declspec(dllexport)
#else
#define EXPORT
#endif

namespace dragon {

typedef int64_t TIndex;

class Workspace;

class Device {
    enum DeviceType { CPU, CUDA };

 public:
    EXPORT Device();
    EXPORT explicit Device(std::string device_type);
    EXPORT Device(std::string device_type, int device_id);

    EXPORT const DeviceType& device_type() const { return device_type_; }
    EXPORT const int device_id() const { return device_id_; }

 private:
    DeviceType device_type_;
    int device_id_;
};

EXPORT Workspace* CreateWorkspace(const std::string& name);
EXPORT Workspace* ResetWorkspace(const std::string& name);
EXPORT void ReleaseWorkspace(const std::string& name);
EXPORT void MoveWorkspace(Workspace* main, Workspace* sub);

EXPORT std::string CreateGraph(const std::string& graph_file, Workspace* ws);
EXPORT std::string CreateGraph(const std::string& graph_file, const Device& device, Workspace* ws);
EXPORT void RunGraph(const std::string& graph_name, Workspace* ws);

EXPORT void CreateTensor(const std::string& name, Workspace* ws);

template <typename T>
void FeedTensor(const std::string& name,
                const std::vector<TIndex>& shape,
                const T* data,
                const Device& device,
                Workspace* ws);

template <typename T>
T* FetchTensor(const std::string& name,
               std::vector<TIndex>& shape,
               Workspace* ws);

template EXPORT float* FetchTensor(const std::string&,
                                   std::vector<TIndex>&,
                                   Workspace*);

template EXPORT void FeedTensor(const std::string&,
                                const std::vector<TIndex>&,
                                const float*,
                                const Device&,
                                Workspace*);

template EXPORT void FeedTensor(const std::string&,
                                const std::vector<TIndex>&,
                                const int*,
                                const Device&,
                                Workspace*);

template EXPORT void FeedTensor(const std::string&,
                                const std::vector<TIndex>&,
                                const uint8_t*,
                                const Device&,
                                Workspace*);

EXPORT void LoadCaffemodel(const std::string& model_file, Workspace* ws);
EXPORT void TransplantCaffeModel(const std::string& input_model, const std::string& output_model);
EXPORT void LoadDragonmodel(const std::string& model_file, Workspace* ws);
EXPORT void SetLogLevel(const std::string& level);

}    // namespace dragon

#endif    // DRAGON_MODULES_CC_DRAGON_H_
\ No newline at end of file
message(STATUS "Found Python Module: ${CMAKE_CURRENT_LIST_DIR}")
FILE(GLOB_RECURSE MODULE_FILES *.h *.hpp *.c *.cpp *.cu *.cc) FILE(GLOB_RECURSE MODULE_FILES *.h *.hpp *.c *.cpp *.cu *.cc)
FILE(GLOB_RECURSE SRC_FILES ../../src/*.c ../../src/*.cpp ../../src/*.cu ../../src/*.cc) FILE(GLOB_RECURSE SRC_FILES ../../src/*.c ../../src/*.cpp ../../src/*.cu ../../src/*.cc)
......
@@ -20,7 +20,7 @@
     <!-- Install -->
     <li><a href="{{ pathto("../../helper/install") }}">Install</a></li>
     <!-- Github -->
-    <li><a href="https://github.com/neopenx/Dragon">Github</a></li>
+    <li><a href="https://github.com/seetaresearch/Dragon">Github</a></li>
     <!-- API -->
     <li class="dropdown globaltoc-container">
         <a role="button" id="dLabelGlobalToc" data-toggle="dropdown" data-target="#" href="#" aria-expanded="true">API<b class="caret"></b></a>
......
@@ -21,6 +21,7 @@ ToolBox
     tools/db
     tools/im2db
     tools/summary_writer
+    tools/tensorboard

 ==================== ====================================================================================
 List                 Brief
@@ -28,6 +29,7 @@ List Brief
 `LMDB`_              A wrapper of LMDB package.
 `IM2DB`_             Make the sequential database for images.
 `SummaryWriter`_     Write summaries for DragonBoard.
+`TensorBoard`_       Write summaries for TensorBoard.
 ==================== ====================================================================================
@@ -40,3 +42,4 @@ List Brief
 .. _LMDB: tools/db.html
 .. _IM2DB: tools/im2db.html
 .. _SummaryWriter: tools/summary_writer.html
+.. _TensorBoard: tools/tensorboard.html
==================
:mod:`TensorBoard`
==================

.. toctree::
   :hidden:

Quick Shortcut
--------------

==================== =============================================================================
List                 Brief
==================== =============================================================================
`scalar_summary`_    Write a scalar variable.
`histogram_summary`_ Write a histogram of values.
`image_summary`_     Write a list of images.
`close`_             Close the board and apply all cached summaries.
==================== =============================================================================

API Reference
-------------

.. currentmodule:: dragon.tools.tensorboard

.. autoclass:: TensorBoard
    :members:

    .. automethod:: __init__

.. _scalar_summary: tensorboard.html#dragon.tools.tensorboard.TensorBoard.scalar_summary
.. _histogram_summary: tensorboard.html#dragon.tools.tensorboard.TensorBoard.histogram_summary
.. _image_summary: tensorboard.html#dragon.tools.tensorboard.TensorBoard.image_summary
.. _close: tensorboard.html#dragon.tools.tensorboard.TensorBoard.close
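A quick usage sketch of the class documented above, following its own docstring example (the image array here is a hypothetical placeholder):

import numpy as np
from dragon.tools.tensorboard import TensorBoard

board = TensorBoard(log_dir='./logs')
board.scalar_summary('loss', 2.3, step=0)
board.histogram_summary('weights', np.random.randn(64, 32), step=0)

# A dummy uint8 HWC image, purely for illustration.
im = np.zeros((224, 224, 3), dtype=np.uint8)
board.image_summary('images', [im], step=0)

board.close()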
@@ -8,7 +8,7 @@ Get the Latest Version
 .. code-block:: shell

-    git clone https://github.com/neopenx/Dragon.git
+    git clone https://github.com/seetaresearch/Dragon.git

 We will call the directory that you cloned Dragon as ``REPO_ROOT``.
......
@@ -400,7 +400,7 @@ def NNResize(inputs, dsize, shape_like=None,
              fy=-1.0, fx=-1.0, data_format='NCHW', **kwargs):
     """Resize the image with Nearest-Neighbor method.

-    Set ``dsize`` to None if you want to use ``fy`` and ``fx``.
+    Set ``dsize`` to None if you want to use ``shape_like`` or ``fy/fx``.

     Parameters
     ----------
@@ -473,7 +473,7 @@ def BilinearResize(inputs, dsize, shape_like=None,
                    fy=-1.0, fx=-1.0, data_format='NCHW', **kwargs):
     """Resize the image with Bi-linear method.

-    Set ``dsize`` to None if you want to use ``fy`` and ``fx``.
+    Set ``dsize`` to None if you want to use ``shape_like`` or ``fy/fx``.

     Parameters
     ----------
......
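Per the corrected docstrings, the resize ops accept three ways to specify the output size; a sketch of each (the import path and the tensors x/ref are assumptions for illustration):

from dragon.operators.vision import NNResize, BilinearResize  # assumed module path

y1 = NNResize(x, dsize=(240, 320))                   # explicit output size
y2 = NNResize(x, dsize=None, shape_like=ref)         # follow another tensor's shape
y3 = BilinearResize(x, dsize=None, fy=2.0, fx=2.0)   # per-axis scale factors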
# Code referenced from https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/04-utils/tensorboard/logger.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time
import logging
import numpy as np
import PIL.Image

try:
    from StringIO import StringIO    # Python 2.7
except ImportError:
    from io import BytesIO    # Python 3.x

try:
    import tensorflow as tf
except ImportError as e:
    logging.warning(
        'cannot import tensorflow. Error: {0}'.format(str(e)))


class TensorBoard(object):
    """The board app based on TensorFlow.

    Examples
    --------
    >>> board = TensorBoard(log_dir='./logs')
    >>> board.scalar_summary('loss', 2.3, step=0)
    >>> board.histogram_summary('weights', np.ones((2, 3)), step=0)
    >>> board.image_summary('images', [im], step=0)

    """
    def __init__(self, log_dir=None):
        """Create a summary writer logging to log_dir.

        If ``log_dir`` is None, ``./logs/localtime`` will be used.

        Parameters
        ----------
        log_dir : str or None
            The root dir for monitoring.

        Returns
        -------
        TensorBoard
            The board app.

        """
        if log_dir is None:
            log_dir = './logs/' + time.strftime(
                '%Y%m%d_%H%M%S', time.localtime(time.time()))
        self.writer = tf.summary.FileWriter(log_dir)

    def close(self):
        """Close the board and apply all cached summaries.

        Returns
        -------
        None

        """
        self.writer.close()

    def scalar_summary(self, tag, value, step):
        """Write a scalar variable.

        Parameters
        ----------
        tag : str
            The key of the summary.
        value : scalar
            The scalar value.
        step : number
            The global step.

        Returns
        -------
        None

        """
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
        self.writer.add_summary(summary, step)

    def image_summary(self, tag, images, step, order='BGR'):
        """Write a list of images.

        The images could be stacked in the type of ``numpy.ndarray``.
        Otherwise, the type of images should be list.

        Parameters
        ----------
        tag : str
            The key of the summary.
        images : list or numpy.ndarray
            The images to show.
        step : number
            The global step.
        order : str
            The color order. ``BGR`` or ``RGB``.

        Returns
        -------
        None

        """
        img_summaries = []
        for i, img in enumerate(images):
            # Write the image to a string buffer
            try:
                s = StringIO()    # Python 2.7
            except NameError:
                s = BytesIO()     # Python 3.x
            if order == 'BGR':
                if len(img.shape) == 3: img = img[:, :, ::-1]
                elif len(img.shape) == 4: img = img[:, :, ::-1, :]
            PIL.Image.fromarray(img).save(s, format='png')
            # Create an Image object
            img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
                                       height=img.shape[0],
                                       width=img.shape[1])
            # Create a Summary value
            img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))
        # Create and write Summary
        summary = tf.Summary(value=img_summaries)
        self.writer.add_summary(summary, step)

    def histogram_summary(self, tag, values, step, bins=1000):
        """Write a histogram of values.

        Parameters
        ----------
        tag : str
            The key of the summary.
        values : list, tuple or numpy.ndarray
            The values to be shown in the histogram.
        step : number
            The global step.
        bins : int
            The number of bins in the histogram.

        Returns
        -------
        None

        """
        # Accept lists and tuples as documented
        values = np.asarray(values)
        # Create a histogram using numpy
        counts, bin_edges = np.histogram(values, bins=bins)
        # Fill the fields of the histogram proto
        hist = tf.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
        hist.num = int(np.prod(values.shape))
        hist.sum = float(np.sum(values))
        hist.sum_squares = float(np.sum(values ** 2))
        # Drop the start of the first bin, keeping one right edge per bucket
        bin_edges = bin_edges[1:]
        # Add bin edges and counts
        for edge in bin_edges:
            hist.bucket_limit.append(edge)
        for c in counts:
            hist.bucket.append(c)
        # Create and write Summary
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
        self.writer.add_summary(summary, step)
        self.writer.flush()
\ No newline at end of file
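Why drop the first bin edge in ``histogram_summary``? ``np.histogram`` returns ``len(counts) + 1`` edges, while TensorBoard's ``HistogramProto`` expects exactly one ``bucket_limit`` (the right edge) per bucket. A standalone check with illustrative values:

import numpy as np

values = np.array([0.0, 0.5, 1.0])
counts, bin_edges = np.histogram(values, bins=2)

print(counts)     # [1 2]          (1.0 falls into the last, right-inclusive bin)
print(bin_edges)  # [0.  0.5 1. ]  (len(bin_edges) == len(counts) + 1)

# Keeping bin_edges[1:] leaves one right edge per bucket, as the proto expects.
assert len(bin_edges[1:]) == len(counts)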
@@ -34,14 +34,15 @@ class InnerProductLayer(Layer):
         self._param = {'axis': param.axis,
                        'num_output': param.num_output,
                        'TransW': not param.transpose}
-        weight = Tensor(LayerParameter.name + '@param0')
-        weight_diff = Tensor(LayerParameter.name + '@param0_grad')
+        scope = LayerParameter.name
+        weight = Tensor(scope + '/param:0')
+        weight_diff = Tensor(scope + '/param:0_grad')
         self.Fill(weight, param, 'weight_filler')
         self._blobs.append({'data': weight, 'diff': weight_diff})
         if param.bias_term:
-            bias = Tensor(LayerParameter.name + '@param1')
-            bias_diff = Tensor(LayerParameter.name + '@param1_grad')
+            bias = Tensor(scope + '/param:1')
+            bias_diff = Tensor(scope + '/param:1_grad')
             self.Fill(bias, param, 'bias_filler')
             self._blobs.append({'data': bias, 'diff': bias_diff})
@@ -351,10 +352,11 @@ class BatchNormLayer(Layer):
                        'eps': param.eps,
                        'axis': 1,
                        'mode': 'CAFFE'}
+        scope = LayerParameter.name
         # mean, var, factor are set to 0 in order to do statistics
-        mean = Tensor(LayerParameter.name + '@param0').Constant(value=0.0)
-        var = Tensor(LayerParameter.name + '@param1').Constant(value=0.0)
-        factor = Tensor(LayerParameter.name + '@param2').Constant(value=0.0)
+        mean = Tensor(scope + '/param:0').Constant(value=0.0)
+        var = Tensor(scope + '/param:1').Constant(value=0.0)
+        factor = Tensor(scope + '/param:2').Constant(value=0.0)
         # in dragon, set diff as None will ignore computing grad automatically
         # but in bvlc-caffe1, you must set lr_mult = 0 manually
         self._blobs.append({'data': mean, 'diff': None})
@@ -397,9 +399,10 @@ class BatchRenormLayer(Layer):
                        't_delta': float(param.t_delta),
                        'axis': 1,
                        'mode': 'CAFFE'}
-        mean = Tensor(LayerParameter.name + '@param0').Constant(value=0.0)
-        var = Tensor(LayerParameter.name + '@param1').Constant(value=0.0)
-        factor = Tensor(LayerParameter.name + '@param2').Constant(value=0.0)
+        scope = LayerParameter.name
+        mean = Tensor(scope + '/param:0').Constant(value=0.0)
+        var = Tensor(scope + '/param:1').Constant(value=0.0)
+        factor = Tensor(scope + '/param:2').Constant(value=0.0)
         self._blobs.append({'data': mean, 'diff': None})
         self._blobs.append({'data': var, 'diff': None})
         self._blobs.append({'data': factor, 'diff': None})
@@ -446,15 +449,16 @@ class ScaleLayer(Layer):
         param = LayerParameter.scale_param
         self._param = {'axis': param.axis,
                        'num_axes': param.num_axes}
-        scale = Tensor(LayerParameter.name + '@param0')
-        scale_diff = Tensor(LayerParameter.name + '@param0_grad')
+        scope = LayerParameter.name
+        scale = Tensor(scope + '/param:0')
+        scale_diff = Tensor(scope + '/param:0_grad')
         if param.HasField('filler'):
             self.Fill(scale, param, 'filler')
         else: scale.Constant(value=1.0)
         self._blobs.append({'data': scale, 'diff': scale_diff})
         if param.bias_term:
-            bias = Tensor(LayerParameter.name + '@param1')
-            bias_diff = Tensor(LayerParameter.name + '@param1_grad')
+            bias = Tensor(scope + '/param:1')
+            bias_diff = Tensor(scope + '/param:1_grad')
             # auto fill 0 if bias_filler is not specified
             self.Fill(bias, param, 'bias_filler')
             self._blobs.append({'data': bias, 'diff': bias_diff})
@@ -490,12 +494,13 @@ class BNLayer(Layer):
                        'momentum': bn_param.moving_average_fraction,
                        'eps': bn_param.eps,
                        'axis': 1}
-        mean = Tensor(LayerParameter.name + '@param0').Constant(value=0.0)
-        var = Tensor(LayerParameter.name + '@param1').Constant(value=0.0)
-        scale = Tensor(LayerParameter.name + '@param2')
-        scale_diff = Tensor(LayerParameter.name + '@param2_grad')
-        bias = Tensor(LayerParameter.name + '@param3')
-        bias_diff = Tensor(LayerParameter.name + '@param3_grad')
+        scope = LayerParameter.name
+        mean = Tensor(scope + '/param:0').Constant(value=0.0)
+        var = Tensor(scope + '/param:1').Constant(value=0.0)
+        scale = Tensor(scope + '/param:2')
+        scale_diff = Tensor(scope + '/param:2_grad')
+        bias = Tensor(scope + '/param:3')
+        bias_diff = Tensor(scope + '/param:3_grad')
         if scale_param.HasField('filler'):
             self.Fill(scale, scale_param, 'filler')
@@ -536,7 +541,8 @@ class NormalizeLayer(Layer):
                              'eps': param.eps}
         self._scale_param = {'axis': 1,
                              'num_axes': 0 if param.channel_shared else 1}
-        scale = Tensor(LayerParameter.name + '@param0')
+        scope = LayerParameter.name
+        scale = Tensor(scope + '/param:0')
         if param.HasField('scale_filler'):
             self.Fill(scale, param, 'scale_filler')
         else: scale.Constant(value=1.0)
......
@@ -46,8 +46,9 @@ class PReLULayer(Layer):
         param = LayerParameter.prelu_param
         self._param = {'channel_shared': param.channel_shared,
                        'data_format': 'NCHW'}
-        slope = Tensor(LayerParameter.name + '@param0')
-        slope_diff = Tensor(LayerParameter.name + '@param0_grad')
+        scope = LayerParameter.name
+        slope = Tensor(scope + '/param:0')
+        slope_diff = Tensor(scope + '/param:0_grad')
         if param.HasField('filler'):
             self.Fill(slope, param, 'filler')
         else:
......
@@ -54,16 +54,17 @@ class ConvolutionLayer(Layer):
         if param.HasField('pad_h'):
             assert param.HasField('pad_w')
             self._param['pad'] = [param.pad_h, param.pad_w]
-        weight = Tensor(LayerParameter.name + '@param0')
-        weight_diff = Tensor(LayerParameter.name + '@param0_grad')
+        scope = LayerParameter.name
+        weight = Tensor(scope + '/param:0')
+        weight_diff = Tensor(scope + '/param:0_grad')
         if len(LayerParameter.param) > 0:
             if LayerParameter.param[0].lr_mult <= 0: weight_diff = None
         self.Fill(weight, param, 'weight_filler')
         self._blobs.append({'data': weight, 'diff': weight_diff})
         if param.bias_term:
-            bias = Tensor(LayerParameter.name + '@param1')
-            bias_diff = Tensor(LayerParameter.name + '@param1_grad')
+            bias = Tensor(scope + '/param:1')
+            bias_diff = Tensor(scope + '/param:1_grad')
             self.Fill(bias, param, 'bias_filler')
             if len(LayerParameter.param) > 1:
                 if LayerParameter.param[1].lr_mult <= 0: bias_diff = None
......
@@ -422,11 +422,13 @@ class Net(object):
         The implementation of `Net_Save(_caffe.cpp, L153)`_.

         """
-        tensors = []
+        keys = set(); tensors = []
         for layer in self._net.layer:
             if layer.name in self.params:
                 for param in self.params[layer.name]:
-                    tensors.append(param.data)
+                    if param.data.name not in keys:
+                        tensors.append(param.data)
+                        keys.add(param.data.name)
         ws.Snapshot(tensors, filename, suffix='', format='caffe')

     @property
......
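The guard added in ``Net.save`` above keeps a blob that is shared between layers (e.g., tied weights) from being written to the caffemodel twice. The same ordered-dedup pattern in isolation, with hypothetical tensor names:

names = ['conv1/param:0', 'conv1/param:1', 'conv1/param:0']  # shared blob repeated

keys, tensors = set(), []
for n in names:
    if n not in keys:  # keep only the first occurrence, preserving order
        tensors.append(n)
        keys.add(n)

assert tensors == ['conv1/param:0', 'conv1/param:1']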
@@ -36,7 +36,7 @@ find_packages('dragon')
 find_modules()
 setup(name = 'dragon',
-      version='0.2.1.11',
+      version='0.2.1.12',
       description = 'Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework',
       url='https://github.com/neopenx/Dragon',
       author='Ting Pan',
......