Commit 2356c658 by Ting PAN

remove default inplace for densenet

1 parent 7e98dfd9
......@@ -3,4 +3,4 @@ This directory holds (*after you download them*):
- libprotobuf.lib (For ``google protobuf``, Windows Only)
- cudnn.lib (For ``cudnn``, Windows Only)
- libopenblas.lib (For ``cblas``, Windows Only)
- python27.lib (For ``python27``, Windows Only)
- python27.lib/python35.lib/python36.lib (For ``python27/35/36``, Windows Only)
......@@ -23,7 +23,7 @@ set(3RDPARTY_DIR ${PROJECT_SOURCE_DIR}/../3rdparty)
# set your python environment
set(PYTHON_DIR /usr/include/python2.7) # prefer
#set(PYTHON_DIR /usr/include/python3.x) # optional, set specific version
#set(ANACONDA_DIR /xxx/anaconda) # optional, set specific version below if using py3
#set(ANACONDA_DIR /xxx/anaconda) # optional, root folder of anaconda, preset for 2.7, 3.5, and 3.6
set(NUMPY_DIR /xxx/numpy) # require, root folder of numpy package
# set CUDA compiling architecture
......@@ -83,7 +83,8 @@ include_directories(${NUMPY_DIR}/core/include)
include_directories(${NUMPY_DIR})
include_directories(${NUMPY_DIR}/numpy)
include_directories(${ANACONDA_DIR}/include/python2.7)
include_directories(${ANACONDA_DIR}/include/python3.x)
include_directories(${ANACONDA_DIR}/include/python3.5)
include_directories(${ANACONDA_DIR}/include/python3.6)
include_directories(${PYTHON_DIR})
include_directories(${ANACONDA_DIR}/include)
......
......@@ -19,7 +19,7 @@ class BatchNormOp : public Operator<Context> {
momentum(OperatorBase::GetSingleArg<float>("momentum", float(0.9))),
eps(OperatorBase::GetSingleArg<float>("eps", float(1e-3))),
use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)),
inplace(OperatorBase::GetSingleArg<bool>("inplace", true)) {}
inplace(OperatorBase::GetSingleArg<bool>("inplace", false)) {}
void RunOnDevice() override;
template <typename T> void RunWithType();
......
......@@ -22,7 +22,7 @@ class BatchRenormOp : public Operator<Context> {
d_max(OperatorBase::GetSingleArg<float>("d_max", float(5.0))),
t_delta(OperatorBase::GetSingleArg<float>("t_delta", float(1.0))),
use_stats(OperatorBase::GetSingleArg<int>("use_stats", -1)),
inplace(OperatorBase::GetSingleArg<bool>("inplace", true)),
inplace(OperatorBase::GetSingleArg<bool>("inplace", false)),
t_r_max(float(1.0)), t_d_max(float(0.0)), t_val(float(0.0)) {}
void RunOnDevice() override;
......
......@@ -17,7 +17,7 @@ class InstanceNormOp : public Operator<Context> {
InstanceNormOp(const OperatorDef& op_def, Workspace* ws)
: Operator<Context>(op_def, ws),
eps(OperatorBase::GetSingleArg<float>("eps", float(1e-3))),
inplace(OperatorBase::GetSingleArg<bool>("inplace", true)) {}
inplace(OperatorBase::GetSingleArg<bool>("inplace", false)) {}
void RunOnDevice() override;
template <typename T> void RunWithType();
......
......@@ -20,6 +20,9 @@ endforeach()
if (UNIX AND WITH_CUDNN)
TARGET_LINK_LIBRARIES(${PROJECT_NAME}_python cudnn)
endif()
if (UNIX AND WITH_BLAS)
TARGET_LINK_LIBRARIES(${PROJECT_NAME}_python openblas)
endif()
# ---[ link platforms
if(UNIX)
......
......@@ -177,9 +177,9 @@ class BatchNormLayer(Layer):
'momentum': param.moving_average_fraction,
'eps': param.eps}
# mean, var, factor are set to 0 in order to do statistics
mean = Tensor(LayerParameter.name + '@param0').Constant()
var = Tensor(LayerParameter.name + '@param1').Constant()
factor = Tensor(LayerParameter.name + '@param2').Constant()
mean = Tensor(LayerParameter.name + '@param0').Constant(value=0.0)
var = Tensor(LayerParameter.name + '@param1').Constant(value=0.0)
factor = Tensor(LayerParameter.name + '@param2').Constant(value=0.0)
# in dragon, set diff as None will ignore computing grad automatically
# but in bvlc-caffe1, you must set lr_mult = 0 manually
self._blobs.append({'data': mean, 'diff': None})
......@@ -202,9 +202,9 @@ class BatchRenormLayer(Layer):
'r_max': float(param.r_max),
'd_max': float(param.d_max),
't_delta': float(param.t_delta)}
mean = Tensor(LayerParameter.name + '@param0').Constant()
var = Tensor(LayerParameter.name + '@param1').Constant()
factor = Tensor(LayerParameter.name + '@param2').Constant()
mean = Tensor(LayerParameter.name + '@param0').Constant(value=0.0)
var = Tensor(LayerParameter.name + '@param1').Constant(value=0.0)
factor = Tensor(LayerParameter.name + '@param2').Constant(value=0.0)
self._blobs.append({'data': mean, 'diff': None})
self._blobs.append({'data': var, 'diff': None})
self._blobs.append({'data': factor, 'diff': None})
......@@ -253,19 +253,27 @@ class BNLayer(Layer):
def __init__(self, LayerParameter):
super(BNLayer, self).__init__(LayerParameter)
param = LayerParameter.batch_norm_param
self._param = {'use_stats': int(param.use_global_stats)
if param.HasField('use_global_stats') else -1,
'momentum': param.moving_average_fraction,
'eps': param.eps}
mean = Tensor(LayerParameter.name + '@param0').Constant()
var = Tensor(LayerParameter.name + '@param1').Constant()
scale = Tensor(LayerParameter.name + '@param2').Constant(value=1.0)
bias = Tensor(LayerParameter.name + '@param3').Constant(value=0.0)
bn_param = LayerParameter.batch_norm_param
scale_param = LayerParameter.scale_param
self._param = {'use_stats': int(bn_param.use_global_stats)
if bn_param.HasField('use_global_stats') else -1,
'momentum': bn_param.moving_average_fraction,
'eps': bn_param.eps}
mean = Tensor(LayerParameter.name + '@param0').Constant(value=0.0)
var = Tensor(LayerParameter.name + '@param1').Constant(value=0.0)
scale = Tensor(LayerParameter.name + '@param2')
scale_diff = Tensor(LayerParameter.name + '@param2_grad')
bias = Tensor(LayerParameter.name + '@param3')
bias_diff = Tensor(LayerParameter.name + '@param3_grad')
if scale_param.HasField('filler'):
self.Fill(scale, scale_param, 'filler')
else: scale.Constant(value=1.0)
self.Fill(bias, scale_param, 'bias_filler')
self.norm_blobs = [{'data': mean, 'diff': None},
{'data': var, 'diff': None}]
self.scale_blobs = [{'data': scale, 'diff': Tensor(scale.name + '_grad')},
{'data': bias, 'diff': Tensor(bias.name + '_grad')}]
self.scale_blobs = [{'data': scale, 'diff': scale_diff},
{'data': bias, 'diff': bias_diff}]
self._blobs.extend(self.norm_blobs)
self._blobs.extend(self.scale_blobs)
......
......@@ -18,10 +18,16 @@
3. (Optional) Download 3rdparty.zip and unzip to Dragon/3rdparty (Out of source code dir)
[*Win64*](https://pan.baidu.com/s/1pLmGOLt) (OpenBLAS / Protobuf2.6 for VS2013 / CUDNN v6 / Microsoft MPI)
[*Win64-VS2013*](https://pan.baidu.com/s/1miGAZl2) (OpenBLAS / Protobuf2.6 for VS2013 / CUDNN v7 / Microsoft MPI)
[*Win64-VS2015*](https://pan.baidu.com/s/1c2eX6lq) (OpenBLAS / Protobuf2.6 for VS2015 / CUDNN v7 / Microsoft MPI)
[*Linux64*](https://pan.baidu.com/s/1qXPEOWG) (OpenMPI)
For Windows, ``python27/35/36.lib`` should be copied to ``Dragon/3rdparty/lib``, it depends on the version of Python.
For Linux, ``libpython-dev``, ``libprotobuf-dev``, ``libopenblas-dev`` and ``cuDNN`` should be installed by yourself.
4. Install Python Requirements
```Shell
......@@ -30,12 +36,12 @@
```
5. Configure Dragon/CMakeLists.txt
- Select optional libraries [PYTHON3 / CUDA / CUDNN / BLAS / SSE / MPI / MPI_CUDA_AWARE / CUDA_FP16]
- Select optional libraries [PYTHON3 / CUDA / CUDNN / BLAS / SSE / MPI]
- Set 3rdparty path (recommended to keep the default)
- Set Python include path & Numpy root path
- Set CUDA compiling architectures if necessary
- GCC versions from 4.8 up to (but not including) 5.0 should add ``-std=c++11`` to ``CUDA_NVCC_FLAGS``, if ``nullptr`` is not found
- We pre-generated files under the ``Dragon/src/protos`` with protobuf-2.6, run protoc by yourself if higher are required
- We pre-generated files under ``Dragon/src/protos`` with protobuf-2.6, run ``protoc`` by yourself if higher are required
6. Environment Variables
### Linux(Only for OpenMPI):
......
......@@ -32,12 +32,6 @@ def wrapper_str(raw_str):
def extract_images():
prefix = 'data/cifar-10-batches-py'
extract_path = 'data/extract'
if not os.path.exists(os.path.join(extract_path, 'JPEGImages')):
os.makedirs(os.path.join(extract_path, 'JPEGImages'))
if not os.path.exists(os.path.join(extract_path, 'ImageSets')):
os.makedirs(os.path.join(extract_path, 'ImageSets'))
batches = [os.path.join(prefix, 'data_batch_{}'.format(i)) for i in xrange(1, 6)]
batches += [os.path.join(prefix, 'test_batch')]
......@@ -60,28 +54,13 @@ def extract_images():
label = dict[wrapper_str('labels')][item_idx]
im = im.transpose((1, 2, 0))
im = im[:, :, ::-1]
filename = str(total_idx).zfill(ZFILL) + '.jpg'
cv2.imwrite(os.path.join(extract_path, 'JPEGImages', filename), im)
images_list.append((filename, str(label)))
images_list.append((im, str(label)))
total_idx += 1
# make list
with open(os.path.join(extract_path, 'ImageSets', 'train.txt'), 'w') as f:
for i in xrange(50000):
item = images_list[i][0] + ' ' + images_list[i][1]
if i != 49999: item += '\n'
f.write(item)
with open(os.path.join(extract_path, 'ImageSets', 'test.txt'), 'w') as f:
for i in xrange(50000, 60000):
item = images_list[i][0] + ' ' + images_list[i][1]
if i != 59999: item += '\n'
f.write(item)
return images_list
def make_db(image_path, label_path, database_path, pad=0):
if os.path.isfile(label_path) is False:
raise ValueError('input path is empty or wrong.')
def make_db(images_list, database_path, pad=0):
if os.path.isdir(database_path) is True:
raise ValueError('the database path is already exist.')
......@@ -90,42 +69,35 @@ def make_db(image_path, label_path, database_path, pad=0):
db = LMDB(max_commit=10000)
db.open(database_path, mode='w')
total_line = sum(1 for line in open(label_path))
total_line = len(images_list)
count = 0
zfill_flag = '{0:0%d}' % (ZFILL)
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 95]
start_time = time.time()
with open(label_path, 'r') as input_file:
for record in input_file:
count += 1
if count % 10000 == 0:
now_time = time.time()
print('{0} / {1} in {2:.2f} sec'.format(
count, total_line, now_time - start_time))
db.commit()
record = record.split()
path = record[0]
label = record[1]
img = cv2.imread(os.path.join(image_path ,path))
if pad > 0:
pad_img = np.zeros((img.shape[0] + 2 * pad,
img.shape[1] + 2 * pad, 3), dtype=np.uint8)
pad_img[pad : pad + img.shape[0],
for record in images_list:
count += 1
if count % 10000 == 0:
now_time = time.time()
print('{0} / {1} in {2:.2f} sec'.format(
count, total_line, now_time - start_time))
db.commit()
img = record[0]
label = record[1]
if pad > 0:
pad_img = np.zeros((img.shape[0] + 2 * pad,
img.shape[1] + 2 * pad, 3), dtype=np.uint8)
pad_img[pad : pad + img.shape[0],
pad : pad + img.shape[1], :] = img
img = pad_img
result, imgencode = cv2.imencode('.jpg', img, encode_param)
img = pad_img
datum = caffe_pb2.Datum()
datum.height, datum.width, datum.channels = img.shape
datum.label = int(label)
datum.encoded = True
datum.data = imgencode.tostring()
db.put(zfill_flag.format(count - 1), datum.SerializeToString())
datum = caffe_pb2.Datum()
datum.height, datum.width, datum.channels = img.shape
datum.label = int(label)
datum.encoded = False
datum.data = img.tostring()
db.put(zfill_flag.format(count - 1), datum.SerializeToString())
now_time = time.time()
print('{0} / {1} in {2:.2f} sec'.format(count, total_line, now_time - start_time))
......@@ -134,7 +106,6 @@ def make_db(image_path, label_path, database_path, pad=0):
db.commit()
db.close()
shutil.copy(label_path, database_path + '/image_list.txt')
end_time = time.time()
print('{0} images have been stored in the database.'.format(total_line))
print('This task finishes within {0:.2f} seconds.'.format(
......@@ -147,12 +118,8 @@ if __name__ == '__main__':
untar('data/cifar-10-python.tar.gz')
extract_images()
images_list = extract_images()
make_db('data/extract/JPEGImages',
'data/extract/ImageSets/train.txt',
'data/train_lmdb')
make_db(images_list[0:50000], 'data/train_lmdb')
make_db('data/extract/JPEGImages',
'data/extract/ImageSets/test.txt',
'data/test_lmdb')
make_db(images_list[50000:60000], 'data/test_lmdb')
Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!