Commit 7bc8fb22 by Ting PAN

Remove Avatar when gathering gradients

1 parent c9db9eee
......@@ -4,6 +4,10 @@
# Written by Ting Pan
# --------------------------------------------------------
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
import math
from six.moves import range as xrange
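
The `__future__` imports put this Python 2 module on Python 3 semantics; `division` in particular turns `/` into true division, which is why the shape arithmetic below gains explicit `int(...)` casts. A minimal standalone sketch of the behavior change:

```python
from __future__ import division  # `/` is now true division, as in Python 3

# Integer `/` yields a float, so shape math must floor explicitly.
print(7 / 2)       # 3.5 (Python 2 classic division would print 3)
print(int(7 / 2))  # 3, the floored value expected for tensor shapes
print(7 // 2)      # 3, floor division is unaffected by the import
```
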
......@@ -98,7 +102,7 @@ def Conv2d(inputs, num_output, kernel_size,
        input_size = output.shape[i + spatial_axis]
        output_size = (input_size + s - 1) / float(s)
        dp = int(max(0, (output_size - 1) * s + k - input_size))
-       output.shape[i + spatial_axis] = (output.shape[i + spatial_axis] + dp - dk) / s + 1
+       output.shape[i + spatial_axis] = int((output.shape[i + spatial_axis] + dp - dk) / s) + 1
    return output
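
For reference, here is this hunk's SAME-padding arithmetic as a standalone function; a sketch only, assuming `k` is the kernel size, `s` the stride, and `dk` the dilated kernel extent, as the surrounding code suggests:

```python
def conv2d_same_output_size(input_size, k, s, dilation=1):
    """Sketch of the SAME-padding shape rule above (assumed semantics)."""
    dk = dilation * (k - 1) + 1                      # dilated kernel extent
    output_size = (input_size + s - 1) / float(s)    # ~ ceil(input / stride)
    dp = int(max(0, (output_size - 1) * s + dk - input_size))  # padding needed
    return int((input_size + dp - dk) / s) + 1       # the corrected line above

assert conv2d_same_output_size(10, 3, 2) == 5  # ceil(10 / 2)
assert conv2d_same_output_size(7, 3, 2) == 4   # ceil(7 / 2)
```
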
......@@ -289,13 +293,13 @@ def Pool2d(inputs, kernel_size, stride, pad=0, padding='VALID',
    if not global_pooling:
        if padding != 'SAME':
            input_size = output.shape[i + spatial_axis]
-           output_size = int(math.ceil(float(output.shape[i + spatial_axis] + 2 * p - k) / s) + 1)
+           output_size = int(math.ceil((output.shape[i + spatial_axis] + 2 * p - k) / s) + 1)
            if ((output_size - 1) * s >= input_size + p):
                output_size = output_size - 1
            output.shape[i + spatial_axis] = output_size
        else:
            output.shape[i + spatial_axis] = \
-               int((output.shape[i + spatial_axis] + s - 1) / float(s))
+               int((output.shape[i + spatial_axis] + s - 1) / s)
    else:
        output.shape[i + spatial_axis] = 1
......
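
The pooling counterpart keeps Caffe's ceil-mode rule for explicit padding and a plain `ceil(input / stride)` for SAME. A hedged standalone sketch of the same logic:

```python
import math

def pool2d_output_size(input_size, k, s, p=0, padding='VALID'):
    """Sketch of the pooling shape rule above (Caffe-style ceil mode assumed)."""
    if padding != 'SAME':
        output_size = int(math.ceil((input_size + 2 * p - k) / float(s)) + 1)
        # Drop the last window if it would start beyond the padded input.
        if (output_size - 1) * s >= input_size + p:
            output_size -= 1
        return output_size
    # SAME: ceil(input / stride), written with integer arithmetic.
    return int((input_size + s - 1) / float(s))

assert pool2d_output_size(7, 3, 2) == 3                  # floor((7 - 3) / 2) + 1
assert pool2d_output_size(7, 3, 2, padding='SAME') == 4  # ceil(7 / 2)
```
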
......@@ -68,6 +68,7 @@ class SigmoidCrossEntropyLossLayer(Layer):
        super(SigmoidCrossEntropyLossLayer, self).Setup(bottom)
        loss = ops.SigmoidCrossEntropy(bottom, **self._param)
+       if self._loss_weight is not None: loss *= self._loss_weight
        return loss
class L2LossLayer(Layer):
......
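
The added line folds an optional scalar loss weight into the returned symbol; the pattern, in isolation:

```python
def weighted_loss(loss, loss_weight=None):
    """Sketch: apply an optional scalar weight to a loss value."""
    if loss_weight is not None:
        loss = loss * loss_weight
    return loss

assert weighted_loss(2.0) == 2.0
assert weighted_loss(2.0, loss_weight=0.5) == 1.0
```
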
......@@ -255,7 +255,7 @@ class NNResizeLayer(Layer):
        super(NNResizeLayer, self).__init__(LayerParameter)
        param = LayerParameter.resize_param
        dsize = [int(dim) for dim in param.shape.dim] \
-           if param.HasField('shape') else []
+           if param.HasField('shape') else None
        self._param = {'dsize': dsize,
                       'fx': float(param.fx),
                       'fy': float(param.fy),
......@@ -287,7 +287,7 @@ class BilinearResizeLayer(Layer):
        super(BilinearResizeLayer, self).__init__(LayerParameter)
        param = LayerParameter.resize_param
        dsize = [int(dim) for dim in param.shape.dim] \
-           if param.HasField('shape') else []
+           if param.HasField('shape') else None
        self._param = {'dsize': dsize,
                       'fx': float(param.fx),
                       'fy': float(param.fy),
......
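
Defaulting `dsize` to `None` instead of `[]` lets downstream code distinguish "shape field absent" from an empty shape without relying on truthiness. A sketch of the consumer-side logic this enables (hypothetical helper, not Dragon's API):

```python
def resolve_resize_shape(input_hw, dsize=None, fx=-1.0, fy=-1.0):
    """Sketch: prefer an explicit dsize; else fall back to fy/fx scale factors."""
    if dsize is not None:           # `is not None`, not truthiness
        return [int(d) for d in dsize]
    h, w = input_hw
    return [int(h * fy), int(w * fx)]

assert resolve_resize_shape((4, 6), dsize=[8, 12]) == [8, 12]
assert resolve_resize_shape((4, 6), fx=2.0, fy=2.0) == [8, 12]
```
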
......@@ -35,9 +35,11 @@ template <class Context> template <typename T>
void GradientGatherOp<Context>::RunWithType() {
    auto* dXdata = output(0)->template mutable_data<T, Context>();
    TIndex count = output(0)->count();
-   for (int i = 1; i < indices.size(); i++) {
+   for (int i = 0; i < indices.size(); i++) {
        CHECK(output(0)->dims() == input(indices[i]).dims());
-       math::Add<T, Context>(count, dXdata, input(indices[i]).template data<T, Context>(), dXdata);
+       auto* dYdata = input(indices[i]).template data<T, Context>();
+       if (i == 0) ctx().template Copy<T, Context, Context>(count, dXdata, dYdata);
+       else math::Add<T, Context>(count, dXdata, dYdata, dXdata);
        input(indices[i]).Reset();
    }
}
......@@ -45,7 +47,7 @@ void GradientGatherOp<Context>::RunWithType() {
template <class Context>
void GradientGatherOp<Context>::RunOnDevice() {
    if (indices.size() == 0) return;
-   ws()->CreateAvatar(output(0), &input(indices[0]));
+   output(0)->ReshapeLike(input(indices[0]));
    if (input(indices[0]).template IsType<float>()) RunWithType<float>();
    else LOG(FATAL) << "Unsupported input types.";
......
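
Taken together, these two hunks are the point of the commit: instead of making `output(0)` an Avatar (alias) of the first gradient, the op now reshapes its own buffer with `ReshapeLike`, copies the first gradient explicitly (hence the loop starting at `i = 0`), and accumulates the rest, releasing each input once consumed. A NumPy sketch of the same accumulate-and-release pattern (hypothetical names, not Dragon's API):

```python
import numpy as np

def gather_gradients(grads):
    """Sketch: sum same-shaped gradients into a freshly owned buffer."""
    out = np.empty_like(grads[0])        # ReshapeLike: own buffer, no aliasing
    for i, g in enumerate(grads):
        assert g.shape == out.shape      # CHECK on matching dims
        if i == 0:
            np.copyto(out, g)            # first gradient: explicit copy
        else:
            np.add(out, g, out=out)      # remaining gradients: in-place add
        grads[i] = None                  # release the input (Reset)
    return out

gs = [np.ones((2, 3)), 2.0 * np.ones((2, 3))]
print(gather_gradients(gs))              # a (2, 3) array of threes
```
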