Commit d64a3943 by Ting PAN

fix the potential crash of DragonBoard

1 parent 53d5742d
@@ -57,8 +57,8 @@ std::string MakeString(const Args&... args) {
     return std::string(ss.str());
 }
 
-inline void PrErr_SetString(PyObject* type, const std::string& str) {
-    PyErr_SetString(type, str.c_str());
+inline void PrErr_SetString(PyObject* type, const std::string& str) {
+    PyErr_SetString(type, str.c_str());
 }
 
 class TensorFetcherBase {
@@ -70,8 +70,8 @@ class TensorFetcherBase {
 
 class TensorFeederBase {
  public:
     virtual ~TensorFeederBase() {}
-    virtual PyObject* Feed(const DeviceOption& option,
-                           PyArrayObject* array,
+    virtual PyObject* Feed(const DeviceOption& option,
+                           PyArrayObject* array,
                            Tensor* tensor) = 0;
 };
@@ -79,8 +79,8 @@ DECLARE_TYPED_REGISTRY(TensorFetcherRegistry, TypeId, TensorFetcherBase);
 #define REGISTER_TENSOR_FETCHER(type, ...) \
     REGISTER_TYPED_CLASS(TensorFetcherRegistry, type, __VA_ARGS__)
 
-inline TensorFetcherBase* createFetcher(TypeId type) {
-    return TensorFetcherRegistry()->Create(type);
+inline TensorFetcherBase* CreateFetcher(TypeId type) {
+    return TensorFetcherRegistry()->Create(type);
 }
 
 DECLARE_TYPED_REGISTRY(TensorFeederRegistry, TypeId, TensorFeederBase);
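For readers unfamiliar with the registry macros above (DECLARE_TYPED_REGISTRY / REGISTER_TENSOR_FETCHER / CreateFetcher), the pattern is a type-keyed factory: each fetcher class registers itself under a TypeId, and the binding code later instantiates the right one by key. Below is a minimal Python sketch of the same idea; the names (register_fetcher, create_fetcher, NumpyFetcherSketch) are illustrative only and not part of Dragon's API.

```Python
# A dict-based sketch of the type-keyed registry pattern used by the
# fetcher/feeder classes. All names here are hypothetical.
_fetcher_registry = {}

def register_fetcher(type_id):
    """Register a fetcher class under a type id (decorator form)."""
    def decorator(cls):
        _fetcher_registry[type_id] = cls
        return cls
    return decorator

def create_fetcher(type_id):
    """Instantiate the fetcher registered for type_id, or return None."""
    cls = _fetcher_registry.get(type_id)
    return cls() if cls is not None else None

@register_fetcher('numpy')
class NumpyFetcherSketch(object):
    def fetch(self, tensor):
        # A real fetcher copies the tensor's buffer into an ndarray here.
        raise NotImplementedError

fetcher = create_fetcher('numpy')   # -> a NumpyFetcherSketch instance
```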
@@ -107,11 +107,11 @@ class NumpyFetcher : public TensorFetcherBase {
         // copy the tensor data to the numpy array
         if (tensor.memory_state() == MixedMemory::STATE_AT_CUDA) {
             CUDAContext::Memcpy<CPUContext, CUDAContext>(tensor.nbytes(),
-                PyArray_DATA(reinterpret_cast<PyArrayObject*>(array)),
+                PyArray_DATA(reinterpret_cast<PyArrayObject*>(array)),
                 tensor.raw_data<CUDAContext>());
         } else {
             CPUContext::Memcpy<CPUContext, CPUContext>(tensor.nbytes(),
-                PyArray_DATA(reinterpret_cast<PyArrayObject*>(array)),
+                PyArray_DATA(reinterpret_cast<PyArrayObject*>(array)),
                 tensor.raw_data<CPUContext>());
         }
         return array;
@@ -128,8 +128,8 @@ class StringFetcher : public TensorFetcherBase {
 
 class NumpyFeeder : public TensorFeederBase {
  public:
-    PyObject* Feed(const DeviceOption& option,
-                   PyArrayObject* original_array,
+    PyObject* Feed(const DeviceOption& option,
+                   PyArrayObject* original_array,
                    Tensor* tensor) override {
         PyArrayObject* array = PyArray_GETCONTIGUOUS(original_array);
         const TypeMeta& meta = NumpyTypeToDragon(PyArray_TYPE(array));
@@ -150,14 +150,14 @@ class NumpyFeeder : public TensorFeederBase {
 #ifdef WITH_CUDA
             CUDAContext context(option);
             context.SwitchToDevice();
-            context.Memcpy<CUDAContext, CPUContext>(tensor->nbytes(),
-                tensor->raw_mutable_data<CUDAContext>(),
+            context.Memcpy<CUDAContext, CPUContext>(tensor->nbytes(),
+                tensor->raw_mutable_data<CUDAContext>(),
                 static_cast<void*>(PyArray_DATA(array)));
-#else
+#else
             LOG(FATAL) << "CUDA is not compiled.";
 #endif
         } else {
-            CPUContext::Memcpy<CPUContext, CPUContext>(tensor->nbytes(),
+            CPUContext::Memcpy<CPUContext, CPUContext>(tensor->nbytes(),
                 tensor->raw_mutable_data<CPUContext>(),
                 static_cast<void*>(PyArray_DATA(array)));
         }
@@ -166,4 +166,4 @@ class NumpyFeeder : public TensorFeederBase {
     }
 };
 
-#endif // DRAGON_MODULES_PYTHON_DRAGON_H_
\ No newline at end of file
+#endif // DRAGON_MODULES_PYTHON_DRAGON_H_
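One detail worth noting in NumpyFeeder::Feed above is the PyArray_GETCONTIGUOUS call: the raw Memcpy into the tensor is only valid when the source array's buffer is packed in C order. The snippet below is a NumPy-level illustration of that requirement, not Dragon code; np.ascontiguousarray plays the role of PyArray_GETCONTIGUOUS.

```Python
import numpy as np

# A transposed view shares its parent's buffer, so its bytes are not laid
# out in C order; a raw memcpy of that buffer would scramble the elements.
a = np.arange(6, dtype=np.float32).reshape(2, 3).T
print(a.flags['C_CONTIGUOUS'])     # False

# np.ascontiguousarray is the Python-level analogue of PyArray_GETCONTIGUOUS:
# it returns the input unchanged if already contiguous, or a packed copy.
b = np.ascontiguousarray(a)
print(b.flags['C_CONTIGUOUS'])     # True
print(np.array_equal(a, b))        # True: same values, now safe to copy byte-wise
```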
@@ -23,7 +23,7 @@ class Tensor(object):
         self.name = name
         self.shape = shape
 
-    # ------------------------ Properies ------------------------
+    # ------------------------ Properties ------------------------
 
     @property
     def expressions(self):
......
@@ -79,4 +79,4 @@ class DragonBoard(Process):
                 return make_response(jsonify(sample_scalar))
             else: return make_response(jsonify(sclar))
 
-        app.run(host='0.0.0.0', port=self.config['port'])
+        app.run(host='0.0.0.0', port=self.config['port'], threaded=True)
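The threaded=True change above is the fix the commit message refers to: Flask's built-in development server handles one request at a time by default, so a slow or blocking board query stalls every subsequent request, which is the likely cause of the DragonBoard hang/crash being fixed here. Passing threaded=True makes the server handle each request in its own thread. A minimal, self-contained sketch (hypothetical routes, not the DragonBoard code itself):

```Python
import time
from flask import Flask, jsonify, make_response

app = Flask(__name__)

@app.route('/slow')
def slow():
    time.sleep(10)                      # stands in for a long scalar query
    return make_response(jsonify({'done': True}))

@app.route('/fast')
def fast():
    return make_response(jsonify({'ok': True}))

if __name__ == '__main__':
    # Without threaded=True, a pending /slow request blocks /fast until it
    # finishes; with it, each request is served in its own thread.
    app.run(host='0.0.0.0', port=5000, threaded=True)
```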
-numpy==1.12.1
-six==1.10.0
-protobuf==3.3.0
-lmdb==0.93
-opencv-python==3.1.0
-Pillow==4.1.1
\ No newline at end of file
+numpy
+six
+protobuf
+lmdb
+opencv-python
+Pillow
\ No newline at end of file
@@ -22,7 +22,7 @@
 [*Win64-VS2015*](https://pan.baidu.com/s/1c2eX6lq) (OpenBLAS / Protobuf2.6 for VS2015 / CUDNN v7 / Microsoft MPI)
 
-[*Linux64*](https://pan.baidu.com/s/1qXPEOWG) (OpenMPI)
+[*Linux64*](https://pan.baidu.com/s/1c2ChKHy) (OpenMPI)
 
 For Windows, ``python27/35/36.lib`` should be copied to ``Dragon/3rdparty/lib``, depending on the version of Python.
@@ -73,12 +73,12 @@
 - Run 3rdparty/setup_mpi.sh
 ```Shell
-./setup_mpi.sh
+bash ./setup_mpi.sh
 ```
 
 - Install
 ```Shell
-sudo cp openmpi/install/bin/mpirun /usr/bin
+sudo cp 3rdparty/openmpi/install/bin/mpirun /usr/bin
 ```
 
 #### Windows:
 - We use Microsoft MPI, which runs well on the latest Windows 10
......