diff --git a/dnn/src/cuda/check_non_finite/kern.cu b/dnn/src/cuda/check_non_finite/kern.cu
index 0a46daa1..71e44c84 100644
--- a/dnn/src/cuda/check_non_finite/kern.cu
+++ b/dnn/src/cuda/check_non_finite/kern.cu
@@ -8,10 +8,9 @@ namespace cuda {
 
 #define COMMA ,
 
-#define cb(_dtype)                                                          \
-    INST_REDUCE(                                                            \
-            device_reduce::CheckNonFiniteOp<                                \
-                    _dtype COMMA dt_float32 COMMA dt_int32 COMMA dt_int32>, \
+#define cb(_dtype)                                                                  \
+    INST_REDUCE(                                                                    \
+            device_reduce::CheckNonFiniteOp<_dtype COMMA dt_int32 COMMA dt_int32>, \
             false);
 
 cb(dt_float32);
diff --git a/dnn/src/cuda/check_non_finite/opr_impl.cpp b/dnn/src/cuda/check_non_finite/opr_impl.cpp
index 32928547..2724abc9 100644
--- a/dnn/src/cuda/check_non_finite/opr_impl.cpp
+++ b/dnn/src/cuda/check_non_finite/opr_impl.cpp
@@ -14,7 +14,7 @@ using device_reduce::CheckNonFiniteOp;
 template <typename T>
 size_t CheckNonFiniteImpl::_get_workspace_in_bytes() {
     // Call the _get_workspace_in_bytes to reduce the loop fetch workspace bytes
-    typedef CheckNonFiniteOp<T, dt_float32, dt_int32, dt_int32> Op;
+    typedef CheckNonFiniteOp<T, dt_int32, dt_int32> Op;
     megdnn_assert(m_size > 0);
     WorkspaceBundle bundle(
             nullptr, {
@@ -59,7 +59,7 @@ void CheckNonFiniteImpl::_exec(
         _megdnn_in const TensorNDArray& srcs, _megdnn_tensor_out dst,
         _megdnn_workspace workspace) {
     check_exec(srcs, dst, workspace.size);
-    typedef CheckNonFiniteOp<T, dt_float32, dt_int32, dt_int32> Op;
+    typedef CheckNonFiniteOp<T, dt_int32, dt_int32> Op;
     auto stream = cuda_stream(this->handle());
     SmallVector<size_t> workspace_sizes{
             sizeof(T*) * m_size,
diff --git a/imperative/python/megengine/core/_config.py b/imperative/python/megengine/core/_config.py
index a9e2102e..8a16ec86 100644
--- a/imperative/python/megengine/core/_config.py
+++ b/imperative/python/megengine/core/_config.py
@@ -247,4 +247,4 @@ def _override(
 
 
 def _get_actual_op_param(function_param, config_param):
-    return function_param if config_param is "default" else config_param
+    return function_param if config_param == "default" else config_param
diff --git a/imperative/python/megengine/optimizer/optimizer.py b/imperative/python/megengine/optimizer/optimizer.py
index 8c8a11c6..504a978e 100644
--- a/imperative/python/megengine/optimizer/optimizer.py
+++ b/imperative/python/megengine/optimizer/optimizer.py
@@ -97,7 +97,7 @@ class Optimizer(metaclass=ABCMeta):
                     "optimizer can only optimize Parameters, but one of the params is "
                     + str(type(param))
                 )
-            param._reset(Tensor(param, no_cache=True))
+            param._reset(Tensor(param.numpy(), no_cache=True, format=param.format))
 
         for name, default in self._defaults.items():
             if default is required and name not in param_group:
diff --git a/imperative/src/impl/transformations/format.cpp b/imperative/src/impl/transformations/format.cpp
index 30d2ae6e..5a4fa7c0 100644
--- a/imperative/src/impl/transformations/format.cpp
+++ b/imperative/src/impl/transformations/format.cpp
@@ -581,9 +581,9 @@ ValueRefList FormatTransformation::apply_transformation(
                 (GenericFunction&)inputs[1].cast<FunctionValue>();
         // make param grads as FormattedTensor
         GenericFunction new_callback =
-                [this, callback, format](Span<ValueRef> inputs_) -> ValueRefList {
+                [&, callback, format](Span<ValueRef> inputs_) -> ValueRefList {
             auto wrapped_inputs = SmallVector<ValueRef>{
-                    this->value_type().make(inputs_.item(), format)};
+                    m_value_type.make(inputs_.item(), format)};
             auto ret = callback(wrapped_inputs);
             return ret;
         };
diff --git a/imperative/src/include/megbrain/imperative/value.h b/imperative/src/include/megbrain/imperative/value.h
index 9543aa49..ecf63b48 100644
--- a/imperative/src/include/megbrain/imperative/value.h
+++ b/imperative/src/include/megbrain/imperative/value.h
@@ -67,7 +67,6 @@ template <typename T>
 class Type : public IType {
 protected:
     Type(std::string name) : IType(std::move(name)) {}
-    Type(IType&& type) : IType(std::move(type)) {}
 
     // TODO: each type owns an allocator
 public:
@@ -105,7 +104,6 @@ template <typename T>
 class ObjectType : public Type<T> {
 public:
     ObjectType(std::string name) : Type<T>(name) {}
-    ObjectType(IType&& type) : Type<T>(std::move(type)) {}
 };
 
 /**
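
Note on the _config.py hunk: "config_param is 'default'" compares object identity rather than value, so it only takes the intended branch when CPython happens to intern both strings (CPython 3.8+ also emits a SyntaxWarning for "is" against a literal), whereas "==" compares values and is always correct. Below is a minimal, standalone sketch of the pitfall in plain Python with no MegEngine imports; get_actual_op_param here is a hypothetical stand-in written for illustration, not the library function itself.

    def get_actual_op_param(function_param, config_param):
        # Value equality: behaves the same no matter how the string was built.
        return function_param if config_param == "default" else config_param

    a = "default"
    b = "".join(["de", "fault"])  # equal value, but a distinct object at runtime
    print(a == b)                 # True  -> what the config check needs
    print(a is b)                 # False in CPython -> an identity check misfires here
    print(get_actual_op_param("NCHW", b))  # "NCHW": a "default" config defers to the op's own param

With the old identity check, the second case would fall through and return "default" itself instead of the function's parameter, which is exactly the bug the one-character change in _config.py fixes.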