Support input check for pool operator #10532

Open · wants to merge 24 commits into base: master · Changes from 21 commits
1 change: 1 addition & 0 deletions oneflow/api/python/functional/tensor_api.cpp
@@ -203,6 +203,7 @@ class TensorWithShapeGenericCtorFunctor {
Maybe<Tensor> operator()(const Shape& shape, const Symbol<DType>& dtype,
const Optional<Symbol<Device>>& device) const {
// NOTE(chengcheng): flow.Tensor or flow.tensor ONLY created by EagerTensor now.
JUST(CheckSizeNonNegative(shape));
LazyMode::Guard lazy_mode_disabled_guard(/*is_enabled*/ false);
Symbol<Device> device_;
if (device) {
9 changes: 9 additions & 0 deletions oneflow/core/functional/impl/common.h
@@ -39,6 +39,15 @@ Maybe<void> CheckInplaceValid(const std::shared_ptr<Tensor>& x);
Maybe<void> CheckInplaceCastValid(const std::shared_ptr<Tensor>& x,
const std::shared_ptr<Tensor>& x_cast);
Maybe<void> CheckInplaceShapeCanExpandTo(const Shape& shape, const Shape& expand_shape);

inline Maybe<void> CheckSizeNonNegative(const Shape& shape) {
Contributor review comment: rename this to `CheckShapeNonNegative`.

for (const auto& s : shape) {
CHECK_OR_RETURN(s >= 0) << "Trying to create tensor with negative dimension " << s << ": "
<< shape;
}
return Maybe<void>::Ok();
}

Optional<Stride> ComputeStride(const Shape& shape, const Stride& stride, const Shape& target_shape);
Maybe<Shape> InferShapeUnspecifiedDim(const int64_t& elem_count, const Shape& shape);

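Editor's note: a minimal sketch of what the new `CheckSizeNonNegative` guard looks like from Python, assuming a OneFlow build that includes this branch; the error text comes from the `CHECK_OR_RETURN` message above.

```python
import oneflow as flow

# flow.Tensor routes through TensorWithShapeGenericCtorFunctor, so a
# negative dimension now fails fast with the CheckSizeNonNegative message.
try:
    flow.Tensor(2, -3)
except Exception as e:
    print(e)  # roughly: "Trying to create tensor with negative dimension -3: ..."
```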
2 changes: 2 additions & 0 deletions oneflow/core/functional/impl/random_functor.cpp
@@ -202,6 +202,7 @@ class RandFunctor {
OF_UNIMPLEMENTED() << "Only support floating dtype in rand().";
}
}
JUST(CheckSizeNonNegative(shape));

auto gen = generator.value_or(JUST(one::DefaultAutoGenerator()));
gen = JUST(GetGeneratorForLazyOrGlobal(gen, LazyMode::is_enabled(), NullOpt, NullOpt));
@@ -275,6 +276,7 @@ class RandNFunctor {
if (dtype.has_value() && !JUST(dtype)->is_floating_point()) {
OF_UNIMPLEMENTED() << "Only support floating dtype in randn().";
}
JUST(CheckSizeNonNegative(shape));
const auto& out = Optional<one::Tensor>();
return Normal(static_cast<double>(0), static_cast<double>(1), shape, out, dtype, device,
generator, requires_grad);
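Editor's note: the same guard applied to the random constructors; a hedged sketch assuming this branch.

```python
import oneflow as flow

# rand() and randn() now validate the requested shape before consulting
# the generator, so both fail immediately and with the same message.
for ctor in (flow.rand, flow.randn):
    try:
        ctor(3, -1)
    except Exception as e:
        print(type(e).__name__, e)
```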
3 changes: 3 additions & 0 deletions python/oneflow/nn/modules/constant.py
@@ -44,6 +44,9 @@ def __init__(
self.device = flow.device(self.device)
self.requires_grad = requires_grad
size = _single(size)
assert all(
s >= 0 for s in size
), f"Trying to create tensor with negative dimension: {size}"
if dtype is None:
dtype = flow.get_default_dtype()
if placement is None:
4 changes: 4 additions & 0 deletions python/oneflow/nn/modules/empty.py
@@ -36,6 +36,10 @@ def empty_op(

shape = _single(_handle_size_arg(size))

assert all(
s >= 0 for s in shape
), f"Trying to create tensor with negative dimension: {shape}"

if dtype is None:
dtype = flow.get_default_dtype()
if placement is not None:
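Editor's note: the Python-level factories guard `size` with plain asserts, so the failure surfaces as an `AssertionError` rather than a C++ check; a small sketch assuming this branch.

```python
import oneflow as flow

# empty() (and the constant factories patched above) assert on negative
# sizes before dispatching to the C++ op.
try:
    flow.empty(2, -1)
except AssertionError as e:
    print(e)  # roughly: "Trying to create tensor with negative dimension: (2, -1)"
```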
127 changes: 127 additions & 0 deletions python/oneflow/nn/modules/pooling.py
@@ -674,6 +674,10 @@ def __init__(self, output_size: _size_1_t) -> None:
super().__init__()
assert output_size is not None, "'output_size' cannot be NoneType"
self.output_size = _single(output_size)
assert len(self.output_size) == 1, "'output_size' should contain one int"
assert (
self.output_size[0] is None or self.output_size[0] >= 0
), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}"

def forward(self, x):
assert (
@@ -741,6 +745,10 @@ def __init__(self, output_size, data_format=None) -> None:
super().__init__()
assert output_size is not None, "'output_size' cannot be NoneType"
self.output_size = _pair(output_size)
assert len(self.output_size) == 2, "'output_size' should contain two ints"
assert (self.output_size[0] is None or self.output_size[0] >= 0) and (
self.output_size[1] is None or self.output_size[1] >= 0
), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}"
if data_format:
if not data_format in ["channels_first", "channels_last"]:
raise ValueError(
@@ -824,6 +832,12 @@ def __init__(self, output_size) -> None:
super().__init__()
assert output_size is not None, "'output_size' cannot be NoneType"
self.output_size = _triple(output_size)
assert len(self.output_size) == 3, "'output_size' should contain three ints"
assert (
(self.output_size[0] is None or self.output_size[0] >= 0)
and (self.output_size[1] is None or self.output_size[1] >= 0)
and (self.output_size[2] is None or self.output_size[2] >= 0)
), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}"

def forward(self, x):
assert (
@@ -892,6 +906,9 @@ def forward(self, input):
assert (
len(input.shape) == 3 and len(self.output_size) == 1
), "the length of 'output_size' does not match the input size, 1 expected"
assert (
self.output_size[0] is None or self.output_size[0] >= 0
), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}"
new_output_size = _generate_output_size(input.shape, self.output_size)
return flow.nn.functional.adaptive_max_pool1d(
input, self.output_size, self.return_indices
@@ -964,6 +981,10 @@ def forward(self, input):
assert (
len(input.shape) == 4
), f"expected 4-dimensional tensor, but got {len(input.shape)}-dimensional tensor"
assert len(self.output_size) == 2, "'output_size' should contain two ints"
assert (self.output_size[0] is None or self.output_size[0] >= 0) and (
self.output_size[1] is None or self.output_size[1] >= 0
), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}"
new_output_size = _generate_output_size(input.shape, self.output_size)
return flow.nn.functional.adaptive_max_pool2d(
input, self.output_size, self.return_indices, self.channel_pos
@@ -1019,12 +1040,55 @@ def forward(self, input):
assert (
len(input.shape) == 5
), f"expected 5-dimensional tensor, but got {len(input.shape)}-dimensional tensor"
assert len(self.output_size) == 3, "'output_size' should contain three ints"
assert (
(self.output_size[0] is None or self.output_size[0] >= 0)
and (self.output_size[1] is None or self.output_size[1] >= 0)
and (self.output_size[2] is None or self.output_size[2] >= 0)
), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}"
new_output_size = _generate_output_size(input.shape, self.output_size)
return flow.nn.functional.adaptive_max_pool3d(
input, self.output_size, self.return_indices
)
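Editor's note: a brief sketch of the `output_size` contract these asserts enforce; the avg-pool variants validate in `__init__`, the max-pool variants in `forward` (assuming this branch).

```python
import oneflow as flow
import oneflow.nn as nn

x = flow.randn(1, 3, 16, 16)
print(nn.AdaptiveMaxPool2d((7, 7))(x).shape)  # (1, 3, 7, 7)

try:
    nn.AdaptiveAvgPool2d((7, -1))  # rejected at construction time
except AssertionError as e:
    print(e)

m = nn.AdaptiveMaxPool2d((7, -1))  # constructs, but...
try:
    m(x)                           # ...forward asserts on the negative element
except AssertionError as e:
    print(e)
```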


def _unpool_output_size_check(
input,
kernel_size: List[int],
stride: List[int],
padding: List[int],
output_size: Optional[List[int]],
) -> List[int]:
input_size = input.size()
default_size = []
for d in range(len(kernel_size)):
default_size.append(
(input_size[-len(kernel_size) + d] - 1) * stride[d]
+ kernel_size[d]
- 2 * padding[d]
)
if output_size is None:
ret = default_size
else:
if len(output_size) == len(kernel_size) + 2:
output_size = output_size[2:]
if len(output_size) != len(kernel_size):
raise ValueError(
"output_size should be a sequence containing "
f"{len(kernel_size)} or {len(kernel_size) + 2} elements, but it has a length of '{len(output_size)}'"
)
for d in range(len(kernel_size)):
min_size = default_size[d] - stride[d]
max_size = default_size[d] + stride[d]
if not (min_size < output_size[d] < max_size):
raise ValueError(
f'invalid output_size "{output_size}" (dim {d} must be between {min_size} and {max_size})'
)

ret = output_size
return ret
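Editor's note: the window enforced above is exclusive on both sides: for each dim, `default_size[d] = (input_size[d] - 1) * stride[d] + kernel_size[d] - 2 * padding[d]`, and an explicit `output_size[d]` must lie strictly between `default_size[d] - stride[d]` and `default_size[d] + stride[d]`. A worked instance with hypothetical numbers:

```python
# 1d unpool over an input of length 4 with kernel_size=2, stride=2, padding=0:
#   default = (4 - 1) * 2 + 2 - 2 * 0 = 8
#   valid explicit sizes satisfy 6 < size < 10, i.e. 7, 8, or 9.
input_len, kernel, stride, padding = 4, 2, 2, 0
default = (input_len - 1) * stride + kernel - 2 * padding
print(default, (default - stride, default + stride))  # 8 (6, 10)
```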


class MaxUnpool1d(Module):
r"""Computes a partial inverse of :class:`MaxPool1d`.

@@ -1100,6 +1164,27 @@ def __init__(
self.padding = padding

def forward(self, x, indices, output_size=None):
kernel_size = _single(self.kernel_size)
Contributor review comment: encapsulate this duplicated logic into a function.

if self.stride is not None:
_stride = _single(self.stride)
else:
_stride = kernel_size
padding = _single(self.padding)
check_output_size = _unpool_output_size_check(
x, kernel_size, _stride, padding, output_size
)
assert (
len(check_output_size) == 1
), f"There should be exactly one element in output_size, but got {len(check_output_size)}"
assert (
indices.dtype == flow.int64
), f"elements in indices should be type int64 but got: {indices.dtype}"
assert (
len(x.size()) == 2 or len(x.size()) == 3
), f"Input to max_unpooling1d should be a 2d or 3d Tensor, but got {len(x.size())} dimensions"
assert (
x.size() == indices.size()
), f"Expected shape of indices to be same as that of the input tensor"
return flow._C.max_unpool1d(
x, indices, self.kernel_size, self.stride, self.padding, output_size
)
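Editor's note: a usage sketch of the pool/unpool round trip these checks guard, assuming OneFlow's PyTorch-aligned `MaxPool1d(..., return_indices=True)`.

```python
import oneflow as flow
import oneflow.nn as nn

pool = nn.MaxPool1d(2, stride=2, return_indices=True)
unpool = nn.MaxUnpool1d(2, stride=2)
x = flow.tensor([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]]])
out, indices = pool(x)              # indices is int64 with the same shape as out
y = unpool(out, indices)            # default length: (4 - 1) * 2 + 2 = 8
y = unpool(out, indices, output_size=x.size())  # explicit size inside the ±stride window
```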
@@ -1188,6 +1273,27 @@ def __init__(
self.padding = padding

def forward(self, x, indices, output_size=None):
kernel_size = _pair(self.kernel_size)
if self.stride is not None:
_stride = _pair(self.stride)
else:
_stride = kernel_size
padding = _pair(self.padding)
check_output_size = _unpool_output_size_check(
x, kernel_size, _stride, padding, output_size
)
assert (
len(check_output_size) == 2
), f"There should be exactly two elements in output_size, but got {len(check_output_size)}"
assert (
indices.dtype == flow.int64
), f"elements in indices should be type int64 but got: {indices.dtype}"
assert (
len(x.size()) == 3 or len(x.size()) == 4
), f"Input to max_unpooling1d should be a 3d or 4d Tensor, but got {len(x.size())} dimensions"
assert (
x.size() == indices.size()
), f"Expected shape of indices to be same as that of the input tensor"
return flow._C.max_unpool2d(
x, indices, self.kernel_size, self.stride, self.padding, output_size
)
@@ -1266,6 +1372,27 @@ def __init__(
self.padding = padding

def forward(self, x, indices, output_size=None):
kernel_size = _triple(self.kernel_size)
if self.stride is not None:
_stride = _triple(self.stride)
else:
_stride = kernel_size
padding = _triple(self.padding)
check_output_size = _unpool_output_size_check(
x, kernel_size, _stride, padding, output_size
)
assert (
len(check_output_size) == 3
), f"There should be exactly three elements in output_size, but got {len(check_output_size)}"
assert (
indices.dtype == flow.int64
), f"elements in indices should be type int64 but got: {indices.dtype}"
assert (
len(x.size()) == 4 or len(x.size()) == 5
), f"Input to max_unpooling1d should be a 4d or 5d Tensor, but got {len(x.size())} dimensions"
assert (
x.size() == indices.size()
), f"Expected shape of indices to be same as that of the input tensor"
return flow._C.max_unpool3d(
x, indices, self.kernel_size, self.stride, self.padding, output_size
)