35 changes: 27 additions & 8 deletions paddle/phi/core/distributed/check/static_check.cc
@@ -183,14 +183,33 @@ void CommStaticCheck::GatherLikeShape(const DenseTensor& out_tensor,
                                       int cur_rank,
                                       int world_size,
                                       phi::AllocationType place) {
-  CheckShape(out_tensor,
-             in_tensor,
-             dst_rank,
-             cur_rank,
-             world_size,
-             /*out_size_factor*/ 1,
-             /*in_size_factor*/ world_size,
-             place);
+  CheckRank(dst_rank, world_size);
+  CheckRank(cur_rank, world_size);
+
+  CheckPlace(out_tensor, in_tensor, place);
+  CheckDataType(out_tensor, in_tensor);
+
+  CheckGatherShape(out_tensor);
+  CheckGatherShape(in_tensor);
+  int64_t out_size = out_tensor.numel(), in_size = in_tensor.numel();
+  PADDLE_ENFORCE_EQ(
+      out_size,
+      in_size * world_size,
+      common::errors::InvalidArgument(
+          "Input and output tensors should have matching sizes. "
+          "out_size=%ld, out_size_factor=%d, in_size=%ld, in_size_factor=%d",
+          out_size,
+          1,
+          in_size,
+          world_size));
 }
+
+void CommStaticCheck::CheckGatherShape(const phi::DenseTensor& tensor) {
+  PADDLE_ENFORCE_GE(
+      tensor.numel(),
+      0,
+      common::errors::InvalidArgument("Size of tensor should be greater than "
+                                      "or equal to 0 in gather-like "
+                                      "communication."));
+}
 
 }  // namespace phi::distributed
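For context, a minimal sketch of the invariant the rewritten `GatherLikeShape` now enforces directly: a gather-like output must hold exactly `world_size` inputs' worth of elements. The two-process launch command, script name, and tensor shape below are illustrative assumptions, not part of the patch.

```python
# Sketch of the out_size == in_size * world_size invariant checked above.
# Assumed launch: python -m paddle.distributed.launch --nproc_per_node=2 gather_check.py
import paddle
import paddle.distributed as dist

dist.init_parallel_env()
world_size = dist.get_world_size()

in_tensor = paddle.ones([4], dtype="float32")  # in_size = 4 on every rank
tensor_list = []
dist.all_gather(tensor_list, in_tensor)

# The gathered output holds world_size tensors of the input's shape,
# i.e. out_size = in_size * world_size, which GatherLikeShape enforces.
assert len(tensor_list) == world_size
assert all(t.shape == in_tensor.shape for t in tensor_list)
```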
2 changes: 2 additions & 0 deletions paddle/phi/core/distributed/check/static_check.h
@@ -81,6 +81,8 @@ struct CommStaticCheck {
                             int cur_rank,
                             int world_size,
                             phi::AllocationType place = phi::AllocationType::GPU);
+
+  static void CheckGatherShape(const phi::DenseTensor& tensor);
 };
 
 }  // namespace distributed
12 changes: 7 additions & 5 deletions python/paddle/distributed/communication/all_gather.py
@@ -85,7 +85,7 @@ def all_gather(
 
 
 def all_gather_object(
-    object_list: list[_T], obj: _T, group: Group = None
+    object_list: list[_T] | list[None], obj: _T, group: Group = None
 ) -> None:
     """
 
@@ -110,7 +110,7 @@ def all_gather_object(
             >>> import paddle.distributed as dist
 
             >>> dist.init_parallel_env()
-            >>> object_list = []  # type: ignore
+            >>> object_list = [None for _ in range(dist.get_world_size())]
             >>> if dist.get_rank() == 0:
             ...     obj = {"foo": [1, 2, 3]}
             >>> else:
@@ -139,7 +139,9 @@ def all_gather_object(
 
     tensor_list = []
    all_gather(tensor_list, input_tensor, group)
+    # Ensure object_list has enough slots for all gathered objects
+    while len(object_list) < len(tensor_list):
+        object_list.append(None)
 
     for i, tensor in enumerate(tensor_list):
-        object_list.append(
-            convert_tensor_to_object(tensor, list_len_of_tensor[i])
-        )
+        object_list[i] = convert_tensor_to_object(tensor, list_len_of_tensor[i])
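A short usage sketch of the fixed `all_gather_object`, showing that both a pre-sized list (as in the updated docstring) and an empty list now behave the same. The two-process launch and the gathered payload are assumptions for illustration.

```python
import paddle.distributed as dist

dist.init_parallel_env()

# Pre-sized list, as in the updated docstring: each slot is assigned in place.
object_list = [None for _ in range(dist.get_world_size())]
obj = {"rank": dist.get_rank()}
dist.all_gather_object(object_list, obj)

# An empty list also works: the new while-loop pads it with None first,
# so indexing with object_list[i] no longer raises or double-appends.
padded = []
dist.all_gather_object(padded, obj)
assert padded == object_list
```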