Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions paddle/fluid/pybind/place.cc
Original file line number Diff line number Diff line change
Expand Up @@ -223,6 +223,8 @@ void BindPlace(pybind11::module &m) { // NOLINT
[](phi::Place &self) { return phi::is_ipu_place(self); })
.def("is_cuda_pinned_place",
[](phi::Place &self) { return phi::is_cuda_pinned_place(self); })
.def("is_gpu_pinned_place",
[](phi::Place &self) { return phi::is_gpu_pinned_place(self); })
.def("is_xpu_pinned_place",
[](phi::Place &self) { return phi::is_xpu_pinned_place(self); })
.def("is_custom_place",
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/pybind/tensor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1273,7 +1273,7 @@ void BindTensor(pybind11::module &m) { // NOLINT
auto holder = self.Holder();
PADDLE_ENFORCE_EQ(
phi::is_cpu_place(holder->place()) ||
phi::is_cuda_pinned_place(holder->place()),
phi::is_gpu_pinned_place(holder->place()),
true, common::errors::InvalidArgument(
"Tensor is not on CPU. share_filename only "
"support CPU Tensor."));
Expand Down Expand Up @@ -1309,7 +1309,7 @@ void BindTensor(pybind11::module &m) { // NOLINT
handle, shared_fd, flags, data_size, find_id);

// copy data & reset holder
if (phi::is_cuda_pinned_place(holder->place())) {
if (phi::is_gpu_pinned_place(holder->place())) {
#ifdef PADDLE_WITH_CUDA
memory::Copy(phi::CPUPlace(), shared_holder->ptr(),
phi::GPUPinnedPlace(), data_ptr, data_size);
Expand Down
5 changes: 4 additions & 1 deletion paddle/phi/common/place.cc
Original file line number Diff line number Diff line change
Expand Up @@ -200,6 +200,9 @@ bool is_cuda_pinned_place(const Place &p) {
return p.GetType() == phi::AllocationType::GPUPINNED;
}

// Returns true iff the place's allocation type is GPUPINNED
// (same check as is_cuda_pinned_place; this is the preferred name).
bool is_gpu_pinned_place(const Place &p) {
  const auto alloc_type = p.GetType();
  return alloc_type == phi::AllocationType::GPUPINNED;
}
// Returns true iff the place's allocation type is XPUPINNED.
bool is_xpu_pinned_place(const Place &p) {
  const auto alloc_type = p.GetType();
  return alloc_type == phi::AllocationType::XPUPINNED;
}
Expand Down Expand Up @@ -230,7 +233,7 @@ bool places_are_same_class(const Place &p1, const Place &p2) {

bool is_same_place(const Place &p1, const Place &p2) {
if (places_are_same_class(p1, p2)) {
if (is_cpu_place(p1) || is_cuda_pinned_place(p1) ||
if (is_cpu_place(p1) || is_gpu_pinned_place(p1) ||
is_xpu_pinned_place(p1)) {
return true;
} else {
Expand Down
4 changes: 3 additions & 1 deletion paddle/phi/common/place.h
Original file line number Diff line number Diff line change
Expand Up @@ -226,7 +226,9 @@ PADDLE_API bool is_xpu_place(const Place&);
PADDLE_API bool is_ipu_place(const Place&);
PADDLE_API bool is_cpu_place(const Place&);
PADDLE_API bool is_pinned_place(const Place&);
PADDLE_API bool is_cuda_pinned_place(const Place&);
PADDLE_API bool is_cuda_pinned_place(
const Place&); // Deprecated, use is_gpu_pinned_place instead.
PADDLE_API bool is_gpu_pinned_place(const Place&);
PADDLE_API bool is_xpu_pinned_place(const Place&);
PADDLE_API bool is_custom_place(const Place& p);
PADDLE_API bool is_accelerat_place(const Place& p);
Expand Down
8 changes: 4 additions & 4 deletions paddle/phi/core/platform/profiler.cc
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ RecordMemEvent::RecordMemEvent(const void *ptr,
uint64_t peak_allocated = 0;
uint64_t current_reserved = 0; // 0 means keep the same as before
uint64_t peak_reserved = 0; // 0 means keep the same as before
if (phi::is_cpu_place(place) || phi::is_cuda_pinned_place(place)) {
if (phi::is_cpu_place(place) || phi::is_gpu_pinned_place(place)) {
if (RecordMemEvent::has_initialized["cpu"][place.GetDeviceId()] ==
false) {
RecordMemEvent::size_cache["cpu"][place.GetDeviceId()].push_back(
Expand Down Expand Up @@ -182,7 +182,7 @@ RecordMemEvent::RecordMemEvent(const void *ptr,
uint64_t peak_reserved = 0;
uint64_t current_allocated = 0; // 0 means keep the same as before
uint64_t peak_allocated = 0; // 0 means keep the same as before
if (phi::is_cpu_place(place) || phi::is_cuda_pinned_place(place)) {
if (phi::is_cpu_place(place) || phi::is_gpu_pinned_place(place)) {
if (RecordMemEvent::has_initialized["cpu"][place.GetDeviceId()] ==
false) {
RecordMemEvent::size_cache["cpu"][place.GetDeviceId()].push_back(
Expand Down Expand Up @@ -264,7 +264,7 @@ RecordMemEvent::RecordMemEvent(const void *ptr,
uint64_t peak_allocated = 0;
uint64_t current_reserved = 0; // 0 means keep the same as before
uint64_t peak_reserved = 0; // 0 means keep the same as before
if (phi::is_cpu_place(place) || phi::is_cuda_pinned_place(place)) {
if (phi::is_cpu_place(place) || phi::is_gpu_pinned_place(place)) {
if (RecordMemEvent::has_initialized["cpu"][place.GetDeviceId()] ==
false) {
RecordMemEvent::size_cache["cpu"][place.GetDeviceId()].push_back(
Expand Down Expand Up @@ -346,7 +346,7 @@ RecordMemEvent::RecordMemEvent(const void *ptr,
uint64_t peak_reserved = 0;
uint64_t current_allocated = 0; // 0 means keep the same as before
uint64_t peak_allocated = 0; // 0 means keep the same as before
if (phi::is_cpu_place(place) || phi::is_cuda_pinned_place(place)) {
if (phi::is_cpu_place(place) || phi::is_gpu_pinned_place(place)) {
if (RecordMemEvent::has_initialized["cpu"][place.GetDeviceId()] ==
false) {
RecordMemEvent::size_cache["cpu"][place.GetDeviceId()].push_back(
Expand Down
Loading