-
Notifications
You must be signed in to change notification settings - Fork 5.9k
[API compatibility] support windows api #76237
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 10 commits
ea05b6a
5cc6154
755f1d2
d109814
747eba5
86055ae
a7457d0
69a28f7
abc1991
032d325
62f3c7d
6da8c0c
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -13,6 +13,7 @@ | |
| from __future__ import annotations | ||
|
|
||
| import math | ||
| import warnings | ||
| from typing import TYPE_CHECKING | ||
|
|
||
| import numpy as np | ||
|
|
@@ -21,9 +22,17 @@ | |
|
|
||
| if TYPE_CHECKING: | ||
| from paddle import Tensor | ||
| from paddle._typing import PlaceLike | ||
|
|
||
| from ..features.layers import _WindowLiteral | ||
|
|
||
| from paddle.base.framework import ( | ||
| _current_expected_place, | ||
| _get_paddle_place, | ||
| core, | ||
| in_dynamic_or_pir_mode, | ||
| ) | ||
|
|
||
|
|
||
| class WindowFunctionRegister: | ||
| def __init__(self): | ||
|
|
@@ -445,3 +454,287 @@ def get_window( | |
| params = (win_length, *args) | ||
| kwargs = {'sym': sym} | ||
| return winfunc(*params, dtype=dtype, **kwargs) | ||
|
|
||
|
|
||
def _apply_window_postprocess(
    w: Tensor,
    *,
    layout: str | None = None,
    device: PlaceLike | None = None,
    pin_memory: bool = False,
    requires_grad: bool = False,
) -> Tensor:
    """Apply PyTorch-compatible keyword arguments to a window tensor.

    Shared post-processing for ``hamming_window`` / ``hann_window`` /
    ``kaiser_window`` / ``blackman_window`` / ``bartlett_window``.

    Args:
        w (Tensor): The 1-D window tensor produced by ``get_window``.
        layout (str|None, optional): Accepted only for API compatibility with
            PyTorch; Paddle tensors are always strided, so any non-None value
            is ignored with a warning. Default: None.
        device (PlaceLike|None, optional): Desired place of the returned
            tensor. If None, the current expected place is used. Default: None.
        pin_memory (bool, optional): If True, allocate the result in pinned
            (page-locked) memory. Only effective in dynamic mode. Default: False.
        requires_grad (bool, optional): If True, autograd records operations
            on the returned tensor (``stop_gradient = False``). Default: False.

    Returns:
        Tensor: ``w`` after device placement, pinning, and gradient flags
        have been applied.

    Raises:
        RuntimeError: If ``pin_memory`` is requested for a place that has no
            pinned-memory counterpart (i.e. neither GPU nor XPU).
    """
    if layout is not None:
        warnings.warn("layout only supports 'strided' in Paddle; ignored")

    if in_dynamic_or_pir_mode():
        # Resolve the target place; fall back to the current expected place.
        device = (
            _get_paddle_place(device)
            if device is not None
            else _current_expected_place()
        )
        if (
            pin_memory
            and paddle.in_dynamic_mode()
            and not isinstance(
                device, (core.CUDAPinnedPlace, core.XPUPinnedPlace)
            )
        ):
            # Map a GPU/XPU place to its pinned-memory counterpart; other
            # places cannot be pinned.
            if isinstance(device, core.CUDAPlace) or (
                isinstance(device, core.Place) and device.is_gpu_place()
            ):
                device = core.CUDAPinnedPlace()
            elif isinstance(device, core.XPUPlace) or (
                isinstance(device, core.Place) and device.is_xpu_place()
            ):
                device = core.XPUPinnedPlace()
            else:
                raise RuntimeError(
                    f"Pinning memory is not supported for {device}"
                )

        # BUG FIX: previously the resolved place (including the pinned place
        # chosen above) was never applied to `w`, so `device=` had no effect
        # and pinning relied on a separate redundant `w.pin_memory()` call.
        # Copy the tensor to the requested place here; when `pin_memory` is
        # set, `device` is already the pinned place, so no extra pin call is
        # needed.
        if paddle.in_dynamic_mode():
            w = paddle.to_tensor(w, place=device)

    if requires_grad:
        w.stop_gradient = False
    return w
|
|
||
|
|
||
def hamming_window(
    window_length: int,
    periodic: bool = True,
    alpha: float = 0.54,
    beta: float = 0.46,
    *,
    dtype: str = 'float64',
    layout: str | None = None,
    device: PlaceLike | None = None,
    pin_memory: bool = False,
    requires_grad: bool = False,
):
    """
    Compute a generalized Hamming window.

    Args:
        window_length (int): The size of the returned window. Must be positive.
        periodic (bool, optional): If True, returns a window for use as a periodic function; if False, returns a symmetric window. Defaults to True.
        alpha (float, optional): The coefficient α in the equation above. Defaults to 0.54.
        beta (float, optional): The coefficient β in the equation above. Defaults to 0.46.
        dtype (str, optional): The data type of the returned tensor. Defaults to 'float64'.
        layout (str, optional): Only included for API consistency with PyTorch; ignored in Paddle. Defaults to None.
        device(PlaceLike|None, optional): The desired device of returned tensor.
            if None, uses the current device for the default tensor type (see paddle.device.set_device()).
            device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types. Default: None.
        pin_memory(bool, optional): If set, return tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: False
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.

    Returns:
        Tensor: A 1-D tensor of shape `(window_length,)` containing the Hamming window.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> win = paddle.hamming_window(400, requires_grad=True)
            >>> win = paddle.hamming_window(256, alpha=0.5, beta=0.5)
    """
    base = get_window('hamming', window_length, fftbins=periodic, dtype=dtype)
    # get_window yields 0.54 - 0.46*cos(...); rescale that to the requested
    # generalized form alpha - beta*cos(...).
    scale = beta / 0.46
    shift = alpha - scale * 0.54
    window = shift + scale * base
    return _apply_window_postprocess(
        window,
        layout=layout,
        device=device,
        pin_memory=pin_memory,
        requires_grad=requires_grad,
    )
|
|
||
|
|
||
def hann_window(
    window_length: int,
    periodic: bool = True,
    *,
    dtype: str = 'float64',
    layout: str | None = None,
    device: PlaceLike | None = None,
    pin_memory: bool = False,
    requires_grad: bool = False,
):
    """
    Compute a Hann window.

    Args:
        window_length (int): The size of the returned window. Must be positive.
        periodic (bool, optional): If True, returns a window for use as a periodic function; if False, returns a symmetric window. Defaults to True.
        dtype (str, optional): The data type of the returned tensor. Defaults to 'float64'.
        layout (str, optional): Only included for API consistency with PyTorch; ignored in Paddle. Defaults to None.
        device(PlaceLike|None, optional): The desired device of returned tensor.
            if None, uses the current device for the default tensor type (see paddle.device.set_device()).
            device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types. Default: None.
        pin_memory(bool, optional): If set, return tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: False
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.

    Returns:
        Tensor: A 1-D tensor of shape `(window_length,)` containing the Hann window.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> win = paddle.hann_window(512)
            >>> win = paddle.hann_window(512, requires_grad=True)
    """
    # Delegate window generation to get_window, then apply the
    # PyTorch-compatibility keyword arguments in one place.
    post_kwargs = {
        'layout': layout,
        'device': device,
        'pin_memory': pin_memory,
        'requires_grad': requires_grad,
    }
    window = get_window('hann', window_length, fftbins=periodic, dtype=dtype)
    return _apply_window_postprocess(window, **post_kwargs)
|
|
||
|
|
||
def kaiser_window(
    window_length: int,
    periodic: bool = True,
    beta: float = 12.0,
    *,
    dtype: str = 'float64',
    layout: str | None = None,
    device: PlaceLike | None = None,
    pin_memory: bool = False,
    requires_grad: bool = False,
):
    """
    Compute a Kaiser window.

    Args:
        window_length (int): The size of the returned window. Must be positive.
        periodic (bool, optional): If True, returns a window for use as a periodic function; if False, returns a symmetric window. Defaults to True.
        beta (float, optional): Shape parameter for the window. Defaults to 12.0.
        dtype (str, optional): The data type of the returned tensor. Defaults to 'float64'.
        layout (str, optional): Only included for API consistency with PyTorch; ignored in Paddle. Defaults to None.
        device(PlaceLike|None, optional): The desired device of returned tensor.
            if None, uses the current device for the default tensor type (see paddle.device.set_device()).
            device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types. Default: None.
        pin_memory(bool, optional): If set, return tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: False
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.

    Returns:
        Tensor: A 1-D tensor of shape `(window_length,)` containing the Kaiser window.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> win = paddle.kaiser_window(400, beta=8.6)
            >>> win = paddle.kaiser_window(400, requires_grad=True)
    """
    # The Kaiser shape parameter rides along as part of the window spec tuple.
    spec = ('kaiser', beta)
    window = get_window(spec, window_length, fftbins=periodic, dtype=dtype)
    return _apply_window_postprocess(
        window,
        layout=layout,
        device=device,
        pin_memory=pin_memory,
        requires_grad=requires_grad,
    )
|
|
||
|
|
||
def blackman_window(
    window_length: int,
    periodic: bool = True,
    *,
    dtype: str = 'float64',
    layout: str | None = None,
    device: PlaceLike | None = None,
    pin_memory: bool = False,
    requires_grad: bool = False,
):
    """
    Compute a Blackman window.

    Args:
        window_length (int): The size of the returned window. Must be positive.
        periodic (bool, optional): If True, returns a window for use as a periodic function; if False, returns a symmetric window. Defaults to True.
        dtype (str, optional): The data type of the returned tensor. Defaults to 'float64'.
        layout (str, optional): Only included for API consistency with PyTorch; ignored in Paddle. Defaults to None.
        device(PlaceLike|None, optional): The desired device of returned tensor.
            if None, uses the current device for the default tensor type (see paddle.device.set_device()).
            device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types. Default: None.
        pin_memory(bool, optional): If set, return tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: False
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.

    Returns:
        Tensor: A 1-D tensor of shape `(window_length,)` containing the Blackman window.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> win = paddle.blackman_window(256)
            >>> win = paddle.blackman_window(256, requires_grad=True)
    """
    # Build the raw window, then hand off device / pinning / grad handling.
    window = get_window(
        'blackman', window_length, fftbins=periodic, dtype=dtype
    )
    return _apply_window_postprocess(
        window,
        layout=layout,
        device=device,
        pin_memory=pin_memory,
        requires_grad=requires_grad,
    )
|
|
||
|
|
||
def bartlett_window(
    window_length: int,
    periodic: bool = True,
    *,
    dtype: str = 'float64',
    layout: str | None = None,
    device: PlaceLike | None = None,
    pin_memory: bool = False,
    requires_grad: bool = False,
):
    """
    Compute a Bartlett window.

    Args:
        window_length (int): The size of the returned window. Must be positive.
        periodic (bool, optional): If True, returns a window for use as a periodic function; if False, returns a symmetric window. Defaults to True.
        dtype (str, optional): The data type of the returned tensor. Defaults to 'float64'.
        layout (str, optional): Only included for API consistency with PyTorch; ignored in Paddle. Defaults to None.
        device(PlaceLike|None, optional): The desired device of returned tensor.
            if None, uses the current device for the default tensor type (see paddle.device.set_device()).
            device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types. Default: None.
        pin_memory(bool, optional): If set, return tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: False
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.

    Returns:
        Tensor: A 1-D tensor of shape `(window_length,)` containing the Bartlett window.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> n_fft = 512
            >>> win = paddle.bartlett_window(n_fft)

            >>> win = paddle.bartlett_window(n_fft, requires_grad=True)
    """
    # Delegate window generation, then apply the PyTorch-compat kwargs.
    post_kwargs = {
        'layout': layout,
        'device': device,
        'pin_memory': pin_memory,
        'requires_grad': requires_grad,
    }
    window = get_window(
        'bartlett', window_length, fftbins=periodic, dtype=dtype
    )
    return _apply_window_postprocess(window, **post_kwargs)
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -876,6 +876,7 @@ set_tests_properties(test_cross_entropy_loss PROPERTIES TIMEOUT 180) | |
| set_tests_properties(test_legacy_loss_args PROPERTIES TIMEOUT 10) | ||
| set_tests_properties(test_activation_nn_grad PROPERTIES TIMEOUT 250) | ||
| set_tests_properties(test_empty_op PROPERTIES TIMEOUT 120) | ||
| set_tests_properties(test_window PROPERTIES TIMEOUT 10) | ||
|
||
| set_tests_properties(test_elementwise_div_op PROPERTIES TIMEOUT 120) | ||
| set_tests_properties(test_multiclass_nms_op PROPERTIES TIMEOUT 120) | ||
| if(NOT WIN32) | ||
|
|
||

There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
你这后面没有对device做处理呢，paddle.full的kernel可以直接传递一个device，而这个并没有处理device。 (Translation: You never actually handle `device` afterwards — the `paddle.full` kernel can accept a device directly, but this code does not process `device` at all.)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
抱歉漏掉了，已更正 (Translation: Sorry, I missed that — it has been corrected.)