7 changes: 7 additions & 0 deletions python/paddle/__init__.py
@@ -227,6 +227,13 @@ def new_init(self, *args, **kwargs):
is_autocast_enabled,
)
from .amp.auto_cast import autocast
from .audio.functional.window import ( # noqa: F401
bartlett_window,
blackman_window,
hamming_window,
hann_window,
kaiser_window,
)
from .autograd import (
enable_grad,
grad,
264 changes: 264 additions & 0 deletions python/paddle/audio/functional/window.py
@@ -13,6 +13,7 @@
from __future__ import annotations

import math
import warnings
from typing import TYPE_CHECKING

import numpy as np
@@ -445,3 +446,266 @@ def get_window(
params = (win_length, *args)
kwargs = {'sym': sym}
return winfunc(*params, dtype=dtype, **kwargs)


def _apply_window_postprocess(
w: Tensor,
*,
layout: str | None = None,
device: str | None = None,
pin_memory: bool | None = None,
requires_grad: bool | None = None,
) -> Tensor:
if layout is not None:
warnings.warn(
"Paddle only supports a strided layout; the `layout` argument is ignored."
)

# device: accept PlaceLike strings like 'cpu', 'gpu:0', 'cuda:0'
if device is not None:
Contributor

You can call _get_paddle_place directly here.

Have a look at how paddle.full and the random ops handle these parameters; that handling can be copied over.

Contributor Author

OK.

dev = str(device).lower()
if dev.startswith('cuda') or dev.startswith('gpu'):
idx = 0
if ':' in dev:
try:
idx = int(dev.split(':', 1)[1])
except ValueError:
idx = 0
w = w.cuda(idx)
elif dev == 'cpu':
w = w.cpu()

if pin_memory:
# Pinned memory only applies to CPU tensors; silently skip otherwise.
if w.place.is_cpu_place():
w = w.pin_memory()

if requires_grad is not None:
w.stop_gradient = not requires_grad
return w
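
As the reviewer suggests above, the hand-rolled device-string parsing could be replaced by Paddle's own place parser, the way paddle.full and the random ops resolve their place arguments. A minimal sketch of that approach, under two assumptions: _get_paddle_place is importable from paddle.base.framework (it is a Paddle-internal helper whose location may change between releases), and 'cuda:*' strings would still need to be mapped to 'gpu:*' first:

import paddle
from paddle.base.framework import _get_paddle_place


def _move_to_place(w, device):
    # Resolve 'cpu', 'gpu:0', or an existing Place object with Paddle's parser
    # instead of string matching (sketch of the reviewer's suggestion only).
    if device is None:
        return w
    place = _get_paddle_place(device)
    # Re-materialize the window on the resolved place, preserving the gradient flag.
    return paddle.to_tensor(w, place=place, stop_gradient=w.stop_gradient)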


def hamming_window(
window_length: int,
periodic: bool = True,
alpha: float = 0.54,
beta: float = 0.46,
*,
dtype: str = 'float64',
layout: str | None = None,
device: str | None = None,
pin_memory: None | bool = None,
requires_grad: bool = False,
) -> Tensor:
"""
Compute a generalized Hamming window.

Args:
window_length (int): The size of the returned window. Must be positive.
periodic (bool, optional): If True, returns a window for use as a periodic function; if False, returns a symmetric window. Defaults to True.
alpha (float, optional): The coefficient α in the equation above. Defaults to 0.54.
beta (float, optional): The coefficient β in the equation above. Defaults to 0.46.
dtype (str, optional): The data type of the returned tensor. Defaults to 'float64'.
layout (str, optional): Only included for API consistency with PyTorch; ignored in Paddle. Defaults to None.
device (str, optional): The device to place the returned tensor on. Defaults to None (uses the default device).
pin_memory (bool, optional): If True, returned tensor would be allocated in the pinned memory else not. Works only for CPU tensors. Defaults to None.
requires_grad (bool, optional): If True, operations on the returned tensor will be tracked by autograd for gradient computation else not. Defaults to False.

Returns:
Tensor: A 1-D tensor of shape `(window_length,)` containing the Hamming window.

Examples:
.. code-block:: python

>>> import paddle

>>> win = paddle.hamming_window(400, requires_grad=True)
>>> win = paddle.hamming_window(256, alpha=0.5, beta=0.5)
"""
# get_window('hamming', ...) returns the fixed-coefficient window
# w0[n] = 0.54 - 0.46*cos(2*pi*n/(N-1)); rescale it to alpha - beta*cos(...).
w0 = get_window('hamming', window_length, fftbins=periodic, dtype=dtype)
alpha0, beta0 = 0.54, 0.46
B = beta / beta0
A = alpha - B * alpha0
w = A + B * w0
return _apply_window_postprocess(
w,
layout=layout,
device=device,
pin_memory=pin_memory,
requires_grad=requires_grad,
)
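
For reference, the rescaling above recovers the general alpha/beta form from the fixed-coefficient window returned by get_window('hamming', ...), assuming the SciPy convention w_0[n] = 0.54 - 0.46 cos(2*pi*n/(N-1)):

    A + B\,w_0[n]
      = \left(\alpha - \frac{0.54\,\beta}{0.46}\right)
        + \frac{\beta}{0.46}\left(0.54 - 0.46\cos\frac{2\pi n}{N-1}\right)
      = \alpha - \beta\cos\frac{2\pi n}{N-1}.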


def hann_window(
window_length: int,
periodic: bool = True,
*,
dtype: str = 'float64',
layout: str | None = None,
device: str | None = None,
pin_memory: None | bool = None,
requires_grad: bool = False,
) -> Tensor:
"""
Compute a Hann window.

Args:
window_length (int): The size of the returned window. Must be positive.
periodic (bool, optional): If True, returns a window for use as a periodic function; if False, returns a symmetric window. Defaults to True.
dtype (str, optional): The data type of the returned tensor. Defaults to 'float64'.
layout (str, optional): Only included for API consistency with PyTorch; ignored in Paddle. Defaults to None.
device (str, optional): The device to place the returned tensor on. Defaults to None (uses the default device).
pin_memory (bool, optional): If True, the returned tensor is allocated in pinned memory; only meaningful for CPU tensors. Defaults to None.
requires_grad (bool, optional): If True, autograd records operations on the returned tensor. Defaults to False.

Returns:
Tensor: A 1-D tensor of shape `(window_length,)` containing the Hann window.

Examples:
.. code-block:: python

>>> import paddle

>>> win = paddle.hann_window(512)
>>> win = paddle.hann_window(512, requires_grad=True)
"""
w = get_window('hann', window_length, fftbins=periodic, dtype=dtype)
return _apply_window_postprocess(
w,
layout=layout,
device=device,
pin_memory=pin_memory,
requires_grad=requires_grad,
)


def kaiser_window(
window_length: int,
periodic: bool = True,
beta: float = 12.0,
*,
dtype: str = 'float64',
layout: str | None = None,
device: str | None = None,
pin_memory: None | bool = None,
requires_grad: bool = False,
) -> Tensor:
"""
Compute a Kaiser window.

Args:
window_length (int): The size of the returned window. Must be positive.
periodic (bool, optional): If True, returns a window for use as a periodic function; if False, returns a symmetric window. Defaults to True.
beta (float, optional): Shape parameter for the window. Defaults to 12.0.
dtype (str, optional): The data type of the returned tensor. Defaults to 'float64'.
layout (str, optional): Only included for API consistency with PyTorch; ignored in Paddle. Defaults to None.
device (str, optional): The device to place the returned tensor on. Defaults to None (uses the default device).
pin_memory (bool, optional): If True, the returned tensor is allocated in pinned memory; only meaningful for CPU tensors. Defaults to None.
requires_grad (bool, optional): If True, autograd records operations on the returned tensor. Defaults to False.

Returns:
Tensor: A 1-D tensor of shape `(window_length,)` containing the Kaiser window.

Examples:
.. code-block:: python

>>> import paddle

>>> win = paddle.kaiser_window(400, beta=8.6)
>>> win = paddle.kaiser_window(400, requires_grad=True)
"""
w = get_window(
('kaiser', beta), window_length, fftbins=periodic, dtype=dtype
)
return _apply_window_postprocess(
w,
layout=layout,
device=device,
pin_memory=pin_memory,
requires_grad=requires_grad,
)


def blackman_window(
window_length: int,
periodic: bool = True,
*,
dtype: str = 'float64',
layout: str | None = None,
device: str | None = None,
pin_memory: None | bool = None,
requires_grad: bool = False,
) -> Tensor:
"""
Compute a Blackman window.

Args:
window_length (int): The size of the returned window. Must be positive.
periodic (bool, optional): If True, returns a window for use as a periodic function; if False, returns a symmetric window. Defaults to True.
dtype (str, optional): The data type of the returned tensor. Defaults to 'float64'.
layout (str, optional): Only included for API consistency with PyTorch; ignored in Paddle. Defaults to None.
device (str, optional): The device to place the returned tensor on. Defaults to None (uses the default device).
pin_memory (bool, optional): If True, the returned tensor is allocated in pinned memory; only meaningful for CPU tensors. Defaults to None.
requires_grad (bool, optional): If True, autograd records operations on the returned tensor. Defaults to False.

Returns:
Tensor: A 1-D tensor of shape `(window_length,)` containing the Blackman window.

Examples:
.. code-block:: python

>>> import paddle

>>> win = paddle.blackman_window(256)
>>> win = paddle.blackman_window(256, requires_grad=True)
"""
w = get_window('blackman', window_length, fftbins=periodic, dtype=dtype)
return _apply_window_postprocess(
w,
layout=layout,
device=device,
pin_memory=pin_memory,
requires_grad=requires_grad,
)


def bartlett_window(
window_length: int,
periodic: bool = True,
*,
dtype: str = 'float64',
layout: str | None = None,
device: str | None = None,
pin_memory: None | bool = None,
requires_grad: bool = False,
) -> Tensor:
"""
Compute a Bartlett window.

Args:
window_length (int): The size of the returned window. Must be positive.
periodic (bool, optional): If True, returns a window for use as a periodic function; if False, returns a symmetric window. Defaults to True.
dtype (str, optional): The data type of the returned tensor. Defaults to 'float64'.
layout (str, optional): Only included for API consistency with PyTorch; ignored in Paddle. Defaults to None.
device (str, optional): The device to place the returned tensor on. Defaults to None (uses the default device).
pin_memory (bool, optional): If True, the returned tensor is allocated in pinned memory; only meaningful for CPU tensors. Defaults to None.
requires_grad (bool, optional): If True, autograd records operations on the returned tensor. Defaults to False.

Returns:
Tensor: A 1-D tensor of shape `(window_length,)` containing the Bartlett window.

Examples:
.. code-block:: python

>>> import paddle

>>> n_fft = 512
>>> win = paddle.bartlett_window(n_fft)

>>> win = paddle.bartlett_window(n_fft, requires_grad=True)
"""
w = get_window('bartlett', window_length, fftbins=periodic, dtype=dtype)
return _apply_window_postprocess(
w,
layout=layout,
device=device,
pin_memory=pin_memory,
requires_grad=requires_grad,
)
1 change: 1 addition & 0 deletions test/legacy_test/CMakeLists.txt
@@ -875,6 +875,7 @@ set_tests_properties(test_profiler PROPERTIES TIMEOUT 120)
set_tests_properties(test_cross_entropy_loss PROPERTIES TIMEOUT 180)
set_tests_properties(test_activation_nn_grad PROPERTIES TIMEOUT 250)
set_tests_properties(test_empty_op PROPERTIES TIMEOUT 120)
set_tests_properties(test_align_torch_window_func PROPERTIES TIMEOUT 10)
set_tests_properties(test_elementwise_div_op PROPERTIES TIMEOUT 120)
set_tests_properties(test_multiclass_nms_op PROPERTIES TIMEOUT 120)
if(NOT WIN32)