From e5b5c03ccb5f3a23d853f053fc5c780c0a90138c Mon Sep 17 00:00:00 2001 From: mbsantiago Date: Tue, 30 Apr 2024 07:59:18 -0600 Subject: [PATCH 1/7] adopted rye as dependency manager --- pyproject.toml | 38 ++---- requirements-dev.lock | 307 ++++++++++++++++++++++++++++++++++++++++++ requirements.lock | 127 +++++++++++++++++ 3 files changed, 445 insertions(+), 27 deletions(-) create mode 100644 requirements-dev.lock create mode 100644 requirements.lock diff --git a/pyproject.toml b/pyproject.toml index 0f77806..9d0772d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,9 +36,9 @@ all = [ requires = ["pdm-backend"] build-backend = "pdm.backend" -[tool.pdm] -[tool.pdm.dev-dependencies] -dev = [ +[tool] +rye = { dev-dependencies = [ + "icecream>=2.1.3", "pytest>=7.4.0", "coverage[toml]>=7.3.2", "black>=23.3.0", @@ -57,19 +57,7 @@ dev = [ "pytest-watch>=4.2.0", "pytest-testmon>=2.0.12", "html5lib>=1.1", -] - -[tool.docformatter] -style = "numpy" -wrap-summaries = 79 -wrap-descriptions = 79 - -[tool.black] -line-length = 79 - -[tool.isort] -profile = "black" -line_length = 79 +] } [tool.pytest.ini_options] addopts = "-vv" @@ -78,6 +66,13 @@ addopts = "-vv" line-length = 79 target-version = "py38" +[tool.ruff.format] +docstring-code-format = true +docstring-code-line-length = 60 + +[tool.ruff.lint] +select = ["E4", "E7", "E9", "F", "B", "Q", "I", "D"] + [tool.ruff.lint.pydocstyle] convention = "numpy" @@ -87,17 +82,6 @@ venv = ".venv" include = ["src", "tests"] verboseOutput = true -[tool.pydocstyle] -convention = "numpy" -match = "(?!test_).*\\.py" -match_dir = "(?!tests).*" - -[tool.mypy] -ignore_missing_imports = true -show_error_codes = true -show_error_code_links = true -disable_error_code = ["call-overload", "misc", "override"] - [tool.coverage.run] branch = true source = ["src/soundevent"] diff --git a/requirements-dev.lock b/requirements-dev.lock new file mode 100644 index 0000000..42a3053 --- /dev/null +++ b/requirements-dev.lock @@ -0,0 +1,307 @@ +# generated by rye +# use `rye lock` or `rye sync` to update this lockfile +# +# last locked with the following flags: +# pre: false +# features: [] +# all-features: true +# with-sources: false + +-e file:. 
+annotated-types==0.6.0 + # via pydantic +appdirs==1.4.4 + # via crowsetta +asttokens==2.4.1 + # via icecream +astunparse==1.6.3 + # via griffe +attrs==23.2.0 + # via crowsetta + # via hypothesis +babel==2.14.0 + # via mkdocs-material +birdsong-recognition-dataset==0.3.2.post1 + # via crowsetta +black==24.4.2 +cachetools==5.3.3 + # via tox +certifi==2024.2.2 + # via requests +cffi==1.16.0 + # via soundfile +chardet==5.2.0 + # via tox +charset-normalizer==3.3.2 + # via requests +click==8.1.7 + # via black + # via mkdocs + # via mkdocstrings +colorama==0.4.6 + # via griffe + # via icecream + # via mkdocs-material + # via pytest-watch + # via tox +contourpy==1.1.1 + # via matplotlib +coverage==7.5.0 + # via pytest-cov + # via pytest-testmon +crowsetta==4.0.0.post2 + # via soundevent +cycler==0.12.1 + # via matplotlib +cython==3.0.10 + # via soundevent +distlib==0.3.8 + # via virtualenv +dnspython==2.6.1 + # via email-validator +docopt==0.6.2 + # via pytest-watch +email-validator==2.1.1 + # via soundevent +evfuncs==0.3.5.post1 + # via crowsetta +exceptiongroup==1.2.1 + # via hypothesis + # via pytest +executing==2.0.1 + # via icecream +filelock==3.13.4 + # via tox + # via virtualenv +fonttools==4.51.0 + # via matplotlib +ghp-import==2.1.0 + # via mkdocs +griffe==0.44.0 + # via mkdocstrings-python +html5lib==1.1 +hypothesis==6.100.1 +icecream==2.1.3 +idna==3.7 + # via email-validator + # via requests +importlib-metadata==7.1.0 + # via markdown + # via mkdocs + # via mkdocs-get-deps + # via mkdocstrings + # via typeguard +importlib-resources==5.13.0 + # via crowsetta + # via matplotlib + # via soundevent +iniconfig==2.0.0 + # via pytest +isort==5.13.2 +jinja2==3.1.3 + # via mkdocs + # via mkdocs-material + # via mkdocstrings +joblib==1.4.0 + # via scikit-learn +kiwisolver==1.4.5 + # via matplotlib +markdown==3.6 + # via mkdocs + # via mkdocs-autorefs + # via mkdocs-material + # via mkdocstrings + # via pymdown-extensions +markupsafe==2.1.5 + # via jinja2 + # via mkdocs + # via mkdocs-autorefs + # via mkdocstrings +matplotlib==3.7.5 + # via soundevent +memory-profiler==0.61.0 +mergedeep==1.3.4 + # via mkdocs + # via mkdocs-get-deps +mkdocs==1.6.0 + # via mkdocs-autorefs + # via mkdocs-gallery + # via mkdocs-material + # via mkdocstrings +mkdocs-autorefs==1.0.1 + # via mkdocstrings +mkdocs-gallery==0.10.1 +mkdocs-get-deps==0.2.0 + # via mkdocs +mkdocs-material==9.5.19 + # via mkdocs-gallery +mkdocs-material-extensions==1.3.1 + # via mkdocs-material +mkdocstrings==0.24.3 + # via mkdocstrings-python +mkdocstrings-python==1.10.0 + # via mkdocstrings +multimethod==1.10 + # via pandera +mypy==1.10.0 +mypy-extensions==1.0.0 + # via black + # via mypy + # via typing-inspect +numpy==1.24.4 + # via birdsong-recognition-dataset + # via contourpy + # via crowsetta + # via evfuncs + # via matplotlib + # via pandas + # via pandera + # via scikit-learn + # via scipy + # via shapely + # via xarray +packaging==24.0 + # via black + # via matplotlib + # via mkdocs + # via mkdocs-gallery + # via pandera + # via pyproject-api + # via pytest + # via tox + # via xarray +paginate==0.5.6 + # via mkdocs-material +pandas==2.0.3 + # via crowsetta + # via pandera + # via xarray +pandera==0.18.3 + # via crowsetta +pathspec==0.12.1 + # via black + # via mkdocs +pillow==10.3.0 + # via matplotlib +platformdirs==4.2.1 + # via black + # via mkdocs-get-deps + # via mkdocstrings + # via tox + # via virtualenv +pluggy==1.5.0 + # via pytest + # via tox +psutil==5.9.8 + # via memory-profiler +pycparser==2.22 + # via cffi 
+pydantic==2.7.1 + # via pandera + # via soundevent +pydantic-core==2.18.2 + # via pydantic +pygments==2.17.2 + # via icecream + # via mkdocs-material +pymdown-extensions==10.8 + # via mkdocs-material + # via mkdocstrings +pyparsing==3.1.2 + # via matplotlib +pyproject-api==1.6.1 + # via tox +pytest==8.1.2 + # via pytest-cov + # via pytest-testmon + # via pytest-watch +pytest-cov==5.0.0 + # via pytest-cover +pytest-cover==3.0.0 + # via pytest-coverage +pytest-coverage==0.0 +pytest-testmon==2.1.1 +pytest-watch==4.2.0 +python-dateutil==2.9.0.post0 + # via ghp-import + # via matplotlib + # via pandas +pytz==2024.1 + # via babel + # via pandas +pyyaml==6.0.1 + # via mkdocs + # via mkdocs-get-deps + # via pymdown-extensions + # via pyyaml-env-tag +pyyaml-env-tag==0.1 + # via mkdocs +regex==2024.4.16 + # via mkdocs-material +requests==2.31.0 + # via mkdocs-material +ruff==0.4.2 +scikit-learn==1.3.2 + # via soundevent +scipy==1.10.1 + # via crowsetta + # via evfuncs + # via scikit-learn + # via soundevent +shapely==2.0.4 + # via soundevent +six==1.16.0 + # via asttokens + # via astunparse + # via html5lib + # via python-dateutil +sortedcontainers==2.4.0 + # via hypothesis +soundfile==0.12.1 + # via crowsetta + # via soundevent +threadpoolctl==3.4.0 + # via scikit-learn +tomli==2.0.1 + # via black + # via coverage + # via mypy + # via pyproject-api + # via pytest + # via tox +tox==4.15.0 +tqdm==4.66.2 + # via mkdocs-gallery +typeguard==4.2.1 + # via pandera +typing-extensions==4.11.0 + # via annotated-types + # via black + # via mkdocstrings + # via mypy + # via pydantic + # via pydantic-core + # via typeguard + # via typing-inspect +typing-inspect==0.9.0 + # via pandera +tzdata==2024.1 + # via pandas +urllib3==2.2.1 + # via requests +virtualenv==20.26.0 + # via tox +watchdog==4.0.0 + # via mkdocs + # via pytest-watch +webencodings==0.5.1 + # via html5lib +wheel==0.43.0 + # via astunparse +wrapt==1.16.0 + # via pandera +xarray==2023.1.0 + # via soundevent +zipp==3.18.1 + # via importlib-metadata + # via importlib-resources diff --git a/requirements.lock b/requirements.lock new file mode 100644 index 0000000..c551871 --- /dev/null +++ b/requirements.lock @@ -0,0 +1,127 @@ +# generated by rye +# use `rye lock` or `rye sync` to update this lockfile +# +# last locked with the following flags: +# pre: false +# features: [] +# all-features: true +# with-sources: false + +-e file:. 
+annotated-types==0.6.0
+    # via pydantic
+appdirs==1.4.4
+    # via crowsetta
+attrs==23.2.0
+    # via crowsetta
+birdsong-recognition-dataset==0.3.2.post1
+    # via crowsetta
+cffi==1.16.0
+    # via soundfile
+contourpy==1.1.1
+    # via matplotlib
+crowsetta==4.0.0.post2
+    # via soundevent
+cycler==0.12.1
+    # via matplotlib
+cython==3.0.10
+    # via soundevent
+dnspython==2.6.1
+    # via email-validator
+email-validator==2.1.1
+    # via soundevent
+evfuncs==0.3.5.post1
+    # via crowsetta
+fonttools==4.51.0
+    # via matplotlib
+idna==3.7
+    # via email-validator
+importlib-metadata==7.1.0
+    # via typeguard
+importlib-resources==5.13.0
+    # via crowsetta
+    # via matplotlib
+    # via soundevent
+joblib==1.4.0
+    # via scikit-learn
+kiwisolver==1.4.5
+    # via matplotlib
+matplotlib==3.7.5
+    # via soundevent
+multimethod==1.10
+    # via pandera
+mypy-extensions==1.0.0
+    # via typing-inspect
+numpy==1.24.4
+    # via birdsong-recognition-dataset
+    # via contourpy
+    # via crowsetta
+    # via evfuncs
+    # via matplotlib
+    # via pandas
+    # via pandera
+    # via scikit-learn
+    # via scipy
+    # via shapely
+    # via xarray
+packaging==24.0
+    # via matplotlib
+    # via pandera
+    # via xarray
+pandas==2.0.3
+    # via crowsetta
+    # via pandera
+    # via xarray
+pandera==0.18.3
+    # via crowsetta
+pillow==10.3.0
+    # via matplotlib
+pycparser==2.22
+    # via cffi
+pydantic==2.7.1
+    # via pandera
+    # via soundevent
+pydantic-core==2.18.2
+    # via pydantic
+pyparsing==3.1.2
+    # via matplotlib
+python-dateutil==2.9.0.post0
+    # via matplotlib
+    # via pandas
+pytz==2024.1
+    # via pandas
+scikit-learn==1.3.2
+    # via soundevent
+scipy==1.10.1
+    # via crowsetta
+    # via evfuncs
+    # via scikit-learn
+    # via soundevent
+shapely==2.0.4
+    # via soundevent
+six==1.16.0
+    # via python-dateutil
+soundfile==0.12.1
+    # via crowsetta
+    # via soundevent
+threadpoolctl==3.4.0
+    # via scikit-learn
+typeguard==4.2.1
+    # via pandera
+typing-extensions==4.11.0
+    # via annotated-types
+    # via pydantic
+    # via pydantic-core
+    # via typeguard
+    # via typing-inspect
+typing-inspect==0.9.0
+    # via pandera
+tzdata==2024.1
+    # via pandas
+wrapt==1.16.0
+    # via pandera
+xarray==2023.1.0
+    # via soundevent
+zipp==3.18.1
+    # via importlib-metadata
+    # via importlib-resources

From 3e1d1b9e2ce16fa28af9e55a956b5f7626f496e8 Mon Sep 17 00:00:00 2001
From: mbsantiago
Date: Thu, 9 May 2024 15:00:42 +0100
Subject: [PATCH 2/7] Added full test coverage to array functions

---
 .testmondata | Bin 237568 -> 0 bytes
 docs/data_schemas/index.md | 2 +-
 docs/reference/arrays.md | 30 ++
 docs/user_guide/1_saving_and_loading.py | 10 +-
 docs/user_guide/example_dataset.json | 2 +-
 docs/user_guide/nips4b_plus_sample.json | 2 +-
 mkdocs.yml | 4 +
 src/soundevent/arrays/__init__.py | 50 ++
 src/soundevent/arrays/attributes.py | 109 +++++
 src/soundevent/arrays/dimensions.py | 609 ++++++++++++++++++++++++
 src/soundevent/arrays/operations.py | 577 ++++++++++++++++++++++
 src/soundevent/audio/io.py | 51 +-
 src/soundevent/audio/scaling.py | 23 +-
 src/soundevent/audio/spectrograms.py | 116 +++--
 src/soundevent/geometry/__init__.py | 2 +
 src/soundevent/geometry/positions.py | 124 +++++
 src/soundevent/types.py | 44 ++
 tests/test_array/__init__.py | 0
 tests/test_array/test_dimensions.py | 208 ++++++++
 tests/test_array/test_operations.py | 442 +++++++++++++++++
 tests/test_audio/test_audio.py | 16 +-
 tests/test_audio/test_spectrograms.py | 66 ++-
 22 files changed, 2367 insertions(+), 120 deletions(-)
 delete mode 100644 .testmondata
 create mode 100644 docs/reference/arrays.md
 create mode 100644 src/soundevent/arrays/__init__.py
 create mode 100644 src/soundevent/arrays/attributes.py
 create mode 100644 src/soundevent/arrays/dimensions.py
 create mode 100644 src/soundevent/arrays/operations.py
 create mode 100644 src/soundevent/geometry/positions.py
 create mode 100644 src/soundevent/types.py
 create mode 100644 tests/test_array/__init__.py
 create mode 100644 tests/test_array/test_dimensions.py
 create mode 100644 tests/test_array/test_operations.py

diff --git a/.testmondata b/.testmondata
deleted file mode 100644
index 704d761aa1c873025881cb1ddf36e83288c96b5e..0000000000000000000000000000000000000000
GIT binary patch
[binary data omitted: .testmondata, 237568 -> 0 bytes]
z?FAwYc5|XMjbosC{(on<->?6F37Y;dkC>6s;lIIc00j5!{o%$VIM4qVc&Gh_^?!x? z>jRv%KieG|`s&crLqp-$!}IuH{Zo6O_CW1{+5@!*{=a(Q;SQ!N&4IbW0w2DL`ASl9 zFat0*HDTc;KT!dq)1(LQmQ@ti`z`QS|1ZJNg#1zoI#f*l;9L1D82K>rMxahQK%78 zh#AoRq0Q20a-KQ?S&)mMFTlUdFnpQV;rV~k_*1X{pB%k&i(f|DLBe-k- z&vBpr2E6-jdc*GZFRj0{{y_JY?r(J;9J)I6M?)-p9XHI&b?>bE-E|M)4RV(|XF6Mh ze+YgfpzU|tpKCwd`bp~xttXoQ*?hivvhm;eiktg6MFI!~2ga9cuJq3)2K>3wod0-q z!|jyo{JGMUKRy;fdpJV$vx{s0O828F!2e1H7DMf-9K z`MKX_7(P~-?oS_Hw3oJkb~D-j_`S;(a&TN)VX_~;XHl~cq^mXN`V)r&s5%E4X#jE! zss7P}0aSF$z^y9NA3G30(O8YN>O_BZ|Ds*jfm*IM&mY~lJl1N{{DXG~Nb_YS*O29p z?e#udq|ic|T2qogvByWkQm{4V`0?F7R+oocLyA9gR{(X}Qpmxcw89Mk`k2uIJM1Zz2(1S zA&;)~Sitz!3jYo`V-2SQ9=Schsena=Ec4Uq4h5XtQV~oA&>~IzO3wtG*jzDY6o6|w z5-^T;_%0bU3b-|&2zct&04D;fe0S3a0*-F-iJ%I4_2&Uj+_Ib_dwqR!RmTC2jaDRv z!jS_b#!62EoETZIYztxSXId*f4DjS|#n4-nZDFFd`m+E>H!jy09(r|00Uq7pCoxqG zyV{cgkAce^uS8l%G=B4Fr3V3?=mt0lP*t)8yj7nAI5ku;29^_~MU%>v9s@WY`dROy zVwQ*lbIO&T0(fLy#qifdTh$?e$2t|QQ39>$48X&Hxse=CZ|Fwf_Hq0eNGn$^ZZW diff --git a/docs/data_schemas/index.md b/docs/data_schemas/index.md index 63eb24d..cbb1c0b 100644 --- a/docs/data_schemas/index.md +++ b/docs/data_schemas/index.md @@ -19,7 +19,7 @@ objects encountered in bioacoustic analysis. These include: Delving into the core of acoustic analysis, we have schemas for: -- [Recordings](audio_content.md#recordins): Complete audio files. +- [Recordings](audio_content.md#recordings): Complete audio files. - [Dataset](audio_content.md#datasets): A collection of recordings from a common source. diff --git a/docs/reference/arrays.md b/docs/reference/arrays.md new file mode 100644 index 0000000..10a9612 --- /dev/null +++ b/docs/reference/arrays.md @@ -0,0 +1,30 @@ +# Arrays Module + +???+ info "Additional dependencies" + + To use the `soundevent.arrays` module you need to install some additional + dependencies. Make sure you have them installed by running the following + command: + + ```bash + pip install soundevent[audio] + ``` + +::: soundevent.arrays + options: + members: false + + +::: soundevent.arrays.dimensions + options: + show_root_members_full_path: true + + +::: soundevent.arrays.attributes + options: + show_root_members_full_path: true + + +::: soundevent.arrays.operations + options: + show_root_members_full_path: true diff --git a/docs/user_guide/1_saving_and_loading.py b/docs/user_guide/1_saving_and_loading.py index 0a78ddb..dd7c921 100644 --- a/docs/user_guide/1_saving_and_loading.py +++ b/docs/user_guide/1_saving_and_loading.py @@ -1,4 +1,4 @@ -"""# Saving and Loading data +"""# Saving and Loading data. In `soundevent`, we use the **Acoustic Objects Exchange Format** (**AOEF**) for storing and exchanging audio objects. **AOEF** is a JSON-based format @@ -88,8 +88,8 @@ # ### Loading Annotation Projects # The [`load`][soundevent.io.load] # function can be used to load the annotations into Python and obtain an -# [`AnnotationProject`](../../data_schemas/annotation.md#annotation_projects) object -# directly. +# [`AnnotationProject`](../../data_schemas/annotation.md#annotation_project) +# object directly. nips4b_sample = io.load(annotation_path, type="annotation_set") print(repr(nips4b_sample)) @@ -130,8 +130,8 @@ # [`save`][soundevent.io.save] and # [`load`][soundevent.io.load] functions, respectively. The # loading function reads the **AOEF** file and returns a -# [`ModelRun`](../../data_schemas/prediction.md#model_run) object that can be used -# for further analysis. +# [`ModelRun`](../../data_schemas/prediction.md#model_runs) object that can be +# used for further analysis. 
# # By utilizing the saving and loading functions provided by soundevent, you can # easily manage and exchange acoustic data objects in AOEF format, promoting diff --git a/docs/user_guide/example_dataset.json b/docs/user_guide/example_dataset.json index a3459c1..f888ef6 100644 --- a/docs/user_guide/example_dataset.json +++ b/docs/user_guide/example_dataset.json @@ -1 +1 @@ -{"version":"1.1.0","created_on":"2023-11-27T19:53:00.688910","data":{"uuid":"b1096756-eea2-4489-9e6a-b98b559647bb","collection_type":"dataset","created_on":"2023-11-21T13:43:14.742002","recordings":[{"uuid":"89957d47-f67d-4bfe-8352-bf0fe5a8ce3e","path":"recording1.wav","duration":10.0,"channels":1,"samplerate":44100,"time_expansion":10.0,"hash":"1234567890abcdef","date":"2021-01-01","time":"21:34:56","latitude":12.345,"longitude":34.567,"tags":[0,1,2],"features":{"SNR":10.0,"ACI":0.5},"notes":[{"uuid":"2931b864-43e4-4fb1-aae1-a214dccca6e3","message":"This is a note.","created_by":"04ef3927-3a3d-40df-9d6e-2cc5e21482a0","is_issue":false,"created_on":"2023-11-21T13:43:14.742073"}],"owners":["d6eb0862-a619-4919-992c-eb3625692c13"]},{"uuid":"bd30f886-3abb-475b-aacb-c7148a4d4420","path":"recording2.wav","duration":8.0,"channels":1,"samplerate":441000,"time_expansion":10.0,"hash":"234567890abcdef1","date":"2021-01-02","time":"19:34:56","latitude":13.345,"longitude":32.567,"tags":[3,4,5],"features":{"SNR":7.0,"ACI":0.3},"notes":[{"uuid":"713b6c15-0e3d-4cc5-acc6-3f1093209a40","message":"Unsure about the species.","created_by":"04ef3927-3a3d-40df-9d6e-2cc5e21482a0","is_issue":false,"created_on":"2023-11-21T13:43:14.742147"}],"owners":["d6eb0862-a619-4919-992c-eb3625692c13"]}],"tags":[{"id":0,"key":"species","value":"Myotis myotis"},{"id":1,"key":"sex","value":"female"},{"id":2,"key":"behaviour","value":"foraging"},{"id":3,"key":"species","value":"Eptesicus serotinus"},{"id":4,"key":"sex","value":"male"},{"id":5,"key":"behaviour","value":"social calls"}],"users":[{"uuid":"04ef3927-3a3d-40df-9d6e-2cc5e21482a0","name":"John Doe"},{"uuid":"d6eb0862-a619-4919-992c-eb3625692c13","email":"data.collector@soundevent.org","name":"Data Collector"}],"name":"test_dataset","description":"A test dataset"}} \ No newline at end of file +{"version":"1.1.0","created_on":"2024-05-05T17:07:27.856998","data":{"uuid":"b1096756-eea2-4489-9e6a-b98b559647bb","collection_type":"dataset","created_on":"2023-11-21T13:43:14.742002","recordings":[{"uuid":"89957d47-f67d-4bfe-8352-bf0fe5a8ce3e","path":"recording1.wav","duration":10.0,"channels":1,"samplerate":44100,"time_expansion":10.0,"hash":"1234567890abcdef","date":"2021-01-01","time":"21:34:56","latitude":12.345,"longitude":34.567,"tags":[0,1,2],"features":{"SNR":10.0,"ACI":0.5},"notes":[{"uuid":"2931b864-43e4-4fb1-aae1-a214dccca6e3","message":"This is a note.","created_by":"04ef3927-3a3d-40df-9d6e-2cc5e21482a0","is_issue":false,"created_on":"2023-11-21T13:43:14.742073"}],"owners":["d6eb0862-a619-4919-992c-eb3625692c13"]},{"uuid":"bd30f886-3abb-475b-aacb-c7148a4d4420","path":"recording2.wav","duration":8.0,"channels":1,"samplerate":441000,"time_expansion":10.0,"hash":"234567890abcdef1","date":"2021-01-02","time":"19:34:56","latitude":13.345,"longitude":32.567,"tags":[3,4,5],"features":{"SNR":7.0,"ACI":0.3},"notes":[{"uuid":"713b6c15-0e3d-4cc5-acc6-3f1093209a40","message":"Unsure about the species.","created_by":"04ef3927-3a3d-40df-9d6e-2cc5e21482a0","is_issue":false,"created_on":"2023-11-21T13:43:14.742147"}],"owners":["d6eb0862-a619-4919-992c-eb3625692c13"]}],"tags":[{"id":0,"key":"species","value":"Myotis 
myotis"},{"id":1,"key":"sex","value":"female"},{"id":2,"key":"behaviour","value":"foraging"},{"id":3,"key":"species","value":"Eptesicus serotinus"},{"id":4,"key":"sex","value":"male"},{"id":5,"key":"behaviour","value":"social calls"}],"users":[{"uuid":"04ef3927-3a3d-40df-9d6e-2cc5e21482a0","name":"John Doe"},{"uuid":"d6eb0862-a619-4919-992c-eb3625692c13","email":"data.collector@soundevent.org","name":"Data Collector"}],"name":"test_dataset","description":"A test dataset"}} \ No newline at end of file diff --git a/docs/user_guide/nips4b_plus_sample.json b/docs/user_guide/nips4b_plus_sample.json index 35f97d3..1a2a471 100644 --- a/docs/user_guide/nips4b_plus_sample.json +++ b/docs/user_guide/nips4b_plus_sample.json @@ -1 +1 @@ -{"version":"1.1.0","created_on":"2023-11-27T19:53:01.364978","data":{"uuid":"c18624a1-8145-4657-a3f1-b3512134ecf6","collection_type":"annotation_set","recordings":[{"uuid":"8392b0ff-293f-4d5b-bc1b-d40d2a0eb0dc","path":"train/nips4b_birds_trainfile079.wav","duration":5.00390022675737,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"35e120d0-1633-4864-88c9-063aca992747","path":"train/nips4b_birds_trainfile237.wav","duration":5.00390022675737,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"7ee23c44-1bc6-4833-8d66-14aa4a8e8634","path":"train/nips4b_birds_trainfile587.wav","duration":5.00390022675737,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"24eceb91-535e-42f0-80e4-8c670465bac8","path":"train/nips4b_birds_trainfile106.wav","duration":4.069297052154195,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"24dd23da-ca07-40eb-80e6-af8b6b2a75ee","path":"train/nips4b_birds_trainfile430.wav","duration":5.00390022675737,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"8954beed-21d0-4f20-98bd-58c14264d853","path":"train/nips4b_birds_trainfile661.wav","duration":1.3873922902494331,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"69f7bf42-087a-4d88-b312-f26688597974","path":"train/nips4b_birds_trainfile429.wav","duration":5.00390022675737,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"b98b5a59-3518-45e5-9306-67c522540ae1","path":"train/nips4b_birds_trainfile633.wav","duration":5.00390022675737,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"52e329a3-afbb-475e-938c-f70c88580723","path":"train/nips4b_birds_trainfile200.wav","duration":5.00390022675737,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"6eeca672-f3ec-41ca-a551-54ddec0dd1a7","path":"train/nips4b_birds_trainfile545.wav","duration":2.7921995464852607,"channels":1,"samplerate":44100,"owners":[]}],"clips":[{"uuid":"b283a71f-4aa8-4ee2-ac83-7c7d5d0af2bc","recording":"8392b0ff-293f-4d5b-bc1b-d40d2a0eb0dc","start_time":0.0,"end_time":5.00390022675737},{"uuid":"e2ec7571-bfe1-4682-8d4d-15e3e34edfc9","recording":"35e120d0-1633-4864-88c9-063aca992747","start_time":0.0,"end_time":5.00390022675737},{"uuid":"ab5bcc8f-078e-4194-97a4-763998d289fa","recording":"7ee23c44-1bc6-4833-8d66-14aa4a8e8634","start_time":0.0,"end_time":5.00390022675737},{"uuid":"dd9ad126-8b15-4121-84f5-f59dc3f85802","recording":"24eceb91-535e-42f0-80e4-8c670465bac8","start_time":0.0,"end_time":4.069297052154195},{"uuid":"0a53e14f-f51c-4ade-9567-87c2b02bd197","recording":"24dd23da-ca07-40eb-80e6-af8b6b2a75ee","start_time":0.0,"end_time":5.00390022675737},{"uuid":"ae455d97-d1fa-43a1-b179-907bee8471e6","recording":"8954beed-21d0-4f20-98bd-58c14264d853","start_time":0.0,"end_time":1.3873922902494331},{"uuid":"0b42c9e1-0289-4f03-b3cb-10859a80103e","recording":"69f7bf42-087a-4d88-b312-f26688597974","start_time":0.0,"end_tim
e":5.00390022675737},{"uuid":"56329c3c-5beb-48ed-8a79-be4e9bd2d9ed","recording":"b98b5a59-3518-45e5-9306-67c522540ae1","start_time":0.0,"end_time":5.00390022675737},{"uuid":"57e0b23a-8663-49f3-8455-997e0f4d2b49","recording":"52e329a3-afbb-475e-938c-f70c88580723","start_time":0.0,"end_time":5.00390022675737},{"uuid":"c7b6f927-fd3b-40ac-9b25-d682eec2d3ac","recording":"6eeca672-f3ec-41ca-a551-54ddec0dd1a7","start_time":0.0,"end_time":2.7921995464852607}],"clip_annotations":[{"uuid":"0e7a786e-48e4-4424-a8da-bf080bdefd9e","clip":"b283a71f-4aa8-4ee2-ac83-7c7d5d0af2bc","created_on":"2023-11-23T20:44:32.913233"},{"uuid":"af9713a7-2bf4-45df-bdff-63d47de34d71","clip":"e2ec7571-bfe1-4682-8d4d-15e3e34edfc9","created_on":"2023-11-23T20:44:32.913255"},{"uuid":"70738dff-3c98-4838-979c-0ed073edd0ea","clip":"ab5bcc8f-078e-4194-97a4-763998d289fa","created_on":"2023-11-23T20:44:32.913264"},{"uuid":"fa0fc36e-0c1a-450f-900b-5a32df17a159","clip":"dd9ad126-8b15-4121-84f5-f59dc3f85802","created_on":"2023-11-23T20:44:32.913377"},{"uuid":"178bf156-2bab-4970-8a76-4abfdc4b31b7","clip":"0a53e14f-f51c-4ade-9567-87c2b02bd197","created_on":"2023-11-23T20:44:32.913386"},{"uuid":"dc71d06a-54d6-4fad-ac6a-205bbee7ec96","clip":"ae455d97-d1fa-43a1-b179-907bee8471e6","created_on":"2023-11-23T20:44:32.913392"},{"uuid":"b418abd6-52cc-4ef4-9725-33d3d38b7878","clip":"0b42c9e1-0289-4f03-b3cb-10859a80103e","created_on":"2023-11-23T20:44:32.913400"},{"uuid":"d18ca56f-2f13-44dd-be43-f995e4d2edb6","clip":"56329c3c-5beb-48ed-8a79-be4e9bd2d9ed","created_on":"2023-11-23T20:44:32.913415"},{"uuid":"8dfa7d26-5f8b-4f37-bf0a-c031373b22f7","clip":"57e0b23a-8663-49f3-8455-997e0f4d2b49","created_on":"2023-11-23T20:44:32.913430"},{"uuid":"ebc5e8d0-bd3e-4d01-95e0-d9a0ce337ac9","clip":"c7b6f927-fd3b-40ac-9b25-d682eec2d3ac","created_on":"2023-11-23T20:44:32.913437"}],"created_on":"2023-11-23T20:44:32.913488"}} \ No newline at end of file 
+{"version":"1.1.0","created_on":"2024-05-05T17:07:28.277844","data":{"uuid":"c18624a1-8145-4657-a3f1-b3512134ecf6","collection_type":"annotation_set","recordings":[{"uuid":"8392b0ff-293f-4d5b-bc1b-d40d2a0eb0dc","path":"train/nips4b_birds_trainfile079.wav","duration":5.00390022675737,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"35e120d0-1633-4864-88c9-063aca992747","path":"train/nips4b_birds_trainfile237.wav","duration":5.00390022675737,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"7ee23c44-1bc6-4833-8d66-14aa4a8e8634","path":"train/nips4b_birds_trainfile587.wav","duration":5.00390022675737,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"24eceb91-535e-42f0-80e4-8c670465bac8","path":"train/nips4b_birds_trainfile106.wav","duration":4.069297052154195,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"24dd23da-ca07-40eb-80e6-af8b6b2a75ee","path":"train/nips4b_birds_trainfile430.wav","duration":5.00390022675737,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"8954beed-21d0-4f20-98bd-58c14264d853","path":"train/nips4b_birds_trainfile661.wav","duration":1.3873922902494331,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"69f7bf42-087a-4d88-b312-f26688597974","path":"train/nips4b_birds_trainfile429.wav","duration":5.00390022675737,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"b98b5a59-3518-45e5-9306-67c522540ae1","path":"train/nips4b_birds_trainfile633.wav","duration":5.00390022675737,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"52e329a3-afbb-475e-938c-f70c88580723","path":"train/nips4b_birds_trainfile200.wav","duration":5.00390022675737,"channels":1,"samplerate":44100,"owners":[]},{"uuid":"6eeca672-f3ec-41ca-a551-54ddec0dd1a7","path":"train/nips4b_birds_trainfile545.wav","duration":2.7921995464852607,"channels":1,"samplerate":44100,"owners":[]}],"clips":[{"uuid":"b283a71f-4aa8-4ee2-ac83-7c7d5d0af2bc","recording":"8392b0ff-293f-4d5b-bc1b-d40d2a0eb0dc","start_time":0.0,"end_time":5.00390022675737},{"uuid":"e2ec7571-bfe1-4682-8d4d-15e3e34edfc9","recording":"35e120d0-1633-4864-88c9-063aca992747","start_time":0.0,"end_time":5.00390022675737},{"uuid":"ab5bcc8f-078e-4194-97a4-763998d289fa","recording":"7ee23c44-1bc6-4833-8d66-14aa4a8e8634","start_time":0.0,"end_time":5.00390022675737},{"uuid":"dd9ad126-8b15-4121-84f5-f59dc3f85802","recording":"24eceb91-535e-42f0-80e4-8c670465bac8","start_time":0.0,"end_time":4.069297052154195},{"uuid":"0a53e14f-f51c-4ade-9567-87c2b02bd197","recording":"24dd23da-ca07-40eb-80e6-af8b6b2a75ee","start_time":0.0,"end_time":5.00390022675737},{"uuid":"ae455d97-d1fa-43a1-b179-907bee8471e6","recording":"8954beed-21d0-4f20-98bd-58c14264d853","start_time":0.0,"end_time":1.3873922902494331},{"uuid":"0b42c9e1-0289-4f03-b3cb-10859a80103e","recording":"69f7bf42-087a-4d88-b312-f26688597974","start_time":0.0,"end_time":5.00390022675737},{"uuid":"56329c3c-5beb-48ed-8a79-be4e9bd2d9ed","recording":"b98b5a59-3518-45e5-9306-67c522540ae1","start_time":0.0,"end_time":5.00390022675737},{"uuid":"57e0b23a-8663-49f3-8455-997e0f4d2b49","recording":"52e329a3-afbb-475e-938c-f70c88580723","start_time":0.0,"end_time":5.00390022675737},{"uuid":"c7b6f927-fd3b-40ac-9b25-d682eec2d3ac","recording":"6eeca672-f3ec-41ca-a551-54ddec0dd1a7","start_time":0.0,"end_time":2.7921995464852607}],"clip_annotations":[{"uuid":"0e7a786e-48e4-4424-a8da-bf080bdefd9e","clip":"b283a71f-4aa8-4ee2-ac83-7c7d5d0af2bc","created_on":"2023-11-23T20:44:32.913233"},{"uuid":"af9713a7-2bf4-45df-bdff-63d47de34d71","clip":"e2ec7571-bfe1-4682-8d4d-15e3e34edfc9","created_on":"2023-11-23T20:44
:32.913255"},{"uuid":"70738dff-3c98-4838-979c-0ed073edd0ea","clip":"ab5bcc8f-078e-4194-97a4-763998d289fa","created_on":"2023-11-23T20:44:32.913264"},{"uuid":"fa0fc36e-0c1a-450f-900b-5a32df17a159","clip":"dd9ad126-8b15-4121-84f5-f59dc3f85802","created_on":"2023-11-23T20:44:32.913377"},{"uuid":"178bf156-2bab-4970-8a76-4abfdc4b31b7","clip":"0a53e14f-f51c-4ade-9567-87c2b02bd197","created_on":"2023-11-23T20:44:32.913386"},{"uuid":"dc71d06a-54d6-4fad-ac6a-205bbee7ec96","clip":"ae455d97-d1fa-43a1-b179-907bee8471e6","created_on":"2023-11-23T20:44:32.913392"},{"uuid":"b418abd6-52cc-4ef4-9725-33d3d38b7878","clip":"0b42c9e1-0289-4f03-b3cb-10859a80103e","created_on":"2023-11-23T20:44:32.913400"},{"uuid":"d18ca56f-2f13-44dd-be43-f995e4d2edb6","clip":"56329c3c-5beb-48ed-8a79-be4e9bd2d9ed","created_on":"2023-11-23T20:44:32.913415"},{"uuid":"8dfa7d26-5f8b-4f37-bf0a-c031373b22f7","clip":"57e0b23a-8663-49f3-8455-997e0f4d2b49","created_on":"2023-11-23T20:44:32.913430"},{"uuid":"ebc5e8d0-bd3e-4d01-95e0-d9a0ce337ac9","clip":"c7b6f927-fd3b-40ac-9b25-d682eec2d3ac","created_on":"2023-11-23T20:44:32.913437"}],"created_on":"2023-11-23T20:44:32.913488"}} \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 577a141..29e0c79 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -15,6 +15,7 @@ nav: - Reference: - data: reference/data.md - io: reference/io.md + - arrays: reference/arrays.md - audio: reference/audio.md - geometry: reference/geometry.md - evaluation: reference/evaluation.md @@ -100,6 +101,9 @@ markdown_extensions: - pymdownx.critic - pymdownx.keys - pymdownx.details + - pymdownx.emoji: + emoji_index: !!python/name:material.extensions.emoji.twemoji + emoji_generator: !!python/name:material.extensions.emoji.to_svg - pymdownx.superfences: preserve_tabs: true custom_fences: diff --git a/src/soundevent/arrays/__init__.py b/src/soundevent/arrays/__init__.py new file mode 100644 index 0000000..61d15e6 --- /dev/null +++ b/src/soundevent/arrays/__init__.py @@ -0,0 +1,50 @@ +"""Module for manipulation of xarray.DataArray objects. + +This module provides functions for manipulating xarray.DataArray objects, +including creating range dimensions, setting dimension attributes, cropping and +extending axes, getting dimension ranges and widths, and setting values at +specific positions. +""" + +from soundevent.arrays.attributes import ArrayAttrs, DimAttrs +from soundevent.arrays.dimensions import ( + Dimensions, + create_frequency_dim_from_array, + create_frequency_range, + create_range_dim, + create_time_dim_from_array, + create_time_range, + estimate_dim_step, + get_dim_range, + get_dim_step, + get_dim_width, + set_dim_attrs, +) +from soundevent.arrays.operations import ( + crop_dim, + extend_dim, + normalize, + set_value_at_pos, + to_db, +) + +__all__ = [ + "ArrayAttrs", + "DimAttrs", + "Dimensions", + "create_frequency_dim_from_array", + "create_frequency_range", + "create_range_dim", + "create_time_dim_from_array", + "create_time_range", + "crop_dim", + "estimate_dim_step", + "extend_dim", + "get_dim_range", + "get_dim_step", + "get_dim_width", + "set_dim_attrs", + "set_value_at_pos", + "normalize", + "to_db", +] diff --git a/src/soundevent/arrays/attributes.py b/src/soundevent/arrays/attributes.py new file mode 100644 index 0000000..e453546 --- /dev/null +++ b/src/soundevent/arrays/attributes.py @@ -0,0 +1,109 @@ +"""Standard attributes for acoustic data arrays. 
+
+This module provides enumerations for commonly used attributes to describe
+dimensions and arrays of numerical data in computational acoustics tasks.
+These attributes are based on a subset of the Climate and Forecast (CF)
+[conventions](https://cfconventions.org/), a widely used standard for
+describing scientific data.
+
+The module includes enums for:
+
+* Dimension attributes
+* Array attributes
+
+By using these standard attributes, you can ensure interoperability and
+consistency in your acoustic data representation.
+
+For a complete list of CF conventions attributes, refer to the
+[Attribute Conventions](https://cfconventions.org/Data/cf-conventions/cf-conventions-1.11/cf-conventions.html#attribute-appendix).
+"""
+
+from enum import Enum
+
+__all__ = [
+    "DimAttrs",
+    "ArrayAttrs",
+]
+
+
+class DimAttrs(str, Enum):
+    """Standard attribute names for acoustic data dimensions.
+
+    This enumeration defines standard attribute names used to describe
+    dimensions of acoustic data arrays. These attributes follow the CF
+    conventions and provide a consistent way to represent information about
+    dimensions, such as units, standard names, and long names.
+    """
+
+    units = "units"
+    """Attribute name for the units of a dimension.
+
+    This specifies the physical quantity represented by the dimension, such as
+    'seconds' for time or 'meters' for distance. It follows the UDUNITS
+    standard for unit symbols.
+    """
+
+    standard_name = "standard_name"
+    """Attribute name for the standard name of a dimension.
+
+    As defined by the CF conventions. This provides a consistent way to
+    identify dimensions across different datasets.
+    """
+
+    long_name = "long_name"
+    """Attribute name for a human-readable description of the dimension.
+
+    This can be more detailed than the standard name and provide additional
+    context for users.
+    """
+
+    step = "step"
+    """Attribute name for the step size of a range dimension.
+
+    Specifies the distance between consecutive values in the range, which
+    might not be explicitly stored as a coordinate value. If not present, the
+    dimension is assumed to be irregularly spaced. Not a standard CF
+    attribute.
+    """
+
+
+class ArrayAttrs(str, Enum):
+    """Standard attribute names for acoustic data arrays.
+
+    This enumeration defines standard attribute names used to describe
+    properties of acoustic data arrays. These attributes follow the CF
+    conventions and provide a consistent way to represent information about
+    the data.
+    """
+
+    units = "units"
+    """Attribute name for the units of an array variable.
+
+    This specifies the physical quantity represented by the array, such as
+    'dB' for sound pressure level or 'meters' for distance. It follows the
+    UDUNITS standard for unit symbols.
+    """
+
+    standard_name = "standard_name"
+    """Attribute name for the standard name of an array variable.
+
+    As defined by the CF conventions. This provides a consistent way to
+    identify arrays across different datasets.
+    """
+
+    long_name = "long_name"
+    """Attribute name for a human-readable description of the array variable.
+
+    This can be more detailed than the standard name and provide additional
+    context for users.
+    """
+
+    comment = "comment"
+    """Attribute name for any additional comments or explanations about the
+    array variable.
+    """
+
+    references = "references"
+    """Attribute name for references to external documentation or resources
+    that provide more information about the array variable.
+ """ diff --git a/src/soundevent/arrays/dimensions.py b/src/soundevent/arrays/dimensions.py new file mode 100644 index 0000000..203067e --- /dev/null +++ b/src/soundevent/arrays/dimensions.py @@ -0,0 +1,609 @@ +"""Creating and manipulating DataArray dimensions in computational acoustics. + +This module provides functions to: + +* **Define standard dimensions:** Quickly create common dimensions in +computational acoustics like 'time', 'frequency', 'channel', 'category', and +'feature' using the `Dimensions` enumeration. + +* **Build flexible data structures:** Construct range-based dimensions +(e.g., for time or frequency) with desired start, stop, and step values +using `create_range_dim`. + +* **Work with time series:** Generate time dimensions from arrays or given +parameters with `create_time_range` and `create_time_dim_from_array`. + +* **Handle frequency representations:** Create frequency dimensions from +arrays or specified ranges with `create_frequency_range` and +`create_frequency_dim_from_array`. + +* **Modify and extract metadata:** Set dimension attributes +(`set_dim_attrs`), retrieve dimension ranges (`get_dim_range`), calculate +dimension width (`get_dim_width`), and estimate dimension step size +(`get_dim_step`). + +""" + +from enum import Enum +from typing import Optional, Tuple + +import numpy as np +import xarray as xr +from numpy.typing import DTypeLike + +from soundevent.arrays.attributes import DimAttrs + +__all__ = [ + "Dimensions", + "create_frequency_dim_from_array", + "create_frequency_range", + "create_range_dim", + "create_time_dim_from_array", + "create_time_range", + "estimate_dim_step", + "get_dim_range", + "get_dim_step", + "get_dim_width", + "set_dim_attrs", +] + +TIME_UNITS = "s" +TIME_STANDARD_NAME = "time" +TIME_LONG_NAME = "Time since start of recording" + +FREQUENCY_UNITS = "Hz" +FREQUENCY_STANDARD_NAME = "frequency" +FREQUENCY_LONG_NAME = "Frequency" + + +class Dimensions(str, Enum): + """Defines standard dimension names for computational acoustics arrays. + + This enumeration provides convenient and descriptive names for dimensions + essential to representing acoustic data + + Notes + ----- + Use these dimension names to ensure consistency and clarity in your code. + """ + + time = "time" + """Name for the time dimension of an array. + + This dimension represents time in seconds and should monotonically increase + from the start to the end of the array. While generally regularly spaced, + it may contain missing values or irregular spacing in special cases. + """ + + frequency = "frequency" + """Name for the frequency dimension of an array. + + This dimension represents frequency in Hz and should monotonically increase + from the start to the end of the array. Generally regularly spaced, it may + contain irregular spacing, such as with a logarithmic frequency scale or + custom frequency bins. + """ + + channel = "channel" + """Name for the channel dimension of an array. + + This dimension represents the channel number of a multi-channel array, + typically used in multi-channel audio recordings or spectrograms. Each + channel corresponds to a distinct audio source or microphone in the + recording. + """ + + category = "category" + """Name for the category dimension of an array. + + This dimension represents a categorical variable or label for each element + in the array. If the original data is not categorical, it's converted + to categorical data. Each value should be a string or integer label + corresponding to a category or class. 
+ """ + + feature = "feature" + """Name for the feature dimension of an array. + + This dimension represents a feature or numerical descriptor of the data. + It's not limited to feature extraction results but can also include + hand-measured or derived features. If an array contains multiple features, + each feature should be stored along this dimension, with the name of + the feature stored as a coordinate variable. If the array has time and + frequency dimensions, the feature dimension then represents the feature + values at each time-frequency point. + """ + + +def create_range_dim( + name: str, + start: float, + stop: float, + step: Optional[float] = None, + size: Optional[int] = None, + dtype: DTypeLike = np.float64, + **attrs, +) -> xr.Variable: + """Create a range dimension. + + Most coordinates used in computational bioacoustics are regularly spaced + ranges. This function creates a range dimension with a specified start, + stop, and step size. It stores the start, end, and step values as attributes + on the coordinate. + + Parameters + ---------- + name : str + The name of the range dimension. + start : float + The start value of the range. + stop : float + The stop value of the range. + step : float + The step size between values in the range. + dtype : numpy.dtype or str, optional + The data type of the values in the range. + Defaults to np.float32. + **attrs + Additional attributes to store on the range dimension. + + Returns + ------- + xarray.Variable + A variable representing the range dimension. + + Notes + ----- + - The range is created using np.arange(start, stop, step, dtype). + - The variable has attributes 'start', 'end', and 'step' representing the + range parameters. + """ + if step is None: + if size is None: + raise ValueError("Either step or size must be provided.") + + step = (stop - start) / size + + coords = np.arange( + start=start, + stop=stop, + step=step, + dtype=dtype, + ) + + # NOTE: Remove the last element if it is greater than or equal to the stop + # value. This is necessary because np.arange includes the stop value if + # it is an exact multiple of the step size, but we want to exclude it. + if coords[-1] >= stop - step / 2: + coords = coords[:-1] + + return xr.Variable( + dims=name, + data=coords, + attrs={ + DimAttrs.step.value: step, + **attrs, + }, + ) + + +def create_time_range( + start_time: float, + end_time: float, + step: Optional[float] = None, + samplerate: Optional[float] = None, + name: str = Dimensions.time.value, + dtype: DTypeLike = np.float64, + **attrs, +) -> xr.Variable: + """Generate an xarray Variable representing a time range dimension. + + Creates a time range with specified start (in seconds), end (in seconds), + and the desired time step between values. + + Parameters + ---------- + start_time + Start of the time range (in seconds). + end_time + End of the time range (in seconds). + step + Step size between time values (in seconds). If not provided, + calculated as 1 / samplerate. + samplerate + Sampling rate (in Hz). Used to calculate step if step is not given. + If both step and samplerate are provided, step takes precedence. + name + Name of the time dimension. Defaults to 'time'. + dtype: NumPy-like dtype + Data type of the time values. Defaults to np.float64. + **attrs + Additional attributes for the xarray Variable. + + Returns + ------- + xarray.Variable + Variable containing the time range values. 
+ """ + if step is None: + if samplerate is None: + raise ValueError("Either step or samplerate must be provided.") + + step = 1.0 / samplerate + + return create_range_dim( + name=name, + start=start_time, + stop=end_time, + step=step, + dtype=dtype, + **{ + DimAttrs.units.value: TIME_UNITS, + DimAttrs.standard_name.value: TIME_STANDARD_NAME, + DimAttrs.long_name.value: TIME_LONG_NAME, + **attrs, + }, + ) + + +def create_frequency_range( + low_freq: float, + high_freq: float, + step: float, + name: str = Dimensions.frequency.value, + dtype: DTypeLike = np.float64, + **attrs, +) -> xr.Variable: + """Generate an xarray Variable representing a frequency range dimension. + + Creates a frequency range with a specified start (in Hz), end (in Hz), + and step size (in Hz). + + Parameters + ---------- + low_freq: float + Start of the frequency range (in Hz). + high_freq: float + End of the frequency range (in Hz). + step: float + Step size between frequency values (in Hz). + name: str + Name of the frequency dimension. Defaults to 'frequency'. + dtype: NumPy-like dtype + Data type of the frequency values. Defaults to np.float64. + **attrs + Additional attributes for the xarray Variable. + + Returns + ------- + xarray.Variable + Variable containing the frequency range values. + """ + return create_range_dim( + name=name, + start=low_freq, + stop=high_freq, + step=step, + dtype=dtype, + **{ + DimAttrs.units.value: FREQUENCY_UNITS, + DimAttrs.standard_name.value: FREQUENCY_STANDARD_NAME, + DimAttrs.long_name.value: FREQUENCY_LONG_NAME, + **attrs, + }, + ) + + +def set_dim_attrs( + array: xr.DataArray, + dim: str, + **attrs, +) -> xr.DataArray: + """Set the range of a dimension in a data array. + + Use this function to set the precise start and end values of a dimension + in a data array. This is useful when the coordinates represent the start + of a range, but you want to specify the end of the range as well. + + The start and end values are stored as attributes on the coordinates. + """ + coords = array.coords[dim] + coords.attrs.update(attrs) + return array + + +def get_dim_range( + array: xr.DataArray, + dim: str, +) -> Tuple[float, float]: + """Get the range of a dimension in a data array. + + Parameters + ---------- + array : xarray.DataArray + The data array from which to extract the dimension range. + dim : str + The name of the dimension. + + Returns + ------- + Tuple[Optional[float], Optional[float]] + A tuple containing the start and end values of the dimension range. + + Raises + ------ + KeyError + If the dimension is not found in the data array. + """ + index = array.indexes[dim] + return index.min(), index.max() + + +def get_dim_width(arr: xr.DataArray, dim: str) -> float: + """Get the width of a dimension in a data array. + + Parameters + ---------- + arr + The data array containing the dimension. + dim + The name of the dimension. + + Returns + ------- + float + The width of the dimension. + + Raises + ------ + KeyError + If the dimension is not found in the data array. + """ + start, end = get_dim_range(arr, dim) + return float(end - start) + + +def estimate_dim_step( + data: np.ndarray, + rtol: float = 1.0e-5, + atol: float = 1.0e-8, + check_tolerance: bool = True, +) -> float: + """Estimate the step size of a numerical array. + + Parameters + ---------- + data + The numerical array. + rtol + The relative tolerance used when checking if all values are within a + specified range of the mean step size. Defaults to 1e-5. 
+    atol
+        The absolute tolerance used when checking if all values are within a
+        specified range of the mean step size. Defaults to 1e-8.
+    check_tolerance
+        A flag indicating whether to perform a tolerance check on the
+        differences between consecutive values. If True (default), raises a
+        ValueError if the differences exceed the specified tolerances.
+
+    Returns
+    -------
+    float
+        The estimated step size of the array.
+
+    Raises
+    ------
+    ValueError
+        If `check_tolerance` is True and the differences between consecutive
+        values exceed the specified tolerances (indicating an irregular step
+        size).
+
+    Notes
+    -----
+    This function calculates the mean of the differences between consecutive
+    values in the array. If `check_tolerance` is True, it verifies that all
+    differences are within a specified tolerance (defined by `rtol` and
+    `atol`) of the calculated mean step size. If not, it raises a
+    `ValueError` indicating an irregular step size.
+
+    This function assumes the array values are numerical and equidistant
+    (constant step size) unless the tolerance check fails.
+    """
+    steps = np.diff(data)
+    mean_step = steps.mean()
+
+    if (
+        check_tolerance
+        and not np.isclose(
+            steps,
+            mean_step,
+            rtol=rtol,
+            atol=atol,
+        ).all()
+    ):
+        raise ValueError("Array values do not have a consistent step size.")
+
+    return mean_step
+
+
+def get_dim_step(
+    arr: xr.DataArray,
+    dim: str,
+    rtol: float = 1.0e-5,
+    atol: float = 1.0e-8,
+    check_tolerance: bool = True,
+    estimate_step: bool = True,
+) -> float:
+    """Calculate the step size between values along a dimension in a DataArray.
+
+    Parameters
+    ----------
+    arr : xr.DataArray
+        The input DataArray.
+    dim : str
+        The name of the dimension for which to calculate the step size.
+    rtol : float, optional
+        The relative tolerance used when checking if all coordinate
+        differences are within a specified range of the mean step size.
+        Defaults to 1e-5.
+    atol : float, optional
+        The absolute tolerance used when checking if all coordinate
+        differences are within a specified range of the mean step size.
+        Defaults to 1e-8.
+    check_tolerance : bool, optional
+        A flag indicating whether to perform a tolerance check on the
+        coordinate differences. If True (default), raises a ValueError if
+        the differences exceed the specified tolerances.
+    estimate_step : bool, optional
+        A flag indicating whether to estimate the step size if not present
+        in the dimension attributes. If True (default), calculates the mean
+        step size from the coordinate values. Otherwise, raises a ValueError
+        if the step size is not found in the dimension attributes.
+
+    Returns
+    -------
+    float
+        The calculated step size (spacing) between consecutive values along
+        the specified dimension.
+
+    Raises
+    ------
+    ValueError
+        If `check_tolerance` is True and the coordinate differences exceed
+        the specified tolerances (indicating an irregular step size).
+
+    Notes
+    -----
+    This function first attempts to retrieve the step size from the
+    dimension's attributes using the standard attribute name 'step' defined
+    in the `DimAttrs` enumeration. If the attribute is not present, it
+    calculates the step size by taking the mean of the differences between
+    consecutive coordinate values.
+
+    If `check_tolerance` is True, the function verifies that all coordinate
+    differences are within a specified tolerance (defined by `rtol` and
+    `atol`) of the calculated mean step size. If not, it raises a
+    `ValueError` indicating an irregular step size.
+
+    This function assumes the DataArray coordinates are numerical and
+    equidistant (constant step size) unless a valid step size attribute
+    is present or the tolerance check fails.
+    """
+    coord = arr.coords[dim]
+    attrs = coord.attrs
+
+    if DimAttrs.step.value in attrs:
+        return attrs[DimAttrs.step.value]
+
+    if not estimate_step:
+        raise ValueError(
+            f"Step size not found in the '{dim}' dimension attributes."
+        )
+
+    return estimate_dim_step(
+        coord.data,
+        rtol=rtol,
+        atol=atol,
+        check_tolerance=check_tolerance,
+    )
+
+
+def create_time_dim_from_array(
+    coords: np.ndarray,
+    name: str = Dimensions.time.value,
+    dtype: Optional[DTypeLike] = None,
+    step: Optional[float] = None,
+    samplerate: Optional[float] = None,
+    estimate_step: bool = False,
+    **kwargs,
+) -> xr.Variable:
+    """Create a time dimension from an array of time values.
+
+    Parameters
+    ----------
+    coords
+        The time values (in seconds).
+    name
+        The name of the time dimension.
+    dtype
+        The data type of the time values. If None, the data type is inferred
+        from the input array.
+    step
+        The step size between time values (in seconds). If given, it is
+        stored in the 'step' attribute of the dimension.
+    samplerate
+        Sampling rate (in Hz). If given, the step size is set to
+        1 / samplerate, taking precedence over `step`.
+    estimate_step
+        Whether to estimate the step size from the array values when neither
+        `step` nor `samplerate` is given. Defaults to False.
+    **kwargs
+        Additional attributes to store on the time dimension.
+
+    Returns
+    -------
+    xarray.Variable
+        The time dimension variable.
+    """
+    if dtype is None:
+        dtype = coords.dtype
+
+    if samplerate is not None:
+        step = 1 / samplerate
+
+    if estimate_step and step is None:
+        step = estimate_dim_step(coords)
+
+    attrs = {
+        DimAttrs.units.value: TIME_UNITS,
+        DimAttrs.standard_name.value: TIME_STANDARD_NAME,
+        DimAttrs.long_name.value: TIME_LONG_NAME,
+        **kwargs,
+    }
+
+    if step is not None:
+        attrs[DimAttrs.step.value] = step
+
+    return xr.Variable(
+        dims=name,
+        # Cast to the requested dtype (a no-op when inferred from the input).
+        data=coords.astype(dtype),
+        attrs=attrs,
+    )
+
+
+def create_frequency_dim_from_array(
+    coords: np.ndarray,
+    name: str = Dimensions.frequency.value,
+    step: Optional[float] = None,
+    estimate_step: bool = False,
+    dtype: Optional[DTypeLike] = None,
+    **kwargs,
+) -> xr.Variable:
+    """Create a frequency dimension from an array of frequency values.
+
+    Parameters
+    ----------
+    coords
+        The frequency values (in Hz).
+    name
+        The name of the frequency dimension.
+    step
+        The step size between frequency values (in Hz). If given, it is
+        stored in the 'step' attribute of the dimension.
+    estimate_step
+        Whether to estimate the step size from the array values when `step`
+        is not given. Defaults to False.
+    dtype
+        The data type of the frequency values. If None, the data type is
+        inferred from the input array.
+    **kwargs
+        Additional attributes to store on the frequency dimension.
+
+    Returns
+    -------
+    xarray.Variable
+        The frequency dimension variable.
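+
+    Examples
+    --------
+    Frequency bins of a hypothetical 256-point FFT at 44.1 kHz:
+
+    >>> import numpy as np
+    >>> freqs = np.fft.rfftfreq(256, d=1 / 44100)
+    >>> freq_dim = create_frequency_dim_from_array(
+    ...     freqs, estimate_step=True
+    ... )
+    >>> freq_dim.attrs["units"]
+    'Hz'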
+ """ + if dtype is None: + dtype = coods.dtype + + if estimate_step and step is None: + step = estimate_dim_step(coods) + + attrs = { + DimAttrs.units.value: FREQUENCY_UNITS, + DimAttrs.standard_name.value: FREQUENCY_STANDARD_NAME, + DimAttrs.long_name.value: FREQUENCY_LONG_NAME, + **kwargs, + } + + if step is not None: + attrs[DimAttrs.step.value] = step + + return xr.Variable( + dims=name, + data=coods, + attrs=attrs, + ) diff --git a/src/soundevent/arrays/operations.py b/src/soundevent/arrays/operations.py new file mode 100644 index 0000000..f01ac97 --- /dev/null +++ b/src/soundevent/arrays/operations.py @@ -0,0 +1,577 @@ +"""Module for manipulation of xarray.DataArray objects.""" + +from typing import Any, Callable, List, Optional, Union + +import numpy as np +import xarray as xr +from numpy.typing import DTypeLike +from xarray.core.types import InterpOptions + +from soundevent.arrays.dimensions import ( + create_range_dim, + get_dim_range, + get_dim_step, +) + +__all__ = [ + "center", + "to_db", + "crop_dim", + "extend_dim", + "normalize", + "offset", + "scale", + "set_value_at_pos", +] + + +def crop_dim( + arr: xr.DataArray, + dim: str, + start: Optional[float] = None, + stop: Optional[float] = None, + right_closed: bool = False, + left_closed: bool = True, + eps: float = 10e-6, +) -> xr.DataArray: + """Crop a dimension of a data array to a specified range. + + Parameters + ---------- + arr + The input data array to crop. + dim + The name of the dimension to crop. + start + The start value of the cropped range. If None, the current start value + of the axis is used. Defaults to None. + stop + The stop value of the cropped range. If None, the current stop value of + the axis is used. Defaults to None. + right_closed + Whether the right boundary of the cropped range is closed. + Defaults to False. + left_closed + Whether the left boundary of the cropped range is closed. + Defaults to True. + eps + A small value added to start and subtracted from stop to ensure open + intervals. Defaults to 10e-6. + + Returns + ------- + xarray.DataArray + The cropped data array. + + Raises + ------ + ValueError + If the coordinate for the specified dimension does not have 'start' and + 'stop' attributes, or if the specified range is outside the current + axis range. + + Notes + ----- + The function crops the specified dimension of the data array to the range + [start, stop). + The `right_closed` and `left_closed` parameters control whether the + boundaries of the cropped range are closed or open. + A small value `eps` is added to start and subtracted from stop to ensure + open intervals if `right_closed` or `left_closed` is False. + The 'start' and 'stop' attributes of the cropped dimension coordinate are + updated accordingly. + """ + current_start, current_stop = get_dim_range(arr, dim) + + if start is None: + left_closed = True + start = current_start + + if stop is None: + right_closed = True + stop = current_stop + + if start > stop: + raise ValueError( + f"Start value {start} must be less than stop value {stop}" + ) + + if start < current_start or stop > current_stop: + raise ValueError( + f"Cannot select axis '{dim}' from {start} to {stop}. 
" + f"Axis range is {current_start} to {current_stop}" + ) + + slice_end = stop + if not right_closed: + slice_end = stop - eps + + slice_start = start + if not left_closed: + slice_start = start + eps + + return arr.sel({dim: slice(slice_start, slice_end)}) + + +def extend_dim( + arr: xr.DataArray, + dim: str, + start: Optional[float] = None, + stop: Optional[float] = None, + fill_value: float = 0, + eps: float = 10e-6, + left_closed: bool = True, + right_closed: bool = False, +) -> xr.DataArray: + """Extend a dimension of a data array to a specified range. + + Parameters + ---------- + arr + The input data array to extend. + dim + The name of the dimension to extend. + start + The start value of the extended range. + stop + The stop value of the extended range. + fill_value + The value to fill for missing data in the extended range. + Defaults to 0. + eps + A small value added to start and subtracted from stop to ensure open + intervals. Defaults to 10e-6. + left_closed + Whether the left boundary of the extended range is closed. + Defaults to True. + right_closed + Whether the right boundary of the extended range is closed. + Defaults to False. + + Returns + ------- + xarray.DataArray + The extended data array. + + Raises + ------ + KeyError + If the dimension is not found in the data array. + + Notes + ----- + The function extends the specified dimension of the data array to the + range [start, stop). + If the specified range extends beyond the current axis range, the + function adds values to the beginning or end of the coordinate + array. + The 'start' and 'stop' attributes of the extended dimension coordinate + are updated accordingly. + """ + coord = arr.coords[dim] + coords = coord.data + + current_start, current_stop = get_dim_range(arr, dim) + + if start is None: + start = current_start + + if stop is None: + stop = current_stop + + if start > stop: + raise ValueError( + f"Start value {start} must be less than stop value {stop}" + ) + + step = get_dim_step(arr, dim) + + if left_closed: + start -= eps + + if right_closed: + stop += eps + + if start <= current_start - step: + new_coords = np.arange( + current_start - step, + start, + -step, + dtype=coord.dtype, + )[::-1] + coords = np.concatenate([new_coords, coords]) + + if stop >= current_stop: + new_coords = np.arange( + coords[-1], + stop, + step, + dtype=coord.dtype, + )[1:] + coords = np.concatenate([coords, new_coords]) + + arr = arr.reindex( + {dim: coords}, + fill_value=fill_value, # type: ignore + ) + + arr.coords[dim].attrs.update( + start=start, + stop=stop, + ) + + return arr + + +def set_value_at_pos( + array: xr.DataArray, + value: Any, + **query, +) -> xr.DataArray: + """Set a value at a specific position in a data array. + + Parameters + ---------- + array + The input data array. + value + The value to set at the specified position. + **query : dict + Keyword arguments specifying the position in each dimension where the + value should be set. Keys are dimension names, values are the + positions. + + Returns + ------- + xarray.DataArray + The modified data array with the value set at the specified position. + + Raises + ------ + ValueError + If a dimension specified in the query is not found in the data array. + KeyError + If the position specified in the query is outside the range of the + corresponding dimension. + + Notes + ----- + Modifies the input data array in-place. + + When specifying approximate positions (e.g., `x=1.5`) the value will be set + at the closest coordinate to the left of the specified value. 
+    This aligns with how coordinates are often interpreted as the
+    boundaries of intervals.
+
+    If `value` is a tuple or list, its shape must match the remaining
+    (unqueried) dimensions of the array, and the value will be set at the
+    specified position along each queried dimension.
+
+    Examples
+    --------
+    >>> import xarray as xr
+    >>> import numpy as np
+    >>> data = np.zeros((3, 3))
+    >>> coords = {"x": np.arange(3), "y": np.arange(3)}
+    >>> array = xr.DataArray(data, coords=coords, dims=("x", "y"))
+
+    Setting a single value:
+    >>> array = set_value_at_pos(array, 1, x=1, y=1)
+    >>> print(array)
+    <xarray.DataArray (x: 3, y: 3)>
+    array([[0., 0., 0.],
+           [0., 1., 0.],
+           [0., 0., 0.]])
+
+    Setting a value at an approximate position:
+    >>> array = xr.DataArray(data, coords=coords, dims=("x", "y"))
+    >>> array = set_value_at_pos(array, 1, x=1.5, y=1.5)
+    >>> print(array)
+    <xarray.DataArray (x: 3, y: 3)>
+    array([[0., 0., 0.],
+           [0., 1., 0.],
+           [0., 0., 0.]])
+
+    Setting a multi-dimensional value:
+    >>> array = xr.DataArray(data, coords=coords, dims=("x", "y"))
+    >>> value = np.array([1, 2, 3])
+    >>> array = set_value_at_pos(array, value, x=1)
+    >>> print(array)
+    <xarray.DataArray (x: 3, y: 3)>
+    array([[0., 0., 0.],
+           [1., 2., 3.],
+           [0., 0., 0.]])
+    """
+    dims = {dim: n for n, dim in enumerate(array.dims)}
+    indexer: List[Union[slice, int]] = [slice(None) for _ in range(array.ndim)]
+
+    for dim, coord in query.items():
+        if dim not in dims:
+            raise ValueError(f"Dimension {dim} not found in array.")
+
+        start, stop = get_dim_range(array, dim)
+
+        if coord < start or coord > stop:
+            raise KeyError(
+                f"Position {coord} is outside the range of dimension {dim}."
+            )
+
+        index = array.indexes[dim].get_slice_bound(coord, "right")
+        indexer[dims[dim]] = index - 1
+
+    if isinstance(value, (tuple, list)):
+        # Convert sequences to an array so they broadcast correctly.
+        value = np.array(value)
+
+    array.data[tuple(indexer)] = value
+    return array
+
+
+def offset(
+    arr: xr.DataArray,
+    val: float,
+) -> xr.DataArray:
+    """Offset the values of a data array by a constant value.
+
+    Parameters
+    ----------
+    arr
+        The input data array to offset.
+    val
+        The value to add to the data array.
+
+    Returns
+    -------
+    xarray.DataArray
+        The offset data array.
+
+    Notes
+    -----
+    The offset needed to recover the original values is stored in the
+    `add_offset` attribute of the output data array.
+    """
+    with xr.set_options(keep_attrs=True):
+        return (arr + val).assign_attrs(add_offset=-val)
+
+
+def scale(arr: xr.DataArray, val: float) -> xr.DataArray:
+    """Scale the values of a data array by a constant value.
+
+    Parameters
+    ----------
+    arr
+        The input data array to scale.
+    val
+        The value to multiply the data array by.
+
+    Returns
+    -------
+    xarray.DataArray
+        The scaled data array.
+
+    Notes
+    -----
+    The factor needed to recover the original values is stored in the
+    `scale_factor` attribute of the output data array.
+    """
+    with xr.set_options(keep_attrs=True):
+        return (arr * val).assign_attrs(scale_factor=1 / val)
+
+
+def normalize(
+    arr: xr.DataArray,
+) -> xr.DataArray:
+    """Normalize the values of a data array to the range [0, 1].
+
+    Parameters
+    ----------
+    arr
+        The input data array to normalize.
+
+    Returns
+    -------
+    xarray.DataArray
+        The normalized data array.
+
+    Notes
+    -----
+    This function stores the offset and scale factor used for normalization
+    as attributes in the output data array.
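+
+    Examples
+    --------
+    A small illustrative array:
+
+    >>> import numpy as np
+    >>> import xarray as xr
+    >>> arr = xr.DataArray(np.array([2.0, 4.0, 6.0]), dims=("x",))
+    >>> normalize(arr).data.tolist()
+    [0.0, 0.5, 1.0]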
+ """ + offset_val = arr.min().item() + scale_val = arr.max().item() - offset_val + + if scale_val == 0: + return offset(arr, -offset_val) + + arr = offset(arr, -offset_val) + return scale(arr, 1 / scale_val) + + +def center( + arr: xr.DataArray, +) -> xr.DataArray: + """Center the values of a data array around zero. + + Parameters + ---------- + arr + The input data array to center. + + Returns + ------- + xarray.DataArray + The centered data array. + + Notes + ----- + This function stores the offset used for centering as an attribute in the + output data array. + """ + return offset(arr, -arr.mean().item()) + + +def resize( + arr: xr.DataArray, + method: InterpOptions = "linear", + dtype: DTypeLike = np.float64, + **dims: int, +) -> xr.DataArray: + """Resize a data array to the specified dimensions. + + Parameters + ---------- + arr + The input data array to resize. + method + The interpolation method to use. Defaults to 'linear'. + dtype + The data type of the resized data array. Defaults to np.float64. + **dims + The new dimensions for each axis of the data array. + + Returns + ------- + xarray.DataArray + The resized data array. + + Raises + ------ + ValueError + If the new dimensions do not match the current dimensions of the data + array. + + Notes + ----- + This function resizes the data array to the specified dimensions. The + function does not modify the data array in place. + """ + new_coords = {} + + for dim, size in dims.items(): + if dim not in arr.dims: + raise ValueError(f"Dimension {dim} not found in array.") + + step = get_dim_step(arr, dim) + start, stop = get_dim_range(arr, dim) + + new_coords[dim] = create_range_dim( + name=dim, + start=start, + stop=stop + step, + size=size, + dtype=dtype, + ) + + return arr.interp(coords=new_coords, method=method) + + +def to_db( + arr: xr.DataArray, + ref: Union[float, Callable[[xr.DataArray], float]] = 1.0, + amin: float = 1e-10, + min_db: Optional[float] = -80.0, + max_db: Optional[float] = None, + power: int = 1, +) -> xr.DataArray: + """Compute the decibel values of a data array. + + Parameters + ---------- + arr + The input data array. + ref + The reference value for the decibel computation. Defaults to 1.0. + amin + Minimum threshold for the input data array. Defaults to 1e-10. All + values below this threshold are replaced with this value before + computing the decibel values. + min_db + The minimum decibel value for the output data array. Defaults to 80.0. + All values below this threshold are replaced with this value. If None, + no minimum threshold is applied. + max_db + The maximum decibel value for the output data array. Defaults to None. + All values above this threshold are replaced with this value. If None, + no maximum threshold is applied. + + Returns + ------- + xarray.DataArray + The data array with decibel values computed. + + Notes + ----- + The function computes the decibel values of the input data array using the + formula 10 * log10(arr / ref). + + This function is heavily inspired by and includes modifications of code + originally found in librosa, available at + https://github.com/librosa/librosa/. + The original code is licensed under the ISC license. + + Original copyright notice: + Copyright (c) 2013--2023, librosa development team. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+
+    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+    SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+    IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+    """
+    if amin <= 0:
+        raise ValueError("amin must be greater than 0.")
+
+    if callable(ref):
+        ref = ref(arr)
+
+    if ref <= 0:
+        raise ValueError("ref must be greater than 0.")
+
+    a_arr = np.maximum(np.power(amin, 1 / power), arr)
+    a_ref = np.maximum(np.power(amin, 1 / power), np.power(ref, 1 / power))
+
+    data = 10.0 * power * np.log10(a_arr) - 10.0 * power * np.log10(a_ref)
+
+    if min_db is not None:
+        data = np.maximum(data, min_db)
+
+    if max_db is not None:
+        data = np.minimum(data, max_db)
+
+    # Update units to reflect the decibel values
+    attrs = arr.attrs.copy()
+    new_units = "dB" if "units" not in attrs else f"{attrs['units']} dB"
+    attrs["units"] = new_units
+
+    return xr.DataArray(
+        data=data,
+        dims=arr.dims,
+        coords=arr.coords,
+        attrs=attrs,
+    )

diff --git a/src/soundevent/audio/io.py b/src/soundevent/audio/io.py
index bbe4624..6d27934 100644
--- a/src/soundevent/audio/io.py
+++ b/src/soundevent/audio/io.py
@@ -12,6 +12,7 @@ import xarray as xr
 
 from soundevent import data
+from soundevent.arrays import Dimensions, create_time_range
 from soundevent.audio.chunks import parse_into_chunks
 from soundevent.audio.media_info import extract_media_info_from_chunks
 from soundevent.audio.raw import RawData
@@ -128,20 +129,16 @@ def load_recording(
         data=data,
         dims=("time", "channel"),
         coords={
-            "time": np.linspace(
-                0,
-                recording.duration,
-                data.shape[0],
-                endpoint=False,
+            Dimensions.time.value: create_time_range(
+                start_time=0,
+                end_time=recording.duration,
+                samplerate=recording.samplerate,
             ),
-            "channel": range(data.shape[1]),
+            Dimensions.channel.value: range(data.shape[1]),
         },
         attrs={
-            "recording_id": recording.uuid,
-            "path": recording.path,
-            "time_units": "seconds",
-            "time_expansion": recording.time_expansion,
-            "samplerate": recording.samplerate,
+            "recording_id": str(recording.uuid),
+            "path": str(recording.path),
         },
     )
 
@@ -173,7 +170,7 @@ def load_clip(
 
     offset = int(np.floor(clip.start_time * samplerate))
     duration = clip.end_time - clip.start_time
-    samples = int(np.ceil(duration * samplerate))
+    samples = int(np.floor(duration * samplerate))
 
     path = recording.path
     if audio_dir is not None:
@@ -185,11 +182,11 @@ def load_clip(
         samples=samples,
     )
 
-    # Adjust start and end time to be on sample boundaries. This is necessary
-    # because the specified start and end time might not align precisely with
-    # the sampling moments of the audio. By aligning with the sample
-    # boundaries, we ensure that any time location within the clip, relative to
-    # the original audio file, remains accurate.
+    # NOTE: Adjust start and end time to be on sample boundaries. This is
+    # necessary because the specified start and end time might not align
+    # precisely with the sampling moments of the audio. By aligning with the
+    # sample boundaries, we ensure that any time location within the clip,
+    # relative to the original audio file, remains accurate.
start_time = offset / samplerate end_time = start_time + samples / samplerate @@ -197,21 +194,17 @@ def load_clip( data=data, dims=("time", "channel"), coords={ - "time": np.linspace( - start_time, - end_time, - data.shape[0], - endpoint=False, + Dimensions.time.value: create_time_range( + start_time=start_time, + end_time=end_time, + samplerate=samplerate, ), - "channel": range(data.shape[1]), + Dimensions.channel.value: range(data.shape[1]), }, attrs={ - "recording_id": recording.uuid, - "clip_id": clip.uuid, - "path": recording.path, - "time_units": "seconds", - "time_expansion": recording.time_expansion, - "samplerate": recording.samplerate, + "recording_id": str(recording.uuid), + "clip_id": str(clip.uuid), + "path": str(recording.path), }, ) diff --git a/src/soundevent/audio/scaling.py b/src/soundevent/audio/scaling.py index 8b8ea06..149712c 100644 --- a/src/soundevent/audio/scaling.py +++ b/src/soundevent/audio/scaling.py @@ -2,9 +2,9 @@ from typing import Literal -import numpy as np import xarray as xr +from soundevent.arrays import get_dim_step from soundevent.audio.spectrum import ( amplitude_to_db, db_to_amplitude, @@ -34,7 +34,7 @@ def clamp_amplitude( Parameters ---------- - spectogram + spec Spectrogram with clamped amplitude values. min_dB : float Minimum amplitude value in dB. Defaults to -80. @@ -56,18 +56,7 @@ def clamp_amplitude( min_dB = db_to_power(min_dB) max_dB = db_to_power(max_dB) - data = np.clip(spec.data, min_dB, max_dB) - - return xr.DataArray( - data, - dims=spec.dims, - coords=spec.coords, - attrs={ - **spec.attrs, - "min_dB": min_dB, - "max_dB": max_dB, - }, - ) + return spec.clip(min=min_dB, max=max_dB, keep_attrs=True) def scale_amplitude( @@ -131,9 +120,12 @@ def pcen(spec: xr.DataArray, **kwargs) -> xr.DataArray: Uses librosa.pcen implementation. If sr and hop_length are not provided, they will be inferred from the spectrogram attributes. """ - sr = spec.attrs["samplerate"] + step = get_dim_step(spec, "time") + sr = 1 / step + hop_length = int(spec.attrs["hop_size"] * sr) time_axis: int = spec.get_axis_num("time") # type: ignore + data = pcen_core( spec.data, **{ @@ -143,6 +135,7 @@ def pcen(spec: xr.DataArray, **kwargs) -> xr.DataArray: **kwargs, }, ) + return xr.DataArray( data, dims=spec.dims, diff --git a/src/soundevent/audio/spectrograms.py b/src/soundevent/audio/spectrograms.py index cd899ad..b467d1c 100644 --- a/src/soundevent/audio/spectrograms.py +++ b/src/soundevent/audio/spectrograms.py @@ -1,9 +1,19 @@ """Functions to compute several spectral representations of sound signals.""" +from typing import Callable, Literal, Optional, Union + import numpy as np import xarray as xr from scipy import signal +from soundevent.arrays import ( + ArrayAttrs, + Dimensions, + create_frequency_dim_from_array, + create_time_dim_from_array, + get_dim_step, +) + __all__ = [ "compute_spectrogram", ] @@ -14,72 +24,100 @@ def compute_spectrogram( window_size: float, hop_size: float, window_type: str = "hann", + detrend: Union[str, Callable, Literal[False]] = False, + padded: bool = True, + boundary: Optional[Literal["zeros", "odd", "even", "constant"]] = "zeros", ) -> xr.DataArray: """Compute the spectrogram of a signal. + This function calculates the short-time Fourier transform (STFT), which decomposes + a signal into overlapping windows and computes the Fourier transform of each window. + Parameters ---------- - audio - The audio signal. We are assuming that this has two dimensions: time - and channel. The time dimension should be the first dimension. 
Also, - the data array should have a sample rate attribute. This is - automatically True if the audio signal is loaded using - [`audio.load_recording`][soundevent.audio.load_recording]. - window_size + audio: xr.DataArray + The audio signal. + window_size: float The duration of the STFT window in seconds. - hop_size - The duration of the STFT hop in seconds. - window_type - The type of window to use. This should be one of the window types - supported by [`scipy.signal.get_window`][scipy.signal.get_window]. + hop_size: float + The duration of the STFT hop (in seconds). This determines the time + step between consecutive STFT frames. + window_type: str + The type of window to use. Refer to + [`scipy.signal.get_window`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.get_window.html) + for supported types. + detrend: Union[str, Callable, Literal[False]] + Specifies how to detrend each STFT window. See + [`scipy.signal.stft`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.stft.html) + for options. Default is False (no detrending). + padded: bool + Indicates whether the input signal is zero-padded at the beginning and + end before performing the STFT. See + [`scipy.signal.stft`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.stft.html). + Default is True. + boundary: Optional[Literal["zeros", "odd", "even", "constant"]] + Specifies the boundary extension mode for padding the signal to perform + the STFT. See + [`scipy.signal.stft`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.stft.html). + Default is "zeros". Returns ------- spectrogram : xr.DataArray The spectrogram of the audio signal. This is a three-dimensional xarray data array with the dimensions frequency, time, and channel. - """ - # Get the sample rate - sample_rate = audio.attrs["samplerate"] - # Compute the number of samples in each window - nperseg = int(window_size * sample_rate) - - # Compute the number of samples to overlap - noverlap = int((window_size - hop_size) * sample_rate) - - # Compute the window - window = signal.get_window(window_type, nperseg) + Notes + ----- + **Time Bin Calculation:** + * The time axis of the spectrogram represents the center of each STFT window. + * The first time bin is centered at time t=hop_size / 2. + * Subsequent time bins are spaced by hop_size. + """ + samplerate = 1 / get_dim_step(audio, Dimensions.time.value) + time_axis: int = audio.get_axis_num(Dimensions.time.value) # type: ignore + nperseg = int(window_size * samplerate) + noverlap = int((window_size - hop_size) * samplerate) # Compute the spectrogram - frequencies, times, spectrogram = signal.spectrogram( + frequencies, times, spectrogram = signal.stft( audio.data, - fs=sample_rate, - window=window, + fs=samplerate, + window=window_type, nperseg=nperseg, noverlap=noverlap, - axis=0, - mode="magnitude", + return_onesided=True, + axis=time_axis, + detrend=detrend, # type: ignore + padded=padded, + boundary=boundary, # type: ignore + scaling="psd", ) - # Convert to xarray + # Compute the power spectral density + psd = np.abs(spectrogram) ** 2 + return xr.DataArray( - data=np.swapaxes(spectrogram, 1, 2), + data=np.swapaxes(psd, 1, 2), dims=("frequency", "time", "channel"), coords={ - "frequency": frequencies, - # The times returned by scipy.signal.spectrogram are - # relative to the start of the signal and are the center - # times of each window. 
We need to add the start time of
-            # the signal and subtract half of the window size to get
-            # the times relative to the start of the recording.
-            "time": times + audio.time.data[0] - window_size / 2,
-            "channel": audio.channel,
+            Dimensions.frequency.value: create_frequency_dim_from_array(
+                frequencies,
+                step=samplerate / nperseg,
+            ),
+            Dimensions.time.value: create_time_dim_from_array(
+                times + audio.time.data[0],
+                step=hop_size,
+            ),
+            Dimensions.channel.value: audio.channel,
         },
         attrs={
-            "samplerate": sample_rate,
+            **audio.attrs,
             "window_size": window_size,
             "hop_size": hop_size,
             "window_type": window_type,
+            ArrayAttrs.units.value: "V**2/Hz",
+            ArrayAttrs.standard_name.value: "spectrogram",
+            ArrayAttrs.long_name.value: "Power Spectral Density",
         },
     )

diff --git a/src/soundevent/geometry/__init__.py b/src/soundevent/geometry/__init__.py
index 278b3fb..7342d11 100644
--- a/src/soundevent/geometry/__init__.py
+++ b/src/soundevent/geometry/__init__.py
@@ -20,6 +20,7 @@
 )
 from soundevent.geometry.html import geometry_to_html
 from soundevent.geometry.operations import buffer_geometry, compute_bounds
+from soundevent.geometry.positions import get_geometry_point
 
 __all__ = [
     "GeometricFeature",
@@ -28,4 +29,5 @@
     "compute_geometric_features",
     "geometry_to_html",
     "geometry_to_shapely",
+    "get_geometry_point",
 ]

diff --git a/src/soundevent/geometry/positions.py b/src/soundevent/geometry/positions.py
new file mode 100644
index 0000000..6997ee9
--- /dev/null
+++ b/src/soundevent/geometry/positions.py
@@ -0,0 +1,124 @@
+from typing import Literal, Tuple
+
+import shapely
+
+from soundevent.data import Geometry
+from soundevent.geometry.conversion import geometry_to_shapely
+from soundevent.geometry.operations import compute_bounds
+
+__all__ = [
+    "get_geometry_point",
+]
+
+
+def get_geometry_point(
+    geometry: Geometry,
+    point_type: Literal[
+        "bottom-left",
+        "bottom-right",
+        "top-left",
+        "top-right",
+        "center-left",
+        "center-right",
+        "top-center",
+        "bottom-center",
+        "center",
+        "centroid",
+        "point_on_surface",
+    ] = "bottom-left",
+) -> Tuple[float, float]:
+    """Calculate the coordinates of a specific point within a geometry.
+
+    Parameters
+    ----------
+    geometry
+        The geometry object for which to calculate the point coordinates.
+    point_type
+        The specific point within the geometry to calculate coordinates for.
+        Defaults to 'bottom-left'.
+
+    Returns
+    -------
+    Tuple[float, float]
+        The coordinates of the specified point within the geometry.
+
+    Raises
+    ------
+    ValueError
+        If an invalid point type is specified.
+
+    Notes
+    -----
+    The following points are supported:
+    - 'bottom-left': The point defined by the start time and lowest frequency
+      of the geometry.
+    - 'bottom-right': The point defined by the end time and lowest frequency
+      of the geometry.
+    - 'top-left': The point defined by the start time and highest frequency
+      of the geometry.
+    - 'top-right': The point defined by the end time and highest frequency
+      of the geometry.
+    - 'center-left': The point defined by the start time and middle frequency
+      of the geometry.
+    - 'center-right': The point defined by the end time and middle frequency
+      of the geometry.
+    - 'top-center': The point defined by the middle time and highest
+      frequency of the geometry.
+    - 'bottom-center': The point defined by the middle time and lowest
+      frequency of the geometry.
+    - 'center': The point defined by the middle time and middle frequency
+      of the geometry.
+    - 'centroid': The centroid of the geometry.
+      Computed using the shapely library.
+    - 'point_on_surface': A point on the surface of the geometry. Computed
+      using the shapely library.
+
+    For all points except 'centroid' and 'point_on_surface', the time and
+    frequency values are calculated by first computing the bounds of the
+    geometry and then determining the appropriate values based on the
+    specified point type.
+    """
+    if point_type not in [
+        "bottom-left",
+        "bottom-right",
+        "top-left",
+        "top-right",
+        "center-left",
+        "center-right",
+        "top-center",
+        "bottom-center",
+        "center",
+        "centroid",
+        "point_on_surface",
+    ]:
+        raise ValueError(f"Invalid point type: {point_type}")
+
+    if point_type == "centroid":
+        shp_geom = geometry_to_shapely(geometry)
+        return shp_geom.centroid.coords[0]
+
+    if point_type == "point_on_surface":
+        shp_geom = geometry_to_shapely(geometry)
+        return shapely.point_on_surface(shp_geom).coords[0]
+
+    start_time, low_freq, end_time, high_freq = compute_bounds(geometry)
+
+    if point_type == "center":
+        return (start_time + end_time) / 2, (low_freq + high_freq) / 2
+
+    # Point types are named '<vertical>-<horizontal>', e.g. 'top-left'.
+    y, x = point_type.split("-")
+
+    time_pos = {
+        "left": start_time,
+        "center": (start_time + end_time) / 2,
+        "right": end_time,
+    }[x]
+
+    freq_pos = {
+        "bottom": low_freq,
+        "center": (low_freq + high_freq) / 2,
+        "top": high_freq,
+    }[y]
+
+    return time_pos, freq_pos

diff --git a/src/soundevent/types.py b/src/soundevent/types.py
new file mode 100644
index 0000000..9790044
--- /dev/null
+++ b/src/soundevent/types.py
@@ -0,0 +1,44 @@
+"""Common types and interfaces within bioacoustic analysis."""
+
+from abc import ABC, abstractmethod
+from typing import List, Optional
+
+from soundevent import data
+
+
+class ClassMapper(ABC):
+    """Abstract class for encoding and decoding labels."""
+
+    class_labels: List[str]
+
+    @abstractmethod
+    def encode(
+        self,
+        sound_event_annotation: data.SoundEventAnnotation,
+    ) -> Optional[str]:
+        """Map an annotation to a class name, or None if it has no class."""
+
+    @abstractmethod
+    def decode(self, label: str) -> List[data.Tag]:
+        """Map a class name back to the tags it represents."""
+
+    def transform(
+        self,
+        sound_event_annotation: data.SoundEventAnnotation,
+    ) -> Optional[int]:
+        """Encode an annotation and return its class index, if any."""
+        class_name = self.encode(sound_event_annotation)
+
+        if class_name not in self.class_labels:
+            return None
+
+        return self.class_labels.index(class_name)
+
+    def inverse_transform(self, class_index: int) -> List[data.Tag]:
+        """Decode a class index back into a list of tags."""
+        if class_index < 0 or class_index >= len(self.class_labels):
+            return []
+
+        class_name = self.class_labels[class_index]
+        return self.decode(class_name)
+
+    @property
+    def num_classes(self) -> int:
+        """Number of classes in the mapping."""
+        return len(self.class_labels)

diff --git a/tests/test_array/__init__.py b/tests/test_array/__init__.py
new file mode 100644
index 0000000..e69de29

diff --git a/tests/test_array/test_dimensions.py b/tests/test_array/test_dimensions.py
new file mode 100644
index 0000000..ab31c39
--- /dev/null
+++ b/tests/test_array/test_dimensions.py
@@ -0,0 +1,208 @@
+"""Test suite for soundevent.arrays.dimensions module."""
+
+import numpy as np
+import pytest
+import xarray as xr
+
+from soundevent import arrays
+
+
+def test_create_time_range_has_correct_attrs():
+    """Test create_time_range function."""
+    time_range = arrays.create_time_range(start_time=0, end_time=10, step=10)
+    assert time_range.attrs["units"] == "s"
+    assert time_range.attrs["long_name"] == "Time since start of recording"
+    assert time_range.attrs["standard_name"] == "time"
+
+
+def test_create_time_range_with_step():
+    """Test create_time_range with an explicit step."""
+    time_range = arrays.create_time_range(start_time=0, end_time=10, step=1)
diff --git a/tests/test_array/test_dimensions.py b/tests/test_array/test_dimensions.py
new file mode 100644
index 0000000..ab31c39
--- /dev/null
+++ b/tests/test_array/test_dimensions.py
@@ -0,0 +1,208 @@
+"""Test suite for soundevent.arrays.dimensions module."""
+
+import numpy as np
+import pytest
+import xarray as xr
+from soundevent import arrays
+
+
+def test_create_time_range_has_correct_attrs():
+    """Test create_time_range function."""
+    time_range = arrays.create_time_range(start_time=0, end_time=10, step=10)
+    assert time_range.attrs["units"] == "s"
+    assert time_range.attrs["long_name"] == "Time since start of recording"
+    assert time_range.attrs["standard_name"] == "time"
+
+
+def test_create_time_range_with_step():
+    """Test create_time_range with an explicit step."""
+    time_range = arrays.create_time_range(start_time=0, end_time=10, step=1)
+    assert isinstance(time_range, xr.Variable)
+    assert time_range.shape == (10,)
+    assert time_range.data.tolist() == list(range(0, 10))
+    assert time_range.attrs["step"] == 1
+
+
+def test_create_time_range_with_samplerate():
+    """Test create_time_range with a samplerate."""
+    time_range = arrays.create_time_range(
+        start_time=0,
+        end_time=10,
+        samplerate=100,
+    )
+    assert isinstance(time_range, xr.Variable)
+    assert time_range.shape == (1000,)
+    assert time_range.data.tolist() == [0.01 * i for i in range(1000)]
+    assert time_range.attrs["step"] == 0.01
+
+
+def test_create_frequency_range_has_correct_attrs():
+    """Test create_frequency_range function."""
+    frequency_range = arrays.create_frequency_range(
+        low_freq=0,
+        high_freq=1000,
+        step=100,
+    )
+    assert frequency_range.attrs["units"] == "Hz"
+    assert frequency_range.attrs["long_name"] == "Frequency"
+    assert frequency_range.attrs["standard_name"] == "frequency"
+    assert frequency_range.attrs["step"] == 100
+
+
+def test_create_frequency_range_with_step():
+    """Test create_frequency_range with an explicit step."""
+    frequency_range = arrays.create_frequency_range(
+        low_freq=0,
+        high_freq=1000,
+        step=100,
+    )
+    assert isinstance(frequency_range, xr.Variable)
+    assert frequency_range.shape == (10,)
+    assert frequency_range.data.tolist() == list(range(0, 1000, 100))
+
+
+def test_set_dim_attrs_is_successful():
+    """Test set_dim_attrs function."""
+    arr = xr.DataArray(
+        [1, 2, 3],
+        dims=("x",),
+        coords={"x": [0, 1, 2]},
+    )
+
+    assert arr.x.attrs == {}
+
+    arrays.set_dim_attrs(
+        arr,
+        dim="x",
+        units="m",
+        long_name="Distance",
+        standard_name="distance",
+    )
+    assert arr.x.attrs["units"] == "m"
+    assert arr.x.attrs["long_name"] == "Distance"
+    assert arr.x.attrs["standard_name"] == "distance"
+
+
+def test_get_dim_width_is_successful():
+    """Test get_dim_width function."""
+    arr = xr.DataArray(
+        [1, 1, 1],
+        dims=("x",),
+        coords={"x": [0, 1, 2]},
+    )
+    assert arrays.get_dim_width(arr, "x") == 2
+
+
+def test_get_dim_step_fails_if_exceeds_tolerance():
+    """Test get_dim_step function."""
+    arr = xr.DataArray(
+        [1, 1, 1],
+        dims=("x",),
+        coords={"x": [0, 1, 4]},
+    )
+
+    with pytest.raises(ValueError):
+        arrays.get_dim_step(arr, "x")
+
+    assert arrays.get_dim_step(arr, "x", atol=1) == 2
+
+
+def test_get_dim_fails_if_not_found_and_no_estimate():
+    """Test get_dim_step function."""
+    arr = xr.DataArray(
+        [1, 1, 1],
+        dims=("x",),
+        coords={"x": [0, 1, 2]},
+    )
+
+    with pytest.raises(ValueError):
+        arrays.get_dim_step(arr, "x", estimate_step=False)
+
+
+def test_get_dim_step_estimates_step_if_not_found_in_attrs():
+    """Test get_dim_step function."""
+    arr = xr.DataArray(
+        [1, 1, 1],
+        dims=("x",),
+        coords={"x": [0, 1, 2]},
+    )
+
+    assert arrays.get_dim_step(arr, "x") == 1
+
+
+def test_get_dim_step_reads_step_from_attrs():
+    """Test get_dim_step function."""
+    arr = xr.DataArray(
+        [1, 1, 1],
+        dims=("x",),
+        coords={"x": [0, 1, 2]},
+    )
+    arr.x.attrs["step"] = 0.5
+    assert arrays.get_dim_step(arr, "x") == 0.5
+
+
+def test_create_time_dim_from_array_sets_attrs():
+    """Test create_time_dim_from_array function."""
+    arr = np.array([1, 2, 3])
+    time_dim = arrays.create_time_dim_from_array(arr)
+    assert time_dim.attrs["units"] == "s"
+    assert time_dim.attrs["long_name"] == "Time since start of recording"
+    assert time_dim.attrs["standard_name"] == "time"
+def test_create_time_dim_from_array_estimates_step():
+    """Test create_time_dim_from_array function."""
+    arr = np.array([1, 2, 3])
+    time_dim = arrays.create_time_dim_from_array(arr, estimate_step=True)
+    assert time_dim.attrs["step"] == 1
+
+
+def test_can_give_step_to_create_time_dim_from_array():
+    """Test create_time_dim_from_array function."""
+    arr = np.array([1, 2, 3])
+    time_dim = arrays.create_time_dim_from_array(arr, step=0.5)
+    assert time_dim.attrs["step"] == 0.5
+
+
+def test_can_give_samplerate_to_create_time_dim_from_array():
+    """Test create_time_dim_from_array function."""
+    arr = np.array([1, 2, 3])
+    time_dim = arrays.create_time_dim_from_array(arr, samplerate=100)
+    assert time_dim.attrs["step"] == 0.01
+
+
+def test_create_frequency_dim_from_array_sets_attrs():
+    """Test create_frequency_dim_from_array function."""
+    arr = np.array([1, 2, 3])
+    frequency_dim = arrays.create_frequency_dim_from_array(arr)
+    assert frequency_dim.attrs["units"] == "Hz"
+    assert frequency_dim.attrs["long_name"] == "Frequency"
+    assert frequency_dim.attrs["standard_name"] == "frequency"
+
+
+def test_create_frequency_dim_from_array_estimates_step():
+    """Test create_frequency_dim_from_array function."""
+    arr = np.array([1, 2, 3])
+    frequency_dim = arrays.create_frequency_dim_from_array(
+        arr, estimate_step=True
+    )
+    assert frequency_dim.attrs["step"] == 1
+
+
+def test_can_give_step_to_create_frequency_dim_from_array():
+    """Test create_frequency_dim_from_array function."""
+    arr = np.array([1, 2, 3])
+    frequency_dim = arrays.create_frequency_dim_from_array(arr, step=0.5)
+    assert frequency_dim.attrs["step"] == 0.5
+
+
+def test_create_range_dim_fails_without_step_and_size():
+    """Test create_range_dim function."""
+    with pytest.raises(ValueError):
+        arrays.create_range_dim("x", start=0, stop=10)
+
+
+def test_create_time_range_fail_if_no_step_or_samplerate():
+    """Test create_time_range function."""
+    with pytest.raises(ValueError):
+        arrays.create_time_range(start_time=0, end_time=10)
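Before the operations test suite below, a brief sketch of the crop/extend/set-value semantics those tests pin down. The behavior shown here is inferred from the tests themselves; in particular, the claim that `extend_dim` grows the coordinate outwards by whole steps is read off the expected coordinate lists, and nothing here asserts what fill value the new cells receive.

```python
import numpy as np
import xarray as xr

from soundevent.arrays import operations as ops

arr = xr.DataArray(range(10), dims=["x"], coords={"x": range(10)})

# crop_dim keeps [start, stop): left-closed, right-open by default.
cropped = ops.crop_dim(arr, "x", start=2, stop=7)
assert cropped.x.values.tolist() == [2, 3, 4, 5, 6]

# extend_dim grows the coordinate outwards using the dimension step.
extended = ops.extend_dim(arr, "x", start=-2, stop=12)
assert extended.x.values.tolist()[0] == -2

# set_value_at_pos writes into the cell whose bin contains the position,
# so a query at x=3.7 lands in the cell anchored at coordinate 3.
zeros = xr.DataArray(np.zeros(10), dims=["x"], coords={"x": range(10)})
filled = ops.set_value_at_pos(zeros, 1, x=3.7)
assert filled.values.tolist()[3] == 1.0
```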
diff --git a/tests/test_array/test_operations.py b/tests/test_array/test_operations.py
new file mode 100644
index 0000000..b64e2dd
--- /dev/null
+++ b/tests/test_array/test_operations.py
@@ -0,0 +1,442 @@
+"""Test suite for the soundevent.arrays.operations module."""
+
+import pytest
+import numpy as np
+import xarray as xr
+from soundevent.arrays import operations as ops
+
+
+def test_successful_cropping():
+    """Test successful cropping of an axis."""
+    data = xr.DataArray(range(10), dims=["x"], coords={"x": range(10)})
+    result = ops.crop_dim(data, "x", 2, 7)
+
+    # Check dimensions and values
+    assert result.dims == ("x",)
+    assert result.x.values.tolist() == [2, 3, 4, 5, 6]
+
+
+def test_closed_interval_cropping():
+    """Test cropping with a right-closed interval."""
+    data = xr.DataArray(range(10), dims=["x"], coords={"x": range(10)})
+    result = ops.crop_dim(data, "x", 2, 7, right_closed=True)
+
+    # Check dimensions and values
+    assert result.dims == ("x",)
+    assert result.x.values.tolist() == [2, 3, 4, 5, 6, 7]
+
+
+def test_left_open_interval_cropping():
+    """Test cropping with a left-open interval."""
+    data = xr.DataArray(range(10), dims=["x"], coords={"x": range(10)})
+    result = ops.crop_dim(data, "x", 2, 7, left_closed=False)
+
+    # Check dimensions and values
+    assert result.dims == ("x",)
+    assert result.x.values.tolist() == [3, 4, 5, 6]
+
+
+def test_crop_fails_if_start_is_greater_than_end():
+    """Test that cropping fails if the start index is greater than the end index."""
+    data = xr.DataArray(range(10), dims=["x"], coords={"x": range(10)})
+
+    with pytest.raises(ValueError):
+        ops.crop_dim(data, "x", 7, 2)
+
+
+def test_crop_fails_if_trying_to_crop_outside_of_bounds():
+    """Test that cropping fails if the requested range lies outside the bounds of the axis."""
+    data = xr.DataArray(range(10), dims=["x"], coords={"x": range(10)})
+
+    with pytest.raises(ValueError):
+        ops.crop_dim(data, "x", -1, 11)
+
+
+def test_crop_without_start():
+    """Test that cropping an axis without a start index works."""
+    data = xr.DataArray(range(10), dims=["x"], coords={"x": range(10)})
+    result = ops.crop_dim(data, "x", stop=7)
+
+    # Check dimensions and values
+    assert result.dims == ("x",)
+    assert result.x.values.tolist() == [0, 1, 2, 3, 4, 5, 6]
+
+
+def test_crop_without_end():
+    """Test that cropping an axis without an end index works."""
+    data = xr.DataArray(range(10), dims=["x"], coords={"x": range(10)})
+    result = ops.crop_dim(data, "x", start=2)
+
+    # Check dimensions and values
+    assert result.dims == ("x",)
+    assert result.x.values.tolist() == [2, 3, 4, 5, 6, 7, 8, 9]
+
+
+def test_successful_extension():
+    """Test successful extending of an axis."""
+    data = xr.DataArray(range(10), dims=["x"], coords={"x": range(10)})
+    result = ops.extend_dim(data, "x", start=-1, stop=11)
+
+    # Check dimensions and values
+    assert result.dims == ("x",)
+    assert result.x.values.tolist() == [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+
+
+def test_extend_fails_if_start_is_greater_than_end():
+    """Test that extending fails if the start index is greater than the end index."""
+    data = xr.DataArray(range(10), dims=["x"], coords={"x": range(10)})
+
+    with pytest.raises(ValueError):
+        ops.extend_dim(data, "x", start=7, stop=2)
+
+
+def test_extend_left_open_interval():
+    """Test successful extending of an axis with a left-open interval."""
+    data = xr.DataArray(range(5), dims=["x"], coords={"x": range(5)})
+    result = ops.extend_dim(data, "x", start=-2, left_closed=False)
+
+    # Check dimensions and values
+    assert result.dims == ("x",)
+    assert result.x.values.tolist() == [-1, 0, 1, 2, 3, 4]
+
+
+def test_extend_right_closed():
+    """Test successful extending of an axis with a right-closed interval."""
+    data = xr.DataArray(range(5), dims=["x"], coords={"x": range(5)})
+    result = ops.extend_dim(data, "x", stop=7, right_closed=True)
+
+    # Check dimensions and values
+    assert result.dims == ("x",)
+    assert result.x.values.tolist() == [0, 1, 2, 3, 4, 5, 6, 7]
+
+
+def test_extend_non_exact_range():
+    """Test successful extending of an axis with a non-exact range."""
+    data = xr.DataArray(range(5), dims=["x"], coords={"x": range(5)})
+    result = ops.extend_dim(data, "x", start=-2.5, stop=7.5)
+
+    # Check dimensions and values
+    assert result.dims == ("x",)
+    assert result.x.values.tolist() == [-2, -1, 0, 1, 2, 3, 4, 5, 6, 7]
+
+
+def test_set_value_at_exact_position():
+    """Test setting a value at an exact position."""
+    data = xr.DataArray(np.zeros(5), dims=["x"], coords={"x": range(5)})
+    result = ops.set_value_at_pos(data, 1, x=0)
+    assert result.values.tolist() == [1, 0, 0, 0, 0]
+
+    data = xr.DataArray(np.zeros(5), dims=["x"], coords={"x": range(5)})
+    result = ops.set_value_at_pos(data, 1, x=1)
+    assert result.values.tolist() == [0, 1, 0, 0, 0]
+
+    data = xr.DataArray(np.zeros(5), dims=["x"], coords={"x": range(5)})
+    result = ops.set_value_at_pos(data, 1, x=2)
+    assert result.values.tolist() == [0, 0, 1, 0, 0]
+
+    data = xr.DataArray(np.zeros(5), dims=["x"], coords={"x": range(5)})
+    result = ops.set_value_at_pos(data, 1, x=3)
+    assert result.values.tolist() == [0, 0, 0, 1, 0]
+
+    data = xr.DataArray(np.zeros(5), dims=["x"], coords={"x": 
range(5)}) + result = ops.set_value_at_pos(data, 1, x=4) + assert result.values.tolist() == [0, 0, 0, 0, 1] + + +def test_set_value_at_non_exact_position(): + """Test setting a value at a non-exact position.""" + data = xr.DataArray(np.zeros(5), dims=["x"], coords={"x": range(5)}) + result = ops.set_value_at_pos(data, 1, x=0.5) + assert result.values.tolist() == [1, 0, 0, 0, 0] + + data = xr.DataArray(np.zeros(5), dims=["x"], coords={"x": range(5)}) + result = ops.set_value_at_pos(data, 1, x=0.9) + assert result.values.tolist() == [1, 0, 0, 0, 0] + + data = xr.DataArray(np.zeros(5), dims=["x"], coords={"x": range(5)}) + result = ops.set_value_at_pos(data, 1, x=1.1) + assert result.values.tolist() == [0, 1, 0, 0, 0] + + +def test_set_value_in_2d_array(): + """Test setting a value at a position in a 2D array.""" + data = xr.DataArray( + np.zeros((3, 3)), + dims=["x", "y"], + coords={"x": range(3), "y": range(3)}, + ) + result = ops.set_value_at_pos(data, 1, x=1, y=1) + assert result.values.tolist() == [[0, 0, 0], [0, 1, 0], [0, 0, 0]] + + +def test_set_value_at_position_outside_of_bounds(): + """Test that setting a value at a position outside of the bounds of the axis raises an error.""" + data = xr.DataArray(np.zeros(5), dims=["x"], coords={"x": range(5)}) + + with pytest.raises(KeyError): + ops.set_value_at_pos(data, 1, x=-1) + + with pytest.raises(KeyError): + ops.set_value_at_pos(data, 1, x=6) + + +def test_set_array_value_at_position(): + """Test setting an array value at a position.""" + data = xr.DataArray( + np.zeros((3, 3)), + dims=["x", "y"], + coords={"x": range(3), "y": range(3)}, + ) + value = np.array([1, 2, 3]) + result = ops.set_value_at_pos(data, value, x=1) + assert result.values.tolist() == [[0, 0, 0], [1, 2, 3], [0, 0, 0]] + + +def test_set_tuple_at_position(): + """Test setting a tuple value at a position.""" + data = xr.DataArray( + np.zeros((3, 3)), + dims=["x", "y"], + coords={"x": range(3), "y": range(3)}, + ) + value = (1, 2, 3) + result = ops.set_value_at_pos(data, value, x=1) + assert result.values.tolist() == [[0, 0, 0], [1, 2, 3], [0, 0, 0]] + + +def test_set_variable_value_at_position(): + """Test setting a variable value at a position.""" + data = xr.DataArray( + np.zeros((3, 3, 3)), + dims=["x", "y", "z"], + coords={"x": range(3), "y": range(3), "z": range(3)}, + ) + value = xr.DataArray( + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), + dims=["y", "z"], + coords={"y": range(3), "z": range(3)}, + ) + result = ops.set_value_at_pos(data, value, x=1) + assert result.values.tolist() == [ + [[0, 0, 0], [0, 0, 0], [0, 0, 0]], + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + [[0, 0, 0], [0, 0, 0], [0, 0, 0]], + ] + + +def test_set_variable_value_at_position_fails_with_invalid_dimension(): + """Test that setting a variable value at a position fails with an invalid dimension.""" + data = xr.DataArray( + np.zeros((3, 3, 3)), + dims=["x", "y", "z"], + coords={"x": range(3), "y": range(3), "z": range(3)}, + ) + + with pytest.raises(ValueError): + ops.set_value_at_pos(data, 1, w=1) + + +def test_offset_attribute_is_stored(): + """Test that the offset attribute is stored in the output.""" + data = xr.DataArray(range(5), dims=["x"], coords={"x": range(5)}) + result = ops.offset(data, 1) + assert result.attrs["add_offset"] == -1 + assert result.values.tolist() == [1, 2, 3, 4, 5] + + +def test_scale_attribute_is_stored(): + """Test that the scale attribute is stored in the output.""" + data = xr.DataArray(range(5), dims=["x"], coords={"x": range(5)}) + result = ops.scale(data, 2) + 
assert result.attrs["scale_factor"] == 0.5 + assert result.values.tolist() == [0, 2, 4, 6, 8] + + +def test_normalize_has_correct_range(): + """Test that the output of normalize has the correct range.""" + data = xr.DataArray(range(5), dims=["x"], coords={"x": range(5)}) + result = ops.normalize(data) + assert result.values.tolist() == [0, 0.25, 0.5, 0.75, 1] + + +def test_normalize_stores_offset_and_scale_attributes(): + """Test that the output of normalize has the correct attributes.""" + data = xr.DataArray( + data=np.array([1, 2, 3, 4, 5]), + dims=["x"], + coords={"x": range(5)}, + ) + result = ops.normalize(data) + assert result.attrs["add_offset"] == 1 + assert result.attrs["scale_factor"] == 4 + + +def test_normalize_a_constant_array(): + """Test that normalize works with a constant array.""" + data = xr.DataArray(np.ones(5), dims=["x"], coords={"x": range(5)}) + result = ops.normalize(data) + assert result.values.tolist() == [0, 0, 0, 0, 0] + + +def test_center_removes_mean(): + """Test that the output of center has a mean of zero.""" + data = xr.DataArray(range(5), dims=["x"], coords={"x": range(5)}) + result = ops.center(data) + assert result.values.mean() == 0 + + +def test_center_stores_offset_attribute(): + """Test that the output of center has the correct offset attribute.""" + data = xr.DataArray(range(5), dims=["x"], coords={"x": range(5)}) + result = ops.center(data) + assert result.attrs["add_offset"] == 2.0 + + +def test_resize_has_correct_shape(): + """Test that the output of resize has the correct shape.""" + data = xr.DataArray( + data=np.array([[1, 2], [3, 4]]), + dims=["x", "y"], + coords={"x": range(2), "y": range(2)}, + ) + result = ops.resize(data, x=3, y=3) + assert result.shape == (3, 3) + + +def test_resize_has_correct_coordinates(): + """Test that the output of resize has the correct dimension.""" + data = xr.DataArray( + data=np.array([[1, 2], [3, 4]]), + dims=["x", "y"], + coords={"x": range(2), "y": range(2)}, + ) + result = ops.resize(data, x=3, y=3) + assert result.x.values.tolist() == [0, 2 / 3, 4 / 3] + assert result.y.values.tolist() == [0, 2 / 3, 4 / 3] + + +def test_resize_fails_if_dimension_name_is_invalid(): + """Test that resize fails if the dimension name is invalid.""" + data = xr.DataArray( + data=np.array([[1, 2], [3, 4]]), + dims=["x", "y"], + coords={"x": range(2), "y": range(2)}, + ) + + with pytest.raises(ValueError): + ops.resize(data, z=3) + + +def test_resize_with_subset_of_dimensions(): + """Test that resize works with a subset of dimensions.""" + data = xr.DataArray( + data=np.array([[1, 2], [3, 4]]), + dims=["x", "y"], + coords={"x": range(2), "y": range(2)}, + ) + result = ops.resize(data, x=3) + assert result.shape == (3, 2) + + +def test_to_db_with_power_values(): + """Test that to_db returns the correct values.""" + data = xr.DataArray( + data=np.array([0.001, 0.01, 0.1, 1, 10, 100, 1000]), + dims=["x"], + coords={"x": range(7)}, + ) + result = ops.to_db(data) + assert result.values.tolist() == [-30, -20, -10, 0, 10, 20, 30] + + +def test_to_db_with_amplitude_values(): + """Test that to_db returns the correct values for amplitude values.""" + data = xr.DataArray( + data=np.array([0.001, 0.01, 0.1, 1, 10, 100, 1000]), + dims=["x"], + coords={"x": range(7)}, + ) + result = ops.to_db(data, power=2) + assert result.values.tolist() == [-60, -40, -20, 0, 20, 40, 60] + + +def test_to_db_with_max_reference(): + """Test that to_db returns the correct values with a reference value.""" + data = xr.DataArray( + data=np.array([0.001, 0.01, 
0.1, 1, 10, 100, 1000]), + dims=["x"], + coords={"x": range(7)}, + ) + result = ops.to_db(data, ref=np.max) + assert result.values.tolist() == [-60, -50, -40, -30, -20, -10, 0] + + +def test_to_db_with_min_db(): + """Test that to_db returns the correct values with a minimum value.""" + data = xr.DataArray( + data=np.array([0.001, 0.01, 0.1, 1, 10, 100, 1000]), + dims=["x"], + coords={"x": range(7)}, + ) + result = ops.to_db(data, ref=np.max, min_db=-40) + assert result.values.tolist() == [-40, -40, -40, -30, -20, -10, 0] + + +def test_to_db_with_max_db(): + """Test that to_db returns the correct values with a maximum value.""" + data = xr.DataArray( + data=np.array([0.001, 0.01, 0.1, 1, 10, 100, 1000]), + dims=["x"], + coords={"x": range(7)}, + ) + result = ops.to_db(data, ref=np.max, max_db=-20) + assert result.values.tolist() == [-60, -50, -40, -30, -20, -20, -20] + + +def test_to_db_adjust_unit_attribute(): + """Test that to_db adjusts the unit attribute.""" + data = xr.DataArray( + data=np.array([0.001, 0.01, 0.1, 1, 10, 100, 1000]), + dims=["x"], + coords={"x": range(7)}, + attrs={"units": "V"}, + ) + result = ops.to_db(data) + assert result.attrs["units"] == "V dB" + + +def test_to_db_adds_db_unit(): + """Test that to_db adds the correct unit if it is not present.""" + data = xr.DataArray( + data=np.array([0.001, 0.01, 0.1, 1, 10, 100, 1000]), + dims=["x"], + coords={"x": range(7)}, + ) + result = ops.to_db(data) + assert result.attrs["units"] == "dB" + + +def test_to_db_has_sane_values_for_zero_values(): + """Test that to_db returns sane values for zero values.""" + data = xr.DataArray( + data=np.array([0, 0, 0, 0, 0]), + dims=["x"], + coords={"x": range(5)}, + ) + result = ops.to_db(data) + assert result.values.tolist() == [-80, -80, -80, -80, -80] + + +def test_to_db_validates_arguments(): + """Test that to_db validates its arguments.""" + data = xr.DataArray( + data=np.array([0.001, 0.01, 0.1, 1, 10, 100, 1000]), + dims=["x"], + coords={"x": range(7)}, + ) + + with pytest.raises(ValueError): + ops.to_db(data, amin=-1) + + with pytest.raises(ValueError): + ops.to_db(data, ref=-1) diff --git a/tests/test_audio/test_audio.py b/tests/test_audio/test_audio.py index dff3c96..edc22d7 100644 --- a/tests/test_audio/test_audio.py +++ b/tests/test_audio/test_audio.py @@ -9,7 +9,6 @@ from hypothesis import HealthCheck, given, settings from hypothesis import strategies as st from scipy.io import wavfile - from soundevent import data from soundevent.audio.io import load_clip, load_recording @@ -54,11 +53,6 @@ def test_load_recording( assert wav.coords["time"].shape == (int(samplerate * duration),) assert wav.coords["channel"].shape == (channels,) - # Check that the wav has the correct metadata - assert wav.attrs["samplerate"] == samplerate - assert wav.attrs["time_expansion"] == time_expansion - assert wav.attrs["time_units"] == "seconds" - @given( samplerate=st.sampled_from([8_000, 16_000, 44_100, 256_000]), @@ -114,20 +108,15 @@ def test_read_clip( # Check that clip is the correct shape assert isinstance(clip_wav, xr.DataArray) assert clip_wav.shape == ( - int(np.ceil(samplerate * clip_duration)), + int(np.floor(samplerate * clip_duration)), channels, ) assert clip_wav.dtype == np.float32 assert clip_wav.dims == ("time", "channel") - # Check that the wav has the correct metadata - assert clip_wav.attrs["samplerate"] == samplerate - assert clip_wav.attrs["time_expansion"] == time_expansion - assert clip_wav.attrs["time_units"] == "seconds" - # Check that the clip is the same as the original 
wav start_index = int(np.floor(clip.start_time * samplerate)) - clip_samples = int(np.ceil(clip_duration * samplerate)) + clip_samples = int(np.floor(clip_duration * samplerate)) end_index = start_index + clip_samples assert np.allclose(clip_wav, wav[start_index:end_index, :]) assert np.allclose( @@ -143,7 +132,6 @@ def test_read_clip( # Check that we can slice the recording array to get the same clip # Load recording with xarray. rec_xr = load_recording(recording) - assert rec_xr.attrs["samplerate"] == samplerate assert rec_xr.shape == (samples, channels) assert np.allclose( clip_wav.data, diff --git a/tests/test_audio/test_spectrograms.py b/tests/test_audio/test_spectrograms.py index d669d76..8561c06 100644 --- a/tests/test_audio/test_spectrograms.py +++ b/tests/test_audio/test_spectrograms.py @@ -2,8 +2,7 @@ import numpy as np import xarray as xr - -from soundevent import audio, data +from soundevent import arrays, audio, data def test_compute_spectrograms_from_recordings(random_wav): @@ -31,21 +30,16 @@ def test_compute_spectrograms_from_recordings(random_wav): # Has correct dimensions assert spectrogram.dims == ("frequency", "time", "channel") - assert spectrogram.shape == (513, 30, 1) + assert spectrogram.shape == (513, 33, 1) # Has correct metadata - assert spectrogram.attrs["samplerate"] == samplerate assert spectrogram.attrs["window_size"] == window_size assert spectrogram.attrs["hop_size"] == hop_size assert spectrogram.attrs["window_type"] == "hann" # Has correct coordinates assert spectrogram.time.data[0] == 0.0 - assert np.isclose( - spectrogram.time.data[-1], - 1.0 - window_size, - atol=hop_size / 4, - ) + assert np.abs(spectrogram.time.data[-1] - 1.0) < hop_size assert spectrogram.frequency.data[0] == 0.0 assert spectrogram.frequency.data[-1] == samplerate / 2 @@ -74,18 +68,60 @@ def test_compute_spectrograms_from_clip(random_wav): # Has correct dimensions assert spectrogram.dims == ("frequency", "time", "channel") - assert spectrogram.shape == (513, 24, 1) + assert spectrogram.shape == (513, 26, 1) # Has correct metadata - assert spectrogram.attrs["samplerate"] == samplerate assert spectrogram.attrs["window_size"] == window_size assert spectrogram.attrs["hop_size"] == hop_size assert spectrogram.attrs["window_type"] == "hann" # Has correct coordinates assert spectrogram.time.data[0] == 0.1 - assert np.isclose( - spectrogram.time.data[-1], - 0.9 - window_size, - atol=hop_size / 4, + assert np.abs(spectrogram.time.data[-1] - 0.9) < hop_size + + +def test_spectrogram_has_correct_time_step(random_wav): + # Arrange + samplerate = 16000 + window_size = 1024 / samplerate + hop_size = 512 / samplerate + + path = random_wav(samplerate=samplerate, duration=0.2) + recording = data.Recording.from_file(path) + clip = data.Clip(recording=recording, start_time=0.1, end_time=0.9) + waveform = audio.load_clip(clip) + + # Act + spectrogram = audio.compute_spectrogram( + waveform, + window_size=window_size, + hop_size=hop_size, ) + + step = arrays.estimate_dim_step(spectrogram.time) + assert step == hop_size + assert spectrogram.time.attrs["step"] == hop_size + + +def test_spectrogram_has_correct_frequency_step(random_wav): + # Arrange + samplerate = 16000 + nfft = 1024 + window_size = nfft / samplerate + hop_size = 512 / samplerate + + path = random_wav(samplerate=samplerate, duration=0.2) + recording = data.Recording.from_file(path) + clip = data.Clip(recording=recording, start_time=0.1, end_time=0.9) + waveform = audio.load_clip(clip) + + # Act + spectrogram = audio.compute_spectrogram( + 
waveform, + window_size=window_size, + hop_size=hop_size, + ) + + step = arrays.estimate_dim_step(spectrogram.frequency) + assert step == samplerate / 1024 + assert spectrogram.frequency.attrs["step"] == samplerate / 1024 From 9e4df0b453613a018ab66065d0460aacaec5086c Mon Sep 17 00:00:00 2001 From: mbsantiago Date: Thu, 9 May 2024 15:52:02 +0100 Subject: [PATCH 3/7] formatted --- src/soundevent/arrays/dimensions.py | 4 +- src/soundevent/arrays/operations.py | 18 ++---- src/soundevent/audio/chunks.py | 3 +- src/soundevent/audio/filter.py | 4 +- src/soundevent/audio/media_info.py | 4 +- src/soundevent/audio/spectrum.py | 8 +-- src/soundevent/data/annotation_sets.py | 4 +- src/soundevent/data/annotation_tasks.py | 8 +-- src/soundevent/data/clip_annotations.py | 4 +- src/soundevent/data/clip_evaluations.py | 8 +-- src/soundevent/data/evaluations.py | 4 +- src/soundevent/data/geometries.py | 34 +++-------- src/soundevent/data/notes.py | 4 +- src/soundevent/data/prediction_sets.py | 4 +- src/soundevent/data/recording_sets.py | 4 +- src/soundevent/data/recordings.py | 5 +- src/soundevent/evaluation/affinity.py | 19 ++----- src/soundevent/evaluation/metrics.py | 12 +--- src/soundevent/evaluation/tasks/__init__.py | 4 +- .../evaluation/tasks/clip_classification.py | 4 +- .../tasks/clip_multilabel_classification.py | 4 +- src/soundevent/evaluation/tasks/common.py | 4 +- .../tasks/sound_event_classification.py | 16 ++---- .../evaluation/tasks/sound_event_detection.py | 4 +- src/soundevent/geometry/__init__.py | 5 +- src/soundevent/geometry/features.py | 16 ++---- src/soundevent/geometry/html.py | 6 +- src/soundevent/geometry/positions.py | 6 +- src/soundevent/io/aoef/__init__.py | 13 +---- src/soundevent/io/aoef/adapters.py | 8 +-- src/soundevent/io/aoef/annotation_project.py | 27 +++------ src/soundevent/io/aoef/annotation_set.py | 21 ++----- src/soundevent/io/aoef/clip_annotations.py | 19 ++----- src/soundevent/io/aoef/clip_evaluation.py | 13 +---- src/soundevent/io/aoef/clip_predictions.py | 19 ++----- src/soundevent/io/aoef/dataset.py | 4 +- src/soundevent/io/aoef/evaluation.py | 47 +++++---------- src/soundevent/io/aoef/evaluation_set.py | 11 +--- src/soundevent/io/aoef/match.py | 8 +-- src/soundevent/io/aoef/prediction_set.py | 17 ++---- src/soundevent/io/aoef/recording.py | 18 ++---- src/soundevent/io/aoef/recording_set.py | 3 +- src/soundevent/io/aoef/sequence.py | 4 +- src/soundevent/io/aoef/sequence_prediction.py | 3 +- src/soundevent/io/aoef/sound_event.py | 4 +- .../io/aoef/sound_event_annotation.py | 8 +-- .../io/aoef/sound_event_prediction.py | 11 +--- src/soundevent/io/crowsetta/__init__.py | 5 +- src/soundevent/io/crowsetta/annotation.py | 17 ++---- src/soundevent/io/crowsetta/bbox.py | 9 +-- src/soundevent/io/formats.py | 4 +- src/soundevent/plot/annotation.py | 7 +-- src/soundevent/plot/geometries.py | 3 +- src/soundevent/plot/tags.py | 4 +- src/soundevent/types.py | 5 +- tests/test_array/test_dimensions.py | 4 +- tests/test_audio/test_io.py | 4 +- tests/test_data/test_datasets.py | 8 +-- tests/test_data/test_geometry.py | 4 +- .../test_clip_classification.py | 16 ++---- tests/test_evaluation/test_encode.py | 4 +- tests/test_evaluation/test_matching.py | 4 +- .../test_sound_event_detection.py | 4 +- tests/test_io/test_annotation_projects.py | 57 +++++-------------- tests/test_io/test_aoef/test_api.py | 4 +- .../test_io/test_crowsetta/test_annotation.py | 8 +-- tests/test_io/test_crowsetta/test_import.py | 10 ++-- tests/test_io/test_crowsetta/test_labels.py | 4 +- 
tests/test_io/test_crowsetta/test_segments.py | 8 +-- tests/test_io/test_crowsetta/test_sequence.py | 4 +- tests/test_io/test_model_runs.py | 14 +---- 71 files changed, 174 insertions(+), 515 deletions(-) diff --git a/src/soundevent/arrays/dimensions.py b/src/soundevent/arrays/dimensions.py index 203067e..b877b2e 100644 --- a/src/soundevent/arrays/dimensions.py +++ b/src/soundevent/arrays/dimensions.py @@ -493,9 +493,7 @@ def get_dim_step( return attrs[DimAttrs.step.value] if not estimate_step: - raise ValueError( - f"Step size not found in the '{dim}' dimension attributes." - ) + raise ValueError(f"Step size not found in the '{dim}' dimension attributes.") return estimate_dim_step( coord.data, diff --git a/src/soundevent/arrays/operations.py b/src/soundevent/arrays/operations.py index f01ac97..ba5a226 100644 --- a/src/soundevent/arrays/operations.py +++ b/src/soundevent/arrays/operations.py @@ -7,11 +7,7 @@ from numpy.typing import DTypeLike from xarray.core.types import InterpOptions -from soundevent.arrays.dimensions import ( - create_range_dim, - get_dim_range, - get_dim_step, -) +from soundevent.arrays.dimensions import create_range_dim, get_dim_range, get_dim_step __all__ = [ "center", @@ -92,9 +88,7 @@ def crop_dim( stop = current_stop if start > stop: - raise ValueError( - f"Start value {start} must be less than stop value {stop}" - ) + raise ValueError(f"Start value {start} must be less than stop value {stop}") if start < current_start or stop > current_stop: raise ValueError( @@ -180,9 +174,7 @@ def extend_dim( stop = current_stop if start > stop: - raise ValueError( - f"Start value {start} must be less than stop value {stop}" - ) + raise ValueError(f"Start value {start} must be less than stop value {stop}") step = get_dim_step(arr, dim) @@ -312,9 +304,7 @@ def set_value_at_pos( start, stop = get_dim_range(array, dim) if coord < start or coord > stop: - raise KeyError( - f"Position {coord} is outside the range of dimension {dim}." - ) + raise KeyError(f"Position {coord} is outside the range of dimension {dim}.") index = array.indexes[dim].get_slice_bound(coord, "right") indexer[dims[dim]] = index - 1 diff --git a/src/soundevent/audio/chunks.py b/src/soundevent/audio/chunks.py index 3f2b2d5..975e8bb 100644 --- a/src/soundevent/audio/chunks.py +++ b/src/soundevent/audio/chunks.py @@ -111,8 +111,7 @@ def _read_chunk(riff: BinaryIO) -> Optional[Chunk]: if chunk_id in CHUNKS_WITH_SUBCHUNKS: chunk.subchunks = { - subchunk.chunk_id: subchunk - for subchunk in _get_subchunks(riff, size - 4) + subchunk.chunk_id: subchunk for subchunk in _get_subchunks(riff, size - 4) } else: riff.seek(size, os.SEEK_CUR) diff --git a/src/soundevent/audio/filter.py b/src/soundevent/audio/filter.py index 6df6111..63ff040 100644 --- a/src/soundevent/audio/filter.py +++ b/src/soundevent/audio/filter.py @@ -18,9 +18,7 @@ def _get_filter( order: int = 5, ) -> np.ndarray: if low_freq is None and high_freq is None: - raise ValueError( - "At least one of low_freq and high_freq must be specified." - ) + raise ValueError("At least one of low_freq and high_freq must be specified.") if low_freq is None: # Low pass filter diff --git a/src/soundevent/audio/media_info.py b/src/soundevent/audio/media_info.py index e3ac9e6..6e44bd8 100644 --- a/src/soundevent/audio/media_info.py +++ b/src/soundevent/audio/media_info.py @@ -156,9 +156,7 @@ def get_media_info(path: PathLike) -> MediaInfo: # chunk is the size of the data subchunk divided by the number # of channels and the bit depth. 
data_chunk = chunk.subchunks["data"] - samples = ( - 8 * data_chunk.size // (fmt_info.channels * fmt_info.bit_depth) - ) + samples = 8 * data_chunk.size // (fmt_info.channels * fmt_info.bit_depth) duration = samples / fmt_info.samplerate return MediaInfo( diff --git a/src/soundevent/audio/spectrum.py b/src/soundevent/audio/spectrum.py index 57a4738..3ab9f40 100644 --- a/src/soundevent/audio/spectrum.py +++ b/src/soundevent/audio/spectrum.py @@ -120,9 +120,7 @@ def pcen_core( raise ValueError(f"eps={eps} must be strictly positive") if time_constant <= 0: - raise ValueError( - f"time_constant={time_constant} must be strictly positive" - ) + raise ValueError(f"time_constant={time_constant} must be strictly positive") if b is None: t_frames = time_constant * sr / float(hop_length) @@ -146,9 +144,7 @@ def pcen_core( if max_size == 1: ref = S elif S.ndim == 1: - raise ValueError( - "Max-filtering cannot be applied to 1-dimensional input" - ) + raise ValueError("Max-filtering cannot be applied to 1-dimensional input") else: if max_axis is None: if S.ndim != 2: diff --git a/src/soundevent/data/annotation_sets.py b/src/soundevent/data/annotation_sets.py index 6ea30e0..7273282 100644 --- a/src/soundevent/data/annotation_sets.py +++ b/src/soundevent/data/annotation_sets.py @@ -28,6 +28,4 @@ class AnnotationSet(BaseModel): default_factory=list, repr=False, ) - created_on: datetime.datetime = Field( - default_factory=datetime.datetime.now - ) + created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) diff --git a/src/soundevent/data/annotation_tasks.py b/src/soundevent/data/annotation_tasks.py index 3c9495c..31ad438 100644 --- a/src/soundevent/data/annotation_tasks.py +++ b/src/soundevent/data/annotation_tasks.py @@ -60,15 +60,11 @@ class StatusBadge(BaseModel): state: AnnotationState owner: Optional[User] = None - created_on: datetime.datetime = Field( - default_factory=datetime.datetime.now - ) + created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) class AnnotationTask(BaseModel): uuid: UUID = Field(default_factory=uuid4, repr=False) clip: Clip status_badges: List[StatusBadge] = Field(default_factory=list) - created_on: datetime.datetime = Field( - default_factory=datetime.datetime.now - ) + created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) diff --git a/src/soundevent/data/clip_annotations.py b/src/soundevent/data/clip_annotations.py index f8d4686..acadefe 100644 --- a/src/soundevent/data/clip_annotations.py +++ b/src/soundevent/data/clip_annotations.py @@ -54,6 +54,4 @@ class ClipAnnotation(BaseModel): sequences: List[SequenceAnnotation] = Field(default_factory=list) tags: List[Tag] = Field(default_factory=list) notes: List[Note] = Field(default_factory=list) - created_on: datetime.datetime = Field( - default_factory=datetime.datetime.now - ) + created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) diff --git a/src/soundevent/data/clip_evaluations.py b/src/soundevent/data/clip_evaluations.py index ad812b0..380a9c8 100644 --- a/src/soundevent/data/clip_evaluations.py +++ b/src/soundevent/data/clip_evaluations.py @@ -95,17 +95,13 @@ def _check_matches(self): } match_targets = [ - match.target.uuid - for match in self.matches - if match.target is not None + match.target.uuid for match in self.matches if match.target is not None ] match_targets_set = set(match_targets) match_sources = [ - match.source.uuid - for match in self.matches - if match.source is not None + match.source.uuid for match in self.matches if 
match.source is not None ] match_sources_set = set(match_sources) diff --git a/src/soundevent/data/evaluations.py b/src/soundevent/data/evaluations.py index d55db26..4ef34ed 100644 --- a/src/soundevent/data/evaluations.py +++ b/src/soundevent/data/evaluations.py @@ -25,9 +25,7 @@ class Evaluation(BaseModel): """Evaluation Class.""" uuid: UUID = Field(default_factory=uuid4, repr=False) - created_on: datetime.datetime = Field( - default_factory=datetime.datetime.now - ) + created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) evaluation_task: str clip_evaluations: Sequence[ClipEvaluation] = Field(default_factory=list) metrics: Sequence[Feature] = Field(default_factory=list) diff --git a/src/soundevent/data/geometries.py b/src/soundevent/data/geometries.py index 35b8c37..be5af2c 100644 --- a/src/soundevent/data/geometries.py +++ b/src/soundevent/data/geometries.py @@ -252,9 +252,7 @@ def _validate_time_interval(cls, v: List[Time]) -> List[Time]: after the end time). """ if len(v) != 2: - raise ValueError( - "The time interval must have exactly two time stamps." - ) + raise ValueError("The time interval must have exactly two time stamps.") if v[0] > v[1]: raise ValueError("The start time must be before the end time.") @@ -325,9 +323,7 @@ def _validate_coordinates(cls, v: List[float]) -> List[float]: raise ValueError("The time must be positive.") if frequency < 0 or frequency > MAX_FREQUENCY: - raise ValueError( - f"The frequency must be between 0 and {MAX_FREQUENCY}." - ) + raise ValueError(f"The frequency must be between 0 and {MAX_FREQUENCY}.") return v @@ -473,8 +469,7 @@ def _validate_coordinates( if frequency < 0 or frequency > MAX_FREQUENCY: raise ValueError( - f"The frequency must be between 0 and " - f"{MAX_FREQUENCY}." + f"The frequency must be between 0 and " f"{MAX_FREQUENCY}." ) return v @@ -532,9 +527,7 @@ def _validate_coordinates(cls, v: List[float]) -> List[float]: negative or the frequency is outside the valid range). """ if len(v) != 4: - raise ValueError( - "The bounding box must have exactly four coordinates." - ) + raise ValueError("The bounding box must have exactly four coordinates.") start_time, low_freq, end_time, high_freq = v @@ -558,9 +551,7 @@ def _validate_coordinates(cls, v: List[float]) -> List[float]: raise ValueError("The start time must be before the end time.") if low_freq > high_freq: - raise ValueError( - "The start frequency must be before the end frequency." - ) + raise ValueError("The start frequency must be before the end frequency.") return v @@ -771,9 +762,7 @@ def _validate_coordinates( negative or the frequency is outside the valid range). """ if len(v) < 1: - raise ValueError( - "The multipolygon must have at least one polygon." - ) + raise ValueError("The multipolygon must have at least one polygon.") for polygon in v: if len(polygon) < 1: @@ -781,9 +770,7 @@ def _validate_coordinates( for ring in polygon: if len(ring) < 3: - raise ValueError( - "Each ring must have at least three points." - ) + raise ValueError("Each ring must have at least three points.") for time, frequency in ring: if time < 0: @@ -791,8 +778,7 @@ def _validate_coordinates( if frequency < 0 or frequency > MAX_FREQUENCY: raise ValueError( - f"The frequency must be between 0 and " - f"{MAX_FREQUENCY}." + f"The frequency must be between 0 and " f"{MAX_FREQUENCY}." ) return v @@ -921,6 +907,4 @@ def geometry_validate( from_attributes=mode == "attributes", ) except ValidationError as error: - raise ValueError( - f"Object {obj} is not a valid {geom_type}." 
- ) from error + raise ValueError(f"Object {obj} is not a valid {geom_type}.") from error diff --git a/src/soundevent/data/notes.py b/src/soundevent/data/notes.py index de0de81..3ee2de6 100644 --- a/src/soundevent/data/notes.py +++ b/src/soundevent/data/notes.py @@ -95,9 +95,7 @@ class Note(BaseModel): message: str created_by: Optional[User] = None is_issue: bool = False - created_on: datetime.datetime = Field( - default_factory=datetime.datetime.now - ) + created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) def __hash__(self): """Hash the Note object.""" diff --git a/src/soundevent/data/prediction_sets.py b/src/soundevent/data/prediction_sets.py index edffd2a..1cf3bd6 100644 --- a/src/soundevent/data/prediction_sets.py +++ b/src/soundevent/data/prediction_sets.py @@ -73,6 +73,4 @@ class PredictionSet(BaseModel): uuid: UUID = Field(default_factory=uuid4) clip_predictions: List[ClipPrediction] = Field(default_factory=list) - created_on: datetime.datetime = Field( - default_factory=datetime.datetime.now - ) + created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) diff --git a/src/soundevent/data/recording_sets.py b/src/soundevent/data/recording_sets.py index dde95ef..cb1ff5f 100644 --- a/src/soundevent/data/recording_sets.py +++ b/src/soundevent/data/recording_sets.py @@ -12,6 +12,4 @@ class RecordingSet(BaseModel): uuid: UUID = Field(default_factory=uuid4) recordings: List[Recording] = Field(default_factory=list, repr=False) - created_on: datetime.datetime = Field( - default_factory=datetime.datetime.now - ) + created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) diff --git a/src/soundevent/data/recordings.py b/src/soundevent/data/recordings.py index 088d23a..b2cbb7c 100644 --- a/src/soundevent/data/recordings.py +++ b/src/soundevent/data/recordings.py @@ -193,10 +193,7 @@ def from_file( Recording The recording object. 
""" - from soundevent.audio.media_info import ( - compute_md5_checksum, - get_media_info, - ) + from soundevent.audio.media_info import compute_md5_checksum, get_media_info media_info = get_media_info(path) diff --git a/src/soundevent/evaluation/affinity.py b/src/soundevent/evaluation/affinity.py index 801e836..1de83f1 100644 --- a/src/soundevent/evaluation/affinity.py +++ b/src/soundevent/evaluation/affinity.py @@ -1,11 +1,7 @@ """Measures of affinity between sound events geometries.""" from soundevent import data -from soundevent.geometry import ( - buffer_geometry, - compute_bounds, - geometry_to_shapely, -) +from soundevent.geometry import buffer_geometry, compute_bounds, geometry_to_shapely __all__ = [ "compute_affinity", @@ -88,10 +84,7 @@ def compute_affinity( geometry1 = _prepare_geometry(geometry1, time_buffer, freq_buffer) geometry2 = _prepare_geometry(geometry2, time_buffer, freq_buffer) - if ( - geometry1.type in TIME_GEOMETRY_TYPES - or geometry2.type in TIME_GEOMETRY_TYPES - ): + if geometry1.type in TIME_GEOMETRY_TYPES or geometry2.type in TIME_GEOMETRY_TYPES: return compute_affinity_in_time(geometry1, geometry2) shp1 = geometry_to_shapely(geometry1) @@ -114,12 +107,8 @@ def compute_affinity_in_time( start_time1, _, end_time1, _ = compute_bounds(geometry1) start_time2, _, end_time2, _ = compute_bounds(geometry2) - intersection = max( - 0, min(end_time1, end_time2) - max(start_time1, start_time2) - ) - union = ( - (end_time1 - start_time1) + (end_time2 - start_time2) - intersection - ) + intersection = max(0, min(end_time1, end_time2) - max(start_time1, start_time2)) + union = (end_time1 - start_time1) + (end_time2 - start_time2) - intersection if union == 0: return 0 diff --git a/src/soundevent/evaluation/metrics.py b/src/soundevent/evaluation/metrics.py index 60dc829..22e3c71 100644 --- a/src/soundevent/evaluation/metrics.py +++ b/src/soundevent/evaluation/metrics.py @@ -43,9 +43,7 @@ def balanced_accuracy( y_score: np.ndarray, ) -> float: num_classes = y_score.shape[1] - y_true_array = np.array( - [y if y is not None else num_classes for y in y_true] - ) + y_true_array = np.array([y if y is not None else num_classes for y in y_true]) y_score = np.c_[y_score, 1 - y_score.sum(axis=1, keepdims=True)] y_pred = y_score.argmax(axis=1) return metrics.balanced_accuracy_score( @@ -59,9 +57,7 @@ def accuracy( y_score: np.ndarray, ) -> float: num_classes = y_score.shape[1] - y_true_array = np.array( - [y if y is not None else num_classes for y in y_true] - ) + y_true_array = np.array([y if y is not None else num_classes for y in y_true]) y_score = np.c_[y_score, 1 - y_score.sum(axis=1, keepdims=True)] y_pred = y_score.argmax(axis=1) return metrics.accuracy_score( # type: ignore @@ -75,9 +71,7 @@ def top_3_accuracy( y_score: np.ndarray, ) -> float: num_classes = y_score.shape[1] - y_true_array = np.array( - [y if y is not None else num_classes for y in y_true] - ) + y_true_array = np.array([y if y is not None else num_classes for y in y_true]) y_score = np.c_[y_score, 1 - y_score.sum(axis=1, keepdims=True)] return metrics.top_k_accuracy_score( # type: ignore y_true=y_true_array, diff --git a/src/soundevent/evaluation/tasks/__init__.py b/src/soundevent/evaluation/tasks/__init__.py index bc14174..0d1b3b3 100644 --- a/src/soundevent/evaluation/tasks/__init__.py +++ b/src/soundevent/evaluation/tasks/__init__.py @@ -5,9 +5,7 @@ from soundevent.evaluation.tasks.sound_event_classification import ( sound_event_classification, ) -from soundevent.evaluation.tasks.sound_event_detection import ( 
- sound_event_detection, -) +from soundevent.evaluation.tasks.sound_event_detection import sound_event_detection __all__ = [ "clip_classification", diff --git a/src/soundevent/evaluation/tasks/clip_classification.py b/src/soundevent/evaluation/tasks/clip_classification.py index 814052a..aa509ea 100644 --- a/src/soundevent/evaluation/tasks/clip_classification.py +++ b/src/soundevent/evaluation/tasks/clip_classification.py @@ -164,8 +164,6 @@ def _compute_overall_score( evaluated_examples: Sequence[data.ClipEvaluation], ) -> float: non_none_scores = [ - example.score - for example in evaluated_examples - if example.score is not None + example.score for example in evaluated_examples if example.score is not None ] return float(np.mean(non_none_scores)) if non_none_scores else 0.0 diff --git a/src/soundevent/evaluation/tasks/clip_multilabel_classification.py b/src/soundevent/evaluation/tasks/clip_multilabel_classification.py index c9f23a5..6cc3249 100644 --- a/src/soundevent/evaluation/tasks/clip_multilabel_classification.py +++ b/src/soundevent/evaluation/tasks/clip_multilabel_classification.py @@ -166,8 +166,6 @@ def _compute_overall_score( evaluated_examples: Sequence[data.ClipEvaluation], ) -> float: valid_scores = [ - example.score - for example in evaluated_examples - if example.score is not None + example.score for example in evaluated_examples if example.score is not None ] return float(np.mean(valid_scores)) if valid_scores else 0.0 diff --git a/src/soundevent/evaluation/tasks/common.py b/src/soundevent/evaluation/tasks/common.py index 0e2cb58..732f547 100644 --- a/src/soundevent/evaluation/tasks/common.py +++ b/src/soundevent/evaluation/tasks/common.py @@ -7,9 +7,7 @@ def iterate_over_valid_clips( clip_predictions: Sequence[data.ClipPrediction], clip_annotations: Sequence[data.ClipAnnotation], ) -> Iterable[Tuple[data.ClipAnnotation, data.ClipPrediction]]: - annotated_clips = { - example.clip.uuid: example for example in clip_annotations - } + annotated_clips = {example.clip.uuid: example for example in clip_annotations} for predictions in clip_predictions: if predictions.clip.uuid in annotated_clips: diff --git a/src/soundevent/evaluation/tasks/sound_event_classification.py b/src/soundevent/evaluation/tasks/sound_event_classification.py index c860467..029817c 100644 --- a/src/soundevent/evaluation/tasks/sound_event_classification.py +++ b/src/soundevent/evaluation/tasks/sound_event_classification.py @@ -18,9 +18,7 @@ "sound_event_classification", ] -SOUNDEVENT_METRICS: Sequence[metrics.Metric] = ( - metrics.true_class_probability, -) +SOUNDEVENT_METRICS: Sequence[metrics.Metric] = (metrics.true_class_probability,) EXAMPLE_METRICS: Sequence[metrics.Metric] = () @@ -121,9 +119,7 @@ def _evaluate_clip( if sound_event_prediction.sound_event.uuid not in _valid_sound_events: continue - annotation = _valid_sound_events[ - sound_event_prediction.sound_event.uuid - ] + annotation = _valid_sound_events[sound_event_prediction.sound_event.uuid] true_class, predicted_classes, match = _evaluate_sound_event( sound_event_prediction=sound_event_prediction, sound_event_annotation=annotation, @@ -134,9 +130,7 @@ def _evaluate_clip( predicted_classes_scores.append(predicted_classes) matches.append(match) - score = np.mean( - [match.score for match in matches if match.score is not None] - ) + score = np.mean([match.score for match in matches if match.score is not None]) return ( true_classes, @@ -193,8 +187,6 @@ def _compute_overall_score( evaluated_clip: Sequence[data.ClipEvaluation], ) -> float: 
non_none_scores = [ - example.score - for example in evaluated_clip - if example.score is not None + example.score for example in evaluated_clip if example.score is not None ] return float(np.mean(non_none_scores)) if non_none_scores else 0.0 diff --git a/src/soundevent/evaluation/tasks/sound_event_detection.py b/src/soundevent/evaluation/tasks/sound_event_detection.py index a841e55..09a3ba9 100644 --- a/src/soundevent/evaluation/tasks/sound_event_detection.py +++ b/src/soundevent/evaluation/tasks/sound_event_detection.py @@ -20,9 +20,7 @@ "evaluate_clip", ] -SOUNDEVENT_METRICS: Sequence[metrics.Metric] = ( - metrics.true_class_probability, -) +SOUNDEVENT_METRICS: Sequence[metrics.Metric] = (metrics.true_class_probability,) EXAMPLE_METRICS: Sequence[metrics.Metric] = () diff --git a/src/soundevent/geometry/__init__.py b/src/soundevent/geometry/__init__.py index 7342d11..79c7b10 100644 --- a/src/soundevent/geometry/__init__.py +++ b/src/soundevent/geometry/__init__.py @@ -14,10 +14,7 @@ """ from soundevent.geometry.conversion import geometry_to_shapely -from soundevent.geometry.features import ( - GeometricFeature, - compute_geometric_features, -) +from soundevent.geometry.features import GeometricFeature, compute_geometric_features from soundevent.geometry.html import geometry_to_html from soundevent.geometry.operations import buffer_geometry, compute_bounds from soundevent.geometry.positions import get_geometry_point diff --git a/src/soundevent/geometry/features.py b/src/soundevent/geometry/features.py index a251bfe..87b7dd3 100644 --- a/src/soundevent/geometry/features.py +++ b/src/soundevent/geometry/features.py @@ -154,9 +154,7 @@ def _compute_multi_point_features( Feature(name=GeometricFeature.LOW_FREQ, value=low_freq), Feature(name=GeometricFeature.HIGH_FREQ, value=high_freq), Feature(name=GeometricFeature.BANDWIDTH, value=high_freq - low_freq), - Feature( - name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates) - ), + Feature(name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates)), ] @@ -171,9 +169,7 @@ def _compute_multi_linestring_features( Feature(name=GeometricFeature.LOW_FREQ, value=low_freq), Feature(name=GeometricFeature.HIGH_FREQ, value=high_freq), Feature(name=GeometricFeature.BANDWIDTH, value=high_freq - low_freq), - Feature( - name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates) - ), + Feature(name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates)), ] @@ -188,15 +184,11 @@ def _compute_multi_polygon_features( Feature(name=GeometricFeature.LOW_FREQ, value=low_freq), Feature(name=GeometricFeature.HIGH_FREQ, value=high_freq), Feature(name=GeometricFeature.BANDWIDTH, value=high_freq - low_freq), - Feature( - name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates) - ), + Feature(name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates)), ] -_COMPUTE_FEATURES: Dict[ - geometries.GeometryType, Callable[[Any], List[Feature]] -] = { +_COMPUTE_FEATURES: Dict[geometries.GeometryType, Callable[[Any], List[Feature]]] = { geometries.TimeStamp.geom_type(): _compute_time_stamp_features, geometries.TimeInterval.geom_type(): _compute_time_interval_features, geometries.BoundingBox.geom_type(): _compute_bounding_box_features, diff --git a/src/soundevent/geometry/html.py b/src/soundevent/geometry/html.py index e705c85..7554044 100644 --- a/src/soundevent/geometry/html.py +++ b/src/soundevent/geometry/html.py @@ -105,11 +105,7 @@ def axis_label( inner_style = "; ".join( [ "display: inline", - ( - "vertical-align: top" 
- if axis == "time" - else "vertical-align: bottom" - ), + ("vertical-align: top" if axis == "time" else "vertical-align: bottom"), ] ) diff --git a/src/soundevent/geometry/positions.py b/src/soundevent/geometry/positions.py index 6997ee9..b371ca2 100644 --- a/src/soundevent/geometry/positions.py +++ b/src/soundevent/geometry/positions.py @@ -1,10 +1,10 @@ -from typing import Tuple, Literal +from typing import Literal, Tuple + import shapely from soundevent.data import Geometry -from soundevent.geometry.operations import compute_bounds from soundevent.geometry.conversion import geometry_to_shapely - +from soundevent.geometry.operations import compute_bounds __all__ = [ "get_geometry_point", diff --git a/src/soundevent/io/aoef/__init__.py b/src/soundevent/io/aoef/__init__.py index 08659c8..1aed5d9 100644 --- a/src/soundevent/io/aoef/__init__.py +++ b/src/soundevent/io/aoef/__init__.py @@ -34,10 +34,7 @@ from soundevent import data from soundevent.io.types import DataCollections, DataType -from .annotation_project import ( - AnnotationProjectAdapter, - AnnotationProjectObject, -) +from .annotation_project import AnnotationProjectAdapter, AnnotationProjectObject from .annotation_set import AnnotationSetAdapter, AnnotationSetObject from .dataset import DatasetAdapter, DatasetObject from .evaluation import EvaluationAdapter, EvaluationObject @@ -87,9 +84,7 @@ class AOEFObject(BaseModel): """Schema definition for an AOEF object.""" version: str = AOEF_VERSION - created_on: datetime.datetime = Field( - default_factory=datetime.datetime.now - ) + created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) data: Union[ EvaluationObject, DatasetObject, @@ -162,9 +157,7 @@ def load( if aoef_object.version != AOEF_VERSION: version = aoef_object.version - raise ValueError( - f"Invalid AOEF version: {version} (expected {AOEF_VERSION})" - ) + raise ValueError(f"Invalid AOEF version: {version} (expected {AOEF_VERSION})") return to_soundevent(aoef_object, audio_dir=audio_dir) diff --git a/src/soundevent/io/aoef/adapters.py b/src/soundevent/io/aoef/adapters.py index ac21ca3..1ea7920 100644 --- a/src/soundevent/io/aoef/adapters.py +++ b/src/soundevent/io/aoef/adapters.py @@ -47,9 +47,7 @@ def to_aoef(self, obj: C) -> D: ... def to_soundevent(self, obj: D) -> C: ... -class DataAdapter( - ABC, Generic[SoundEventObject, AOEFObject, SoundEventKey, AOEFKey] -): +class DataAdapter(ABC, Generic[SoundEventObject, AOEFObject, SoundEventKey, AOEFKey]): """Base class for data adapters. A data adapter is used to convert between sound event and AOEF data @@ -66,9 +64,7 @@ def __init__(self): self._aoef_store: Dict[AOEFKey, AOEFObject] = {} @abstractmethod - def assemble_aoef( - self, obj: SoundEventObject, obj_id: AOEFKey - ) -> AOEFObject: + def assemble_aoef(self, obj: SoundEventObject, obj_id: AOEFKey) -> AOEFObject: """Create AOEF object from sound event object. 
diff --git a/src/soundevent/io/aoef/adapters.py b/src/soundevent/io/aoef/adapters.py
index ac21ca3..1ea7920 100644
--- a/src/soundevent/io/aoef/adapters.py
+++ b/src/soundevent/io/aoef/adapters.py
@@ -47,9 +47,7 @@ def to_aoef(self, obj: C) -> D: ...
     def to_soundevent(self, obj: D) -> C: ...
 
 
-class DataAdapter(
-    ABC, Generic[SoundEventObject, AOEFObject, SoundEventKey, AOEFKey]
-):
+class DataAdapter(ABC, Generic[SoundEventObject, AOEFObject, SoundEventKey, AOEFKey]):
     """Base class for data adapters.
 
     A data adapter is used to convert between sound event and AOEF data
@@ -66,9 +64,7 @@ def __init__(self):
         self._aoef_store: Dict[AOEFKey, AOEFObject] = {}
 
     @abstractmethod
-    def assemble_aoef(
-        self, obj: SoundEventObject, obj_id: AOEFKey
-    ) -> AOEFObject:
+    def assemble_aoef(self, obj: SoundEventObject, obj_id: AOEFKey) -> AOEFObject:
         """Create AOEF object from sound event object.
 
         Parameters
diff --git a/src/soundevent/io/aoef/annotation_project.py b/src/soundevent/io/aoef/annotation_project.py
index ef630b9..5046f5e 100644
--- a/src/soundevent/io/aoef/annotation_project.py
+++ b/src/soundevent/io/aoef/annotation_project.py
@@ -26,26 +26,18 @@ def __init__(
         **kwargs,
     ):
         super().__init__(**kwargs)
-        self.annotation_task_adapter = (
-            annotation_task_adapter
-            or AnnotationTaskAdapter(
-                self.clip_adapter,
-                self.user_adapter,
-            )
+        self.annotation_task_adapter = annotation_task_adapter or AnnotationTaskAdapter(
+            self.clip_adapter,
+            self.user_adapter,
         )
 
     def to_aoef(  # type: ignore
         self,
         obj: data.AnnotationProject,  # type: ignore
     ) -> AnnotationProjectObject:
-        tasks = [
-            self.annotation_task_adapter.to_aoef(task)
-            for task in obj.tasks or []
-        ]
+        tasks = [self.annotation_task_adapter.to_aoef(task) for task in obj.tasks or []]
 
-        project_tags = [
-            self.tag_adapter.to_aoef(tag).id for tag in obj.annotation_tags
-        ]
+        project_tags = [self.tag_adapter.to_aoef(tag).id for tag in obj.annotation_tags]
 
         annotation_set = super().to_aoef(obj)
 
@@ -75,16 +67,11 @@ def to_soundevent(  # type: ignore
         annotation_set = super().to_soundevent(obj)
 
         tasks = [
-            self.annotation_task_adapter.to_soundevent(task)
-            for task in obj.tasks or []
+            self.annotation_task_adapter.to_soundevent(task) for task in obj.tasks or []
         ]
 
         return data.AnnotationProject(
-            **{
-                field: value
-                for field, value in annotation_set
-                if value is not None
-            },
+            **{field: value for field, value in annotation_set if value is not None},
             tasks=tasks,
             name=obj.name,
             description=obj.description,
diff --git a/src/soundevent/io/aoef/annotation_set.py b/src/soundevent/io/aoef/annotation_set.py
index 9ed43f6..3feacb2 100644
--- a/src/soundevent/io/aoef/annotation_set.py
+++ b/src/soundevent/io/aoef/annotation_set.py
@@ -11,10 +11,7 @@
 from .note import NoteAdapter
 from .recording import RecordingAdapter, RecordingObject
 from .sequence import SequenceAdapter, SequenceObject
-from .sequence_annotation import (
-    SequenceAnnotationAdapter,
-    SequenceAnnotationObject,
-)
+from .sequence_annotation import SequenceAnnotationAdapter, SequenceAnnotationObject
 from .sound_event import SoundEventAdapter, SoundEventObject
 from .sound_event_annotation import (
     SoundEventAnnotationAdapter,
@@ -50,12 +47,8 @@ def __init__(
         sound_event_adapter: Optional[SoundEventAdapter] = None,
         sequence_adapter: Optional[SequenceAdapter] = None,
         clip_adapter: Optional[ClipAdapter] = None,
-        sound_event_annotations_adapter: Optional[
-            SoundEventAnnotationAdapter
-        ] = None,
-        sequence_annotations_adapter: Optional[
-            SequenceAnnotationAdapter
-        ] = None,
+        sound_event_annotations_adapter: Optional[SoundEventAnnotationAdapter] = None,
+        sequence_annotations_adapter: Optional[SequenceAnnotationAdapter] = None,
         clip_annotation_adapter: Optional[ClipAnnotationsAdapter] = None,
     ):
         self.user_adapter = user_adapter or UserAdapter()
@@ -148,14 +141,10 @@ def to_soundevent(
             self.sequence_adapter.to_soundevent(sequence)
 
         for sound_event_annotation in obj.sound_event_annotations or []:
-            self.sound_event_annotations_adapter.to_soundevent(
-                sound_event_annotation
-            )
+            self.sound_event_annotations_adapter.to_soundevent(sound_event_annotation)
 
         for sequence_annotation in obj.sequence_annotations or []:
-            self.sequence_annotations_adapter.to_soundevent(
-                sequence_annotation
-            )
+            self.sequence_annotations_adapter.to_soundevent(sequence_annotation)
 
         annotated_clips = [
             self.clip_annotation_adapter.to_soundevent(clip_annotation)
diff --git a/src/soundevent/io/aoef/clip_annotations.py b/src/soundevent/io/aoef/clip_annotations.py
index c74cbe6..dc4cd19 100644
--- a/src/soundevent/io/aoef/clip_annotations.py
+++ b/src/soundevent/io/aoef/clip_annotations.py
@@ -59,9 +59,7 @@ def assemble_aoef(
             ),
             sound_events=(
                 [
-                    self.sound_event_annotation_adapter.to_aoef(
-                        annotation
-                    ).uuid
+                    self.sound_event_annotation_adapter.to_aoef(annotation).uuid
                     for annotation in obj.sound_events
                 ]
                 if obj.sound_events
@@ -103,25 +101,16 @@ def assemble_soundevent(
                 se_ann
                 for annotation_id in obj.sound_events or []
                 if (
-                    se_ann := self.sound_event_annotation_adapter.from_id(
-                        annotation_id
-                    )
+                    se_ann := self.sound_event_annotation_adapter.from_id(annotation_id)
                 )
                 is not None
             ],
             sequences=[
                 seq_ann
                 for annotation_id in obj.sequences or []
-                if (
-                    seq_ann := self.sequence_annotation_adapter.from_id(
-                        annotation_id
-                    )
-                )
+                if (seq_ann := self.sequence_annotation_adapter.from_id(annotation_id))
                 is not None
             ],
-            notes=[
-                self.note_adapter.to_soundevent(note)
-                for note in obj.notes or []
-            ],
+            notes=[self.note_adapter.to_soundevent(note) for note in obj.notes or []],
             created_on=obj.created_on or datetime.datetime.now(),
        )
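Several hunks above reflow the same idiom: a comprehension that resolves an ID with `from_id` and keeps only non-`None` results via the walrus operator. A standalone sketch of the pattern, with a hypothetical `lookup` standing in for the adapter method:

from typing import Dict, List, Optional

_store: Dict[int, str] = {1: "first", 3: "third"}


def lookup(key: int) -> Optional[str]:
    # Stand-in for DataAdapter.from_id: may return None for unknown IDs.
    return _store.get(key)


# The walrus operator binds the looked-up value and filters out misses
# in a single pass, mirroring the comprehensions in the adapters above.
found: List[str] = [value for key in [1, 2, 3] if (value := lookup(key)) is not None]
assert found == ["first", "third"]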
diff --git a/src/soundevent/io/aoef/clip_evaluation.py b/src/soundevent/io/aoef/clip_evaluation.py
index 0cddee6..73fd191 100644
--- a/src/soundevent/io/aoef/clip_evaluation.py
+++ b/src/soundevent/io/aoef/clip_evaluation.py
@@ -50,10 +50,7 @@ def assemble_aoef(
             annotations=annotations.uuid,
             predictions=predictions.uuid,
             matches=(
-                [
-                    self.match_adapter.to_aoef(match).uuid
-                    for match in obj.matches
-                ]
+                [self.match_adapter.to_aoef(match).uuid for match in obj.matches]
                 if obj.matches
                 else None
             ),
@@ -73,14 +70,10 @@ def assemble_soundevent(
         predictions = self.clip_predictions_adapter.from_id(obj.predictions)
 
         if annotations is None:
-            raise ValueError(
-                f"Clip annotations with ID {obj.annotations} not found."
-            )
+            raise ValueError(f"Clip annotations with ID {obj.annotations} not found.")
 
         if predictions is None:
-            raise ValueError(
-                f"Clip predictions with ID {obj.predictions} not found."
-            )
+            raise ValueError(f"Clip predictions with ID {obj.predictions} not found.")
 
         matches = [
             match
diff --git a/src/soundevent/io/aoef/clip_predictions.py b/src/soundevent/io/aoef/clip_predictions.py
index df7dc2c..cb0c358 100644
--- a/src/soundevent/io/aoef/clip_predictions.py
+++ b/src/soundevent/io/aoef/clip_predictions.py
@@ -47,9 +47,7 @@ def assemble_aoef(
             clip=self.clip_adapter.to_aoef(obj.clip).uuid,
             sound_events=(
                 [
-                    self.sound_event_prediction_adapter.to_aoef(
-                        sound_event
-                    ).uuid
+                    self.sound_event_prediction_adapter.to_aoef(sound_event).uuid
                     for sound_event in obj.sound_events
                 ]
                 if obj.sound_events
@@ -67,8 +65,7 @@
                 [
                     (tag.id, predicted_tag.score)
                     for predicted_tag in obj.tags
-                    if (tag := self.tag_adapter.to_aoef(predicted_tag.tag))
-                    is not None
+                    if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) is not None
                 ]
                 if obj.tags
                 else None
@@ -95,21 +92,13 @@ def assemble_soundevent(
             sound_events=[
                 se_pred
                 for sound_event in obj.sound_events or []
-                if (
-                    se_pred := self.sound_event_prediction_adapter.from_id(
-                        sound_event
-                    )
-                )
+                if (se_pred := self.sound_event_prediction_adapter.from_id(sound_event))
                 is not None
             ],
             sequences=[
                 seq_pred
                 for sequence in obj.sequences or []
-                if (
-                    seq_pred := self.sequence_prediction_adapter.from_id(
-                        sequence
-                    )
-                )
+                if (seq_pred := self.sequence_prediction_adapter.from_id(sequence))
                 is not None
             ],
             tags=[
diff --git a/src/soundevent/io/aoef/dataset.py b/src/soundevent/io/aoef/dataset.py
index e667bf4..c62bc59 100644
--- a/src/soundevent/io/aoef/dataset.py
+++ b/src/soundevent/io/aoef/dataset.py
@@ -35,9 +35,7 @@ def to_soundevent(  # type: ignore
     ) -> data.Dataset:
         recording_set = super().to_soundevent(obj)
         return data.Dataset(
-            **{
-                key: value for key, value in recording_set if value is not None
-            },
+            **{key: value for key, value in recording_set if value is not None},
             name=obj.name,
             description=obj.description,
         )
diff --git a/src/soundevent/io/aoef/evaluation.py b/src/soundevent/io/aoef/evaluation.py
index e53e41f..91b2dfe 100644
--- a/src/soundevent/io/aoef/evaluation.py
+++ b/src/soundevent/io/aoef/evaluation.py
@@ -14,14 +14,8 @@
 from .note import NoteAdapter
 from .recording import RecordingAdapter, RecordingObject
 from .sequence import SequenceAdapter, SequenceObject
-from .sequence_annotation import (
-    SequenceAnnotationAdapter,
-    SequenceAnnotationObject,
-)
-from .sequence_prediction import (
-    SequencePredictionAdapter,
-    SequencePredictionObject,
-)
+from .sequence_annotation import SequenceAnnotationAdapter, SequenceAnnotationObject
+from .sequence_prediction import SequencePredictionAdapter, SequencePredictionObject
 from .sound_event import SoundEventAdapter, SoundEventObject
 from .sound_event_annotation import (
     SoundEventAnnotationAdapter,
@@ -69,19 +63,11 @@ def __init__(
         sound_event_adapter: Optional[SoundEventAdapter] = None,
         sequence_adapter: Optional[SequenceAdapter] = None,
         clip_adapter: Optional[ClipAdapter] = None,
-        sound_event_annotation_adapter: Optional[
-            SoundEventAnnotationAdapter
-        ] = None,
-        sequence_annotation_adapter: Optional[
-            SequenceAnnotationAdapter
-        ] = None,
+        sound_event_annotation_adapter: Optional[SoundEventAnnotationAdapter] = None,
+        sequence_annotation_adapter: Optional[SequenceAnnotationAdapter] = None,
         clip_annotations_adapter: Optional[ClipAnnotationsAdapter] = None,
-        sound_event_prediction_adapter: Optional[
-            SoundEventPredictionAdapter
-        ] = None,
-        sequence_prediction_adapter: Optional[
-            SequencePredictionAdapter
-        ] = None,
+        sound_event_prediction_adapter: Optional[SoundEventPredictionAdapter] = None,
+        sequence_prediction_adapter: Optional[SequencePredictionAdapter] = None,
         clip_predictions_adapter: Optional[ClipPredictionsAdapter] = None,
         clip_evaluation_adapter: Optional[ClipEvaluationAdapter] = None,
         match_adapter: Optional[MatchAdapter] = None,
@@ -158,14 +144,11 @@ def __init__(
             self.sound_event_annotation_adapter,
             self.sound_event_prediction_adapter,
         )
-        self.clip_evaluation_adapter = (
-            clip_evaluation_adapter
-            or ClipEvaluationAdapter(
-                self.clip_annotations_adapter,
-                self.clip_predictions_adapter,
-                self.note_adapter,
-                self.match_adapter,
-            )
+        self.clip_evaluation_adapter = clip_evaluation_adapter or ClipEvaluationAdapter(
+            self.clip_annotations_adapter,
+            self.clip_predictions_adapter,
+            self.note_adapter,
+            self.match_adapter,
         )
 
     def to_aoef(self, obj: data.Evaluation) -> EvaluationObject:
@@ -225,9 +208,7 @@ def to_soundevent(
             self.clip_adapter.to_soundevent(clip)
 
         for sound_event_annotation in obj.sound_event_annotations or []:
-            self.sound_event_annotation_adapter.to_soundevent(
-                sound_event_annotation
-            )
+            self.sound_event_annotation_adapter.to_soundevent(sound_event_annotation)
 
         for sequence_annotation in obj.sequence_annotations or []:
             self.sequence_annotation_adapter.to_soundevent(sequence_annotation)
@@ -236,9 +217,7 @@
             self.clip_annotations_adapter.to_soundevent(clip_annotation)
 
         for sound_event_prediction in obj.sound_event_predictions or []:
-            self.sound_event_prediction_adapter.to_soundevent(
-                sound_event_prediction
-            )
+            self.sound_event_prediction_adapter.to_soundevent(sound_event_prediction)
 
         for sequence_prediction in obj.sequence_predictions or []:
             self.sequence_prediction_adapter.to_soundevent(sequence_prediction)
diff --git a/src/soundevent/io/aoef/evaluation_set.py b/src/soundevent/io/aoef/evaluation_set.py
index 53b9e9f..0242ed2 100644
--- a/src/soundevent/io/aoef/evaluation_set.py
+++ b/src/soundevent/io/aoef/evaluation_set.py
@@ -35,10 +35,7 @@ def to_aoef(  # type: ignore
             name=obj.name,
             description=obj.description,
             evaluation_tags=(
-                [
-                    self.tag_adapter.to_aoef(tag).id
-                    for tag in obj.evaluation_tags
-                ]
+                [self.tag_adapter.to_aoef(tag).id for tag in obj.evaluation_tags]
                 if obj.evaluation_tags
                 else None
             ),
@@ -50,11 +47,7 @@ def to_soundevent(  # type: ignore
     ) -> data.EvaluationSet:
         annotation_set = super().to_soundevent(obj)
         return data.EvaluationSet(
-            **{
-                field: value
-                for field, value in annotation_set
-                if value is not None
-            },
+            **{field: value for field, value in annotation_set if value is not None},
             name=obj.name,
             description=obj.description,
             evaluation_tags=[
diff --git a/src/soundevent/io/aoef/match.py b/src/soundevent/io/aoef/match.py
index fcc2a9a..0b76dda 100644
--- a/src/soundevent/io/aoef/match.py
+++ b/src/soundevent/io/aoef/match.py
@@ -36,16 +36,12 @@ def assemble_aoef(
     ) -> MatchObject:
         source = None
         if obj.source is not None:
-            prediction = self.sound_event_prediction_adapter.to_aoef(
-                obj.source
-            )
+            prediction = self.sound_event_prediction_adapter.to_aoef(obj.source)
             source = prediction.uuid if prediction is not None else None
 
         target = None
         if obj.target is not None:
-            annotation = self.sound_event_annotation_adapter.to_aoef(
-                obj.target
-            )
+            annotation = self.sound_event_annotation_adapter.to_aoef(obj.target)
             target = annotation.uuid if annotation is not None else None
 
         return MatchObject(
diff --git a/src/soundevent/io/aoef/prediction_set.py b/src/soundevent/io/aoef/prediction_set.py
index 2c55188..58246b3 100644
--- a/src/soundevent/io/aoef/prediction_set.py
+++ b/src/soundevent/io/aoef/prediction_set.py
@@ -11,10 +11,7 @@
 from .note import NoteAdapter
 from .recording import RecordingAdapter, RecordingObject
 from .sequence import SequenceAdapter, SequenceObject
-from .sequence_prediction import (
-    SequencePredictionAdapter,
-    SequencePredictionObject,
-)
+from .sequence_prediction import SequencePredictionAdapter, SequencePredictionObject
 from .sound_event import SoundEventAdapter, SoundEventObject
 from .sound_event_prediction import (
     SoundEventPredictionAdapter,
@@ -50,12 +47,8 @@ def __init__(
         sound_event_adapter: Optional[SoundEventAdapter] = None,
         sequence_adapter: Optional[SequenceAdapter] = None,
         clip_adapter: Optional[ClipAdapter] = None,
-        sound_event_prediction_adapter: Optional[
-            SoundEventPredictionAdapter
-        ] = None,
-        sequence_prediction_adapter: Optional[
-            SequencePredictionAdapter
-        ] = None,
+        sound_event_prediction_adapter: Optional[SoundEventPredictionAdapter] = None,
+        sequence_prediction_adapter: Optional[SequencePredictionAdapter] = None,
         clip_predictions_adapter: Optional[ClipPredictionsAdapter] = None,
     ):
         self.user_adapter = user_adapter or UserAdapter()
@@ -136,9 +129,7 @@ def to_soundevent(self, obj: PredictionSetObject) -> data.PredictionSet:
             self.clip_adapter.to_soundevent(clip)
 
         for sound_event_prediction in obj.sound_event_predictions or []:
-            self.sound_event_prediction_adapter.to_soundevent(
-                sound_event_prediction
-            )
+            self.sound_event_prediction_adapter.to_soundevent(sound_event_prediction)
 
         for sequence_prediction in obj.sequence_predictions or []:
             self.sequence_prediction_adapter.to_soundevent(sequence_prediction)
diff --git a/src/soundevent/io/aoef/recording.py b/src/soundevent/io/aoef/recording.py
index 59e9021..dbfc61a 100644
--- a/src/soundevent/io/aoef/recording.py
+++ b/src/soundevent/io/aoef/recording.py
@@ -34,9 +34,7 @@ class RecordingObject(BaseModel):
     rights: Optional[str] = None
 
 
-class RecordingAdapter(
-    DataAdapter[data.Recording, RecordingObject, UUID, UUID]
-):
+class RecordingAdapter(DataAdapter[data.Recording, RecordingObject, UUID, UUID]):
     def __init__(
         self,
         user_adapter: UserAdapter,
@@ -59,10 +57,7 @@ def assemble_aoef(
 
         notes = [self._note_adapter.to_aoef(note) for note in obj.notes]
 
-        owners = [
-            self._user_adapter.to_aoef(owner).uuid
-            for owner in obj.owners or []
-        ]
+        owners = [self._user_adapter.to_aoef(owner).uuid for owner in obj.owners or []]
 
         path = obj.path
         if self.audio_dir is not None:
@@ -74,9 +69,7 @@
             duration=obj.duration,
             channels=obj.channels,
             samplerate=obj.samplerate,
-            time_expansion=(
-                obj.time_expansion if obj.time_expansion != 1.0 else None
-            ),
+            time_expansion=(obj.time_expansion if obj.time_expansion != 1.0 else None),
             hash=obj.hash,
             date=obj.date,
             time=obj.time,
@@ -100,10 +93,7 @@ def assemble_soundevent(self, obj: RecordingObject) -> data.Recording:
             if (tag := self._tag_adapter.from_id(tag_id)) is not None
         ]
 
-        notes = [
-            self._note_adapter.to_soundevent(note)
-            for note in (obj.notes or [])
-        ]
+        notes = [self._note_adapter.to_soundevent(note) for note in (obj.notes or [])]
 
         owners = [
             user
diff --git a/src/soundevent/io/aoef/recording_set.py b/src/soundevent/io/aoef/recording_set.py
index 3e7c95a..3707d6d 100644
--- a/src/soundevent/io/aoef/recording_set.py
+++ b/src/soundevent/io/aoef/recording_set.py
@@ -47,8 +47,7 @@ def to_aoef(
         obj: data.RecordingSet,
     ) -> RecordingSetObject:
         recording_objects = [
-            self.recording_adapter.to_aoef(recording)
-            for recording in obj.recordings
+            self.recording_adapter.to_aoef(recording) for recording in obj.recordings
         ]
         return RecordingSetObject(
             uuid=obj.uuid,
diff --git a/src/soundevent/io/aoef/sequence.py b/src/soundevent/io/aoef/sequence.py
index 2c1e53e..ec624b2 100644
--- a/src/soundevent/io/aoef/sequence.py
+++ b/src/soundevent/io/aoef/sequence.py
@@ -28,9 +28,7 @@ def __init__(
         super().__init__()
         self.soundevent_adapter = soundevent_adapter
 
-    def assemble_aoef(
-        self, obj: data.Sequence, obj_id: UUID
-    ) -> SequenceObject:
+    def assemble_aoef(self, obj: data.Sequence, obj_id: UUID) -> SequenceObject:
         parent = None
         if obj.parent:
             parent = self.to_aoef(obj.parent).uuid
diff --git a/src/soundevent/io/aoef/sequence_prediction.py b/src/soundevent/io/aoef/sequence_prediction.py
index 636f3df..1716d7d 100644
--- a/src/soundevent/io/aoef/sequence_prediction.py
+++ b/src/soundevent/io/aoef/sequence_prediction.py
@@ -42,8 +42,7 @@ def assemble_aoef(
                 [
                     (tag.id, predicted_tag.score)
                     for predicted_tag in obj.tags
-                    if (tag := self.tag_adapter.to_aoef(predicted_tag.tag))
-                    is not None
+                    if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) is not None
                 ]
                 if obj.tags
                 else None
diff --git a/src/soundevent/io/aoef/sound_event.py b/src/soundevent/io/aoef/sound_event.py
index 9e5a175..424a20c 100644
--- a/src/soundevent/io/aoef/sound_event.py
+++ b/src/soundevent/io/aoef/sound_event.py
@@ -18,9 +18,7 @@ class SoundEventObject(BaseModel):
     features: Optional[Dict[str, float]] = None
 
 
-class SoundEventAdapter(
-    DataAdapter[data.SoundEvent, SoundEventObject, UUID, UUID]
-):
+class SoundEventAdapter(DataAdapter[data.SoundEvent, SoundEventObject, UUID, UUID]):
     def __init__(
         self,
         recording_adapter: RecordingAdapter,
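The one-line class headers above, e.g. `DataAdapter[data.SoundEvent, SoundEventObject, UUID, UUID]`, fill in the type parameters declared on the generic base class. A rough sketch of how such a parametrised base is declared and consumed (names here are illustrative, not soundevent's own):

from abc import ABC, abstractmethod
from typing import Generic, TypeVar

Internal = TypeVar("Internal")
External = TypeVar("External")


class Adapter(ABC, Generic[Internal, External]):
    @abstractmethod
    def to_external(self, obj: Internal) -> External: ...


class IntAdapter(Adapter[int, str]):
    # Subscripting the base binds Internal=int and External=str,
    # so type checkers can verify the override below.
    def to_external(self, obj: int) -> str:
        return str(obj)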
diff --git a/src/soundevent/io/aoef/sound_event_annotation.py b/src/soundevent/io/aoef/sound_event_annotation.py
index afa07c3..67733d4 100644
--- a/src/soundevent/io/aoef/sound_event_annotation.py
+++ b/src/soundevent/io/aoef/sound_event_annotation.py
@@ -22,9 +22,7 @@ class SoundEventAnnotationObject(BaseModel):
 
 
 class SoundEventAnnotationAdapter(
-    DataAdapter[
-        data.SoundEventAnnotation, SoundEventAnnotationObject, UUID, UUID
-    ]
+    DataAdapter[data.SoundEventAnnotation, SoundEventAnnotationObject, UUID, UUID]
 ):
     def __init__(
         self,
@@ -68,9 +66,7 @@ def assemble_soundevent(
         sound_event = self.sound_event_adapter.from_id(obj.sound_event)
 
         if sound_event is None:
-            raise ValueError(
-                f"Sound event with ID {obj.sound_event} not found."
-            )
+            raise ValueError(f"Sound event with ID {obj.sound_event} not found.")
 
         return data.SoundEventAnnotation(
             uuid=obj.uuid,
diff --git a/src/soundevent/io/aoef/sound_event_prediction.py b/src/soundevent/io/aoef/sound_event_prediction.py
index 175e06a..007f4ae 100644
--- a/src/soundevent/io/aoef/sound_event_prediction.py
+++ b/src/soundevent/io/aoef/sound_event_prediction.py
@@ -18,9 +18,7 @@ class SoundEventPredictionObject(BaseModel):
 
 
 class SoundEventPredictionAdapter(
-    DataAdapter[
-        data.SoundEventPrediction, SoundEventPredictionObject, UUID, UUID
-    ]
+    DataAdapter[data.SoundEventPrediction, SoundEventPredictionObject, UUID, UUID]
 ):
     def __init__(
         self,
@@ -44,8 +42,7 @@ def assemble_aoef(
                 [
                     (tag.id, predicted_tag.score)
                     for predicted_tag in obj.tags
-                    if (tag := self.tag_adapter.to_aoef(predicted_tag.tag))
-                    is not None
+                    if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) is not None
                 ]
                 if obj.tags
                 else None
@@ -59,9 +56,7 @@ def assemble_soundevent(
         sound_event = self.sound_event_adapter.from_id(obj.sound_event)
 
         if sound_event is None:
-            raise ValueError(
-                f"Sound event with ID {obj.sound_event} not found."
-            )
+            raise ValueError(f"Sound event with ID {obj.sound_event} not found.")
 
         return data.SoundEventPrediction(
             uuid=obj.uuid or uuid4(),
diff --git a/src/soundevent/io/crowsetta/__init__.py b/src/soundevent/io/crowsetta/__init__.py
index a933a8f..63fc6a2 100644
--- a/src/soundevent/io/crowsetta/__init__.py
+++ b/src/soundevent/io/crowsetta/__init__.py
@@ -8,10 +8,7 @@
     annotation_from_clip_annotation,
     annotation_to_clip_annotation,
 )
-from soundevent.io.crowsetta.bbox import (
-    bbox_from_annotation,
-    bbox_to_annotation,
-)
+from soundevent.io.crowsetta.bbox import bbox_from_annotation, bbox_to_annotation
 from soundevent.io.crowsetta.labels import (
     label_from_tag,
     label_from_tags,
diff --git a/src/soundevent/io/crowsetta/annotation.py b/src/soundevent/io/crowsetta/annotation.py
index 240c4c3..6a8e91e 100644
--- a/src/soundevent/io/crowsetta/annotation.py
+++ b/src/soundevent/io/crowsetta/annotation.py
@@ -5,10 +5,7 @@
 import crowsetta
 
 from soundevent import data
-from soundevent.io.crowsetta.bbox import (
-    bbox_from_annotation,
-    bbox_to_annotation,
-)
+from soundevent.io.crowsetta.bbox import bbox_from_annotation, bbox_to_annotation
 from soundevent.io.crowsetta.sequence import (
     sequence_from_annotations,
     sequence_to_annotations,
@@ -88,8 +85,7 @@ def annotation_from_clip_annotation(
 
     if annotation_fmt != "seq":
         raise ValueError(
-            "annotation_fmt must be either 'bbox' or 'seq', "
-            f"not {annotation_fmt}."
+            "annotation_fmt must be either 'bbox' or 'seq', " f"not {annotation_fmt}."
         )
 
     return crowsetta.Annotation(
@@ -176,8 +172,7 @@ def annotation_to_clip_annotation(
 
     if path is not None and path != recording.path:
         raise ValueError(
-            "The path of the annotation does not match the path of the "
-            "recording."
+            "The path of the annotation does not match the path of the " "recording."
         )
 
     sound_event_annotations = []
@@ -195,9 +190,9 @@
             )
         )
 
-    crowsetta_sequences: Union[
-        List[crowsetta.Sequence], crowsetta.Sequence
-    ] = getattr(annot, "seq", [])
+    crowsetta_sequences: Union[List[crowsetta.Sequence], crowsetta.Sequence] = getattr(
+        annot, "seq", []
+    )
 
     if not isinstance(crowsetta_sequences, list):
         crowsetta_sequences = [crowsetta_sequences]
diff --git a/src/soundevent/io/crowsetta/bbox.py b/src/soundevent/io/crowsetta/bbox.py
index 78e6a98..3854a9c 100644
--- a/src/soundevent/io/crowsetta/bbox.py
+++ b/src/soundevent/io/crowsetta/bbox.py
@@ -23,10 +23,7 @@ def convert_geometry_to_bbox(
             "because the sound event geometry is not a BoundingBox."
         )
 
-    if (
-        geometry.type in ["TimeInterval", "TimeStamp"]
-        and raise_on_time_geometries
-    ):
+    if geometry.type in ["TimeInterval", "TimeStamp"] and raise_on_time_geometries:
         raise ValueError(
             "Cannot convert to a crowsetta bbox because "
             "the sound event geometry is a TimeInterval or TimeStamp "
@@ -162,9 +159,7 @@ def bbox_to_annotation(
         low_freq = low_freq * recording.time_expansion
         high_freq = high_freq * recording.time_expansion
 
-    geometry = data.BoundingBox(
-        coordinates=[start_time, low_freq, end_time, high_freq]
-    )
+    geometry = data.BoundingBox(coordinates=[start_time, low_freq, end_time, high_freq])
 
     tags = label_to_tags(bbox.label, **kwargs)
 
diff --git a/src/soundevent/io/formats.py b/src/soundevent/io/formats.py
index 8f90b14..a23ae6e 100644
--- a/src/soundevent/io/formats.py
+++ b/src/soundevent/io/formats.py
@@ -36,6 +36,4 @@ def infer_format(path: PathLike) -> str:
         if inferrer(path):
             return format_
 
-    raise ValueError(
-        f"Cannot infer format of file {path}, or format not supported."
-    )
+    raise ValueError(f"Cannot infer format of file {path}, or format not supported.")
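A quirk visible in several of the reformatted error messages above (e.g. `"... of the " "recording."`): the formatter joined the lines but left two adjacent string literals, which Python concatenates implicitly at compile time. A small illustration:

# Adjacent string literals are concatenated by the parser, so both
# expressions below produce exactly the same message.
joined = "The path of the annotation does not match the path of the " "recording."
single = "The path of the annotation does not match the path of the recording."
assert joined == single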
diff --git a/src/soundevent/plot/annotation.py b/src/soundevent/plot/annotation.py
index 6a79b9a..fc17e24 100644
--- a/src/soundevent/plot/annotation.py
+++ b/src/soundevent/plot/annotation.py
@@ -75,8 +75,7 @@ def get_tags_position(
 
     if func is None:
         raise NotImplementedError(
-            f"Plotting tags for geometry of type {geometry.type} "
-            "is not implemented."
+            f"Plotting tags for geometry of type {geometry.type} " "is not implemented."
         )
 
     return func(geometry, bounds)
@@ -117,9 +116,7 @@ def _get_tags_position_bounding_box(
 
 _TAG_POSITION_FUNCTIONS: Dict[
     data.GeometryType,
-    Callable[
-        [data.Geometry, Tuple[float, float, float, float]], Tuple[float, float]
-    ],
+    Callable[[data.Geometry, Tuple[float, float, float, float]], Tuple[float, float]],
 ] = {
     data.BoundingBox.geom_type(): _get_tags_position_bounding_box,
 }
diff --git a/src/soundevent/plot/geometries.py b/src/soundevent/plot/geometries.py
index f595ff2..607c0af 100644
--- a/src/soundevent/plot/geometries.py
+++ b/src/soundevent/plot/geometries.py
@@ -58,8 +58,7 @@ def _plot_bounding_box_geometry(
 ) -> Axes:
     if not isinstance(geometry, data.BoundingBox):
         raise ValueError(
-            f"Expected geometry of type {data.BoundingBox}, "
-            f"got {type(geometry)}."
+            f"Expected geometry of type {data.BoundingBox}, " f"got {type(geometry)}."
         )
 
     start_time, low_freq, end_time, high_freq = geometry.coordinates
diff --git a/src/soundevent/plot/tags.py b/src/soundevent/plot/tags.py
index 73cc25b..96581a8 100644
--- a/src/soundevent/plot/tags.py
+++ b/src/soundevent/plot/tags.py
@@ -29,9 +29,7 @@ def __init__(
         self._tags: Dict[data.Tag, str] = {}
 
         colormap = get_cmap(cmap)
-        self._colors = cycle(
-            [colormap(x) for x in np.linspace(0, 1, num_colors)]
-        )
+        self._colors = cycle([colormap(x) for x in np.linspace(0, 1, num_colors)])
 
     def get_color(self, tag: data.Tag) -> str:
         """Get color for tag."""
diff --git a/src/soundevent/types.py b/src/soundevent/types.py
index 9790044..88a3ffe 100644
--- a/src/soundevent/types.py
+++ b/src/soundevent/types.py
@@ -1,9 +1,10 @@
 """Common types and interfaces within bioacoustic analysis."""
 
-from abc import abstractmethod, ABC
-from soundevent import data
+from abc import ABC, abstractmethod
 from typing import List, Optional
 
+from soundevent import data
+
 
 class ClassMapper(ABC):
     """Abstract class for encoding and decoding labels."""
diff --git a/tests/test_array/test_dimensions.py b/tests/test_array/test_dimensions.py
index ab31c39..598d295 100644
--- a/tests/test_array/test_dimensions.py
+++ b/tests/test_array/test_dimensions.py
@@ -183,9 +183,7 @@ def test_create_frequency_dim_from_array_sets_attrs():
 def test_create_frequency_dim_from_array_estimates_step():
     """Test create_frequency_dim_from_array function."""
     arr = np.array([1, 2, 3])
-    frequency_dim = arrays.create_frequency_dim_from_array(
-        arr, estimate_step=True
-    )
+    frequency_dim = arrays.create_frequency_dim_from_array(arr, estimate_step=True)
     assert frequency_dim.attrs["step"] == 1
diff --git a/tests/test_audio/test_io.py b/tests/test_audio/test_io.py
index 2aaaa72..460e3b5 100644
--- a/tests/test_audio/test_io.py
+++ b/tests/test_audio/test_io.py
@@ -27,9 +27,7 @@ def test_audio_to_bytes_has_correct_length(
     dtype: np.dtype,
 ):
     samples = int(duration * samplerate)
-    array = np.random.random(
-        size=[int(duration * samplerate), channels]
-    ).astype(dtype)
+    array = np.random.random(size=[int(duration * samplerate), channels]).astype(dtype)
 
     bytes_per_sample = (bit_depth // 8) * channels
     expected_bytes = samples * bytes_per_sample
diff --git a/tests/test_data/test_datasets.py b/tests/test_data/test_datasets.py
index 1176655..a72938e 100644
--- a/tests/test_data/test_datasets.py
+++ b/tests/test_data/test_datasets.py
@@ -52,9 +52,7 @@ def test_create_dataset_ignores_non_audio_files(tmp_path: Path):
 def test_create_dataset_fails_with_non_existing_directory():
     """Test that we can create a dataset from audio files."""
     with pytest.raises(ValueError):
-        data.Dataset.from_directory(
-            Path("non-existing-directory"), name="test"
-        )
+        data.Dataset.from_directory(Path("non-existing-directory"), name="test")
 
 
 def test_create_dataset_fails_if_path_is_file(tmp_path: Path):
@@ -78,9 +76,7 @@ def test_create_dataset_without_recursive(tmp_path: Path, random_wav):
     """Test that we can create a dataset from audio files."""
     (tmp_path / "test1").mkdir()
     random_wav(path=tmp_path / "test1" / "test1.wav")
-    dataset = data.Dataset.from_directory(
-        tmp_path, recursive=False, name="test"
-    )
+    dataset = data.Dataset.from_directory(tmp_path, recursive=False, name="test")
     assert len(dataset.recordings) == 0
diff --git a/tests/test_data/test_geometry.py b/tests/test_data/test_geometry.py
index 94bbea4..ac0e45e 100644
--- a/tests/test_data/test_geometry.py
+++ b/tests/test_data/test_geometry.py
@@ -184,9 +184,7 @@ def test_load_multilinestring_from_dict():
 def test_load_multilinestring_from_attributes():
     """Test that a MultiLineString can be loaded from attributes."""
-    obj = data.MultiLineString(
-        coordinates=[[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
-    )
+    obj = data.MultiLineString(coordinates=[[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
     geom = data.geometry_validate(obj, mode="attributes")
     assert isinstance(geom, data.MultiLineString)
     assert geom.coordinates == [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
diff --git a/tests/test_evaluation/test_clip_classification.py b/tests/test_evaluation/test_clip_classification.py
index be8a681..48790e4 100644
--- a/tests/test_evaluation/test_clip_classification.py
+++ b/tests/test_evaluation/test_clip_classification.py
@@ -156,9 +156,7 @@ def test_evaluation_has_balanced_accuracy(
         tags=evaluation_tags,
     )
 
-    balanced_accuracy = data.find_feature(
-        evaluation.metrics, name="balanced_accuracy"
-    )
+    balanced_accuracy = data.find_feature(evaluation.metrics, name="balanced_accuracy")
     assert balanced_accuracy is not None
     assert math.isclose(balanced_accuracy.value, 0.5, rel_tol=1e-6)
 
@@ -175,9 +173,7 @@ def test_evaluation_has_top_3_accuracy(
         tags=evaluation_tags,
     )
 
-    top_3_accuracy = data.find_feature(
-        evaluation.metrics, name="top_3_accuracy"
-    )
+    top_3_accuracy = data.find_feature(evaluation.metrics, name="top_3_accuracy")
     assert top_3_accuracy is not None
     assert math.isclose(top_3_accuracy.value, 1.0, rel_tol=1e-6)
 
@@ -244,11 +240,7 @@ def test_each_example_score_is_the_probability_of_the_true_class(
     assert len(evaluation.clip_evaluations[1].metrics) == 1
 
     assert evaluation.clip_evaluations[0].score is not None
-    assert math.isclose(
-        evaluation.clip_evaluations[0].score, 0.9, rel_tol=1e-6
-    )
+    assert math.isclose(evaluation.clip_evaluations[0].score, 0.9, rel_tol=1e-6)
 
     assert evaluation.clip_evaluations[1].score is not None
-    assert math.isclose(
-        evaluation.clip_evaluations[1].score, 0.1, rel_tol=1e-6
-    )
+    assert math.isclose(evaluation.clip_evaluations[1].score, 0.1, rel_tol=1e-6)
diff --git a/tests/test_evaluation/test_encode.py b/tests/test_evaluation/test_encode.py
index cd2758b..1d7bb50 100644
--- a/tests/test_evaluation/test_encode.py
+++ b/tests/test_evaluation/test_encode.py
@@ -16,9 +16,7 @@
 
 
 @pytest.fixture
-def tags(
-    random_tags: Callable[[int], Sequence[data.Tag]]
-) -> Sequence[data.Tag]:
+def tags(random_tags: Callable[[int], Sequence[data.Tag]]) -> Sequence[data.Tag]:
     """Tags for testing."""
     return random_tags(10)
diff --git a/tests/test_evaluation/test_matching.py b/tests/test_evaluation/test_matching.py
index 0c858da..8ce744d 100644
--- a/tests/test_evaluation/test_matching.py
+++ b/tests/test_evaluation/test_matching.py
@@ -96,9 +96,7 @@ def test_multi_linestring_is_supported():
 
 
 def test_multi_polygon_is_supported():
-    multi_polygon = data.MultiPolygon(
-        coordinates=[[[[1, 2], [4, 3], [5, 6], [1, 2]]]]
-    )
+    multi_polygon = data.MultiPolygon(coordinates=[[[[1, 2], [4, 3], [5, 6], [1, 2]]]])
     matches = list(match_geometries([multi_polygon], [multi_polygon]))
     assert len(matches) == 1
     source_index, target_index, affinity = matches[0]
diff --git a/tests/test_evaluation/test_sound_event_detection.py b/tests/test_evaluation/test_sound_event_detection.py
index 09080e5..a798832 100644
--- a/tests/test_evaluation/test_sound_event_detection.py
+++ b/tests/test_evaluation/test_sound_event_detection.py
@@ -28,9 +28,7 @@ def test_can_evaluate_nips_data():
     assert isinstance(evaluation, data.Evaluation)
 
     # check that all clips have been evaluated
-    assert len(evaluation.clip_evaluations) == len(
-        evaluation_set.clip_annotations
-    )
+    assert len(evaluation.clip_evaluations) == len(evaluation_set.clip_annotations)
 
     # check that all metrics are present
     assert len(evaluation.metrics) == 4
diff --git a/tests/test_io/test_annotation_projects.py b/tests/test_io/test_annotation_projects.py
index 85a6874..580b487 100644
--- a/tests/test_io/test_annotation_projects.py
+++ b/tests/test_io/test_annotation_projects.py
@@ -40,9 +40,7 @@ def test_saved_annotation_project_is_saved_to_json_file(
     assert path.exists()
 
 
-def test_saved_annotation_project_has_correct_info(
-    monkeypatch, tmp_path: Path
-) -> None:
+def test_saved_annotation_project_has_correct_info(monkeypatch, tmp_path: Path) -> None:
     """Test that the saved annotation project has the correct info."""
     # Arrange
     annotation_project = data.AnnotationProject(
@@ -175,10 +173,7 @@ def test_can_recover_task_status(
 
     # Assert
     assert recovered == annotation_project
-    assert (
-        recovered.tasks[0].status_badges[0].state
-        == data.AnnotationState.completed
-    )
+    assert recovered.tasks[0].status_badges[0].state == data.AnnotationState.completed
 
 
 def test_can_recover_user_that_completed_task(
@@ -285,9 +280,7 @@ def test_can_recover_task_simple_annotation(
         clip_annotations=[
             data.ClipAnnotation(
                 clip=clip,
-                sound_events=[
-                    data.SoundEventAnnotation(sound_event=sound_event)
-                ],
+                sound_events=[data.SoundEventAnnotation(sound_event=sound_event)],
             )
         ],
         tasks=[data.AnnotationTask(clip=clip)],
@@ -301,8 +294,7 @@ def test_can_recover_task_simple_annotation(
     # Assert
     assert recovered == annotation_project
     assert (
-        recovered.clip_annotations[0].sound_events[0].sound_event.geometry
-        is not None
+        recovered.clip_annotations[0].sound_events[0].sound_event.geometry is not None
     )
     assert sound_event.geometry is not None
     assert (
@@ -310,9 +302,7 @@
         == sound_event.geometry.type
     )
     assert (
-        recovered.clip_annotations[0]
-        .sound_events[0]
-        .sound_event.geometry.coordinates
+        recovered.clip_annotations[0].sound_events[0].sound_event.geometry.coordinates
        == sound_event.geometry.coordinates
     )
 
@@ -352,13 +342,8 @@ def test_can_recover_task_annotation_with_tags(
 
     # Assert
     assert recovered == annotation_project
-    assert (
-        recovered.clip_annotations[0].sound_events[0].tags[0].key == "species"
-    )
-    assert (
-        recovered.clip_annotations[0].sound_events[0].tags[0].value
-        == "test_species"
-    )
+    assert recovered.clip_annotations[0].sound_events[0].tags[0].key == "species"
+    assert recovered.clip_annotations[0].sound_events[0].tags[0].value == "test_species"
 
 
 def test_can_recover_annotation_creator(
@@ -409,9 +394,7 @@ def test_can_recover_annotation_creation_date(
             data.ClipAnnotation(
                 clip=clip,
                 sound_events=[
-                    data.SoundEventAnnotation(
-                        sound_event=sound_event, created_on=date
-                    )
+                    data.SoundEventAnnotation(sound_event=sound_event, created_on=date)
                 ],
             ),
         ],
@@ -464,14 +447,8 @@ def test_can_recover_annotation_notes(
 
     # Assert
     assert recovered == annotation_project
-    assert (
-        recovered.clip_annotations[0].sound_events[0].notes[0].message
-        == "test_note"
-    )
-    assert (
-        recovered.clip_annotations[0].sound_events[0].notes[0].created_by
-        == user
-    )
+    assert recovered.clip_annotations[0].sound_events[0].notes[0].message == "test_note"
+    assert recovered.clip_annotations[0].sound_events[0].notes[0].created_by == user
 
 
 def test_can_recover_sound_event_features(
@@ -513,17 +490,11 @@ def test_can_recover_sound_event_features(
     # Assert
     assert recovered == annotation_project
     assert (
-        recovered.clip_annotations[0]
-        .sound_events[0]
-        .sound_event.features[0]
-        .name
+        recovered.clip_annotations[0].sound_events[0].sound_event.features[0].name
        == "duration"
     )
     assert (
-        recovered.clip_annotations[0]
-        .sound_events[0]
-        .sound_event.features[0]
-        .value
+        recovered.clip_annotations[0].sound_events[0].sound_event.features[0].value
         == 1.0
     )
 
@@ -564,9 +535,7 @@ def test_recording_paths_are_stored_as_relative_if_audio_dir_is_provided(
 
 def test_can_parse_nips4plus(tmp_path: Path):
     """Test that NIPS4BPlus annotations can be parsed."""
-    original_path = (
-        BASE_DIR / "docs" / "user_guide" / "nips4b_plus_sample.json"
-    )
+    original_path = BASE_DIR / "docs" / "user_guide" / "nips4b_plus_sample.json"
     path = tmp_path / "test.json"
 
     # Act
diff --git a/tests/test_io/test_aoef/test_api.py b/tests/test_io/test_aoef/test_api.py
index a278e59..b671410 100644
--- a/tests/test_io/test_aoef/test_api.py
+++ b/tests/test_io/test_aoef/test_api.py
@@ -71,9 +71,7 @@ def test_load_fails_if_aoef_version_is_not_supported(tmp_path):
         io.load(path)
 
 
-def test_save_creates_parent_directories(
-    tmp_path: Path, dataset: data.Dataset
-):
+def test_save_creates_parent_directories(tmp_path: Path, dataset: data.Dataset):
     """Test that the save function creates parent directories."""
     # Arrange
     path = tmp_path / "parent" / "child" / "test.json"
diff --git a/tests/test_io/test_crowsetta/test_annotation.py b/tests/test_io/test_crowsetta/test_annotation.py
index 3c90a29..07cc7f0 100644
--- a/tests/test_io/test_crowsetta/test_annotation.py
+++ b/tests/test_io/test_crowsetta/test_annotation.py
@@ -35,9 +35,7 @@ def clip_annotation(recording: data.Recording) -> data.ClipAnnotation:
             data.SoundEventAnnotation(
                 sound_event=data.SoundEvent(
                     recording=recording,
-                    geometry=data.BoundingBox(
-                        coordinates=[0.5, 0.5, 1.5, 1.5]
-                    ),
+                    geometry=data.BoundingBox(coordinates=[0.5, 0.5, 1.5, 1.5]),
                     features=[data.Feature(name="test", value=1.0)],
                 ),
                 tags=[data.Tag(key="animal", value="cat")],
@@ -46,9 +44,7 @@
             data.SoundEventAnnotation(
                 sound_event=data.SoundEvent(
                     recording=recording,
-                    geometry=data.LineString(
-                        coordinates=[[0.5, 0.5], [1.5, 1.5]]
-                    ),
+                    geometry=data.LineString(coordinates=[[0.5, 0.5], [1.5, 1.5]]),
                     features=[data.Feature(name="test", value=1.0)],
                 ),
                 tags=[data.Tag(key="animal", value="cat")],
diff --git a/tests/test_io/test_crowsetta/test_import.py b/tests/test_io/test_crowsetta/test_import.py
index b9a1b32..16e688c 100644
--- a/tests/test_io/test_crowsetta/test_import.py
+++ b/tests/test_io/test_crowsetta/test_import.py
@@ -52,9 +52,9 @@ def test_can_import_all_example_formats(
         from_file_kwargs = {"audio_path": recording.path}
         to_annot_kwargs = {"samplerate": recording.samplerate}
 
-    annotation = scribe.from_file(
-        example.annot_path, **from_file_kwargs
-    ).to_annot(**to_annot_kwargs)
+    annotation = scribe.from_file(example.annot_path, **from_file_kwargs).to_annot(
+        **to_annot_kwargs
+    )
 
     if isinstance(annotation, list):
         annotation = annotation[0]
@@ -62,9 +62,7 @@
     assert isinstance(annotation, crowsetta.Annotation)
 
     if annotation.notated_path is not None:
-        recording = recording.model_copy(
-            update=dict(path=annotation.notated_path)
-        )
+        recording = recording.model_copy(update=dict(path=annotation.notated_path))
 
     clip_annotation = crowsetta_io.annotation_to_clip_annotation(
         annotation,
diff --git a/tests/test_io/test_crowsetta/test_labels.py b/tests/test_io/test_crowsetta/test_labels.py
index 5316aa4..26b2a15 100644
--- a/tests/test_io/test_crowsetta/test_labels.py
+++ b/tests/test_io/test_crowsetta/test_labels.py
@@ -136,9 +136,7 @@ def test_label_to_tags_with_key_mapping():
 
 def test_label_to_tags_with_key_mapping_fallback():
     key_mapping = {"bat": "animal"}
-    tag = crowsetta_io.label_to_tags(
-        "dog", key_mapping=key_mapping, fallback="pet"
-    )
+    tag = crowsetta_io.label_to_tags("dog", key_mapping=key_mapping, fallback="pet")
     assert tag == [data.Tag(key="pet", value="dog")]
diff --git a/tests/test_io/test_crowsetta/test_segments.py b/tests/test_io/test_crowsetta/test_segments.py
index 3804d8f..e8bacfe 100644
--- a/tests/test_io/test_crowsetta/test_segments.py
+++ b/tests/test_io/test_crowsetta/test_segments.py
@@ -95,9 +95,7 @@ def test_segment_from_annotation(
 def test_segment_from_annotation_fails_if_not_a_time_interval(
     sound_event_annotation: data.SoundEventAnnotation,
 ):
-    sound_event_annotation.sound_event.geometry = data.Point(
-        coordinates=[0.5, 1]
-    )
+    sound_event_annotation.sound_event.geometry = data.Point(coordinates=[0.5, 1])
     with pytest.raises(ValueError):
         crowsetta_io.segment_from_annotation(
             sound_event_annotation,
@@ -108,9 +106,7 @@ def test_segment_from_annotation_casts_to_segment(
     sound_event_annotation: data.SoundEventAnnotation,
 ):
-    sound_event_annotation.sound_event.geometry = data.Point(
-        coordinates=[0.5, 1]
-    )
+    sound_event_annotation.sound_event.geometry = data.Point(coordinates=[0.5, 1])
     segment = crowsetta_io.segment_from_annotation(
         sound_event_annotation,
         cast_to_segment=True,
diff --git a/tests/test_io/test_crowsetta/test_sequence.py b/tests/test_io/test_crowsetta/test_sequence.py
index 6c4897d..d02e5f6 100644
--- a/tests/test_io/test_crowsetta/test_sequence.py
+++ b/tests/test_io/test_crowsetta/test_sequence.py
@@ -170,7 +170,5 @@ def test_sequence_to_annotations(
         recording,
     )
     assert len(annotations) == 2
-    assert all(
-        isinstance(ann, data.SoundEventAnnotation) for ann in annotations
-    )
+    assert all(isinstance(ann, data.SoundEventAnnotation) for ann in annotations)
     assert all(ann.sound_event.recording == recording for ann in annotations)
diff --git a/tests/test_io/test_model_runs.py b/tests/test_io/test_model_runs.py
index d9716fd..8b33b49 100644
--- a/tests/test_io/test_model_runs.py
+++ b/tests/test_io/test_model_runs.py
@@ -138,9 +138,7 @@ def test_can_recover_processed_clip_tags(
     # Assert
     assert model_run == recovered
     assert recovered.clip_predictions[0].tags[0].tag.key == "species"
-    assert (
-        recovered.clip_predictions[0].tags[0].tag.value == "Myotis lucifugus"
-    )
+    assert recovered.clip_predictions[0].tags[0].tag.value == "Myotis lucifugus"
     assert recovered.clip_predictions[0].tags[0].score == 0.9
 
 
@@ -211,10 +209,7 @@ def test_can_recover_simple_predicted_sound_event(
 
     # Assert
     assert recovered.clip_predictions[0].sound_events[0].score == 0.9
-    assert (
-        recovered.clip_predictions[0].sound_events[0].sound_event
-        == sound_event
-    )
+    assert recovered.clip_predictions[0].sound_events[0].sound_event == sound_event
     assert model_run == recovered
 
 
@@ -254,10 +249,7 @@ def test_can_recover_predicted_sound_event_with_predicted_tags(
     recovered = io.load(path, type="model_run")
 
     # Assert
-    assert (
-        recovered.clip_predictions[0].sound_events[0].tags[0].tag.key
-        == "species"
-    )
+    assert recovered.clip_predictions[0].sound_events[0].tags[0].tag.key == "species"
     assert (
         recovered.clip_predictions[0].sound_events[0].tags[0].tag.value
         == "Myotis lucifugus"

From 3b8b07787d211b060b3df5a503ebed264ccddb45 Mon Sep 17 00:00:00 2001
From: mbsantiago
Date: Fri, 10 May 2024 11:01:17 +0100
Subject: [PATCH 4/7] migrated formatting to ruff

---
 .github/workflows/test.yml                     |  4 --
 docs/user_guide/2_loading_audio.py             |  2 +-
 src/soundevent/arrays/dimensions.py            |  4 +-
 src/soundevent/arrays/operations.py            | 18 ++++--
 src/soundevent/audio/chunks.py                 |  3 +-
 src/soundevent/audio/filter.py                 |  4 +-
 src/soundevent/audio/media_info.py             |  5 +-
 src/soundevent/audio/spectrum.py               |  8 ++-
 src/soundevent/data/annotation_sets.py         |  4 +-
 src/soundevent/data/annotation_tasks.py        |  8 ++-
 src/soundevent/data/clip_annotations.py        |  8 ++-
 src/soundevent/data/clip_evaluations.py        |  8 ++-
 src/soundevent/data/evaluations.py             |  4 +-
 src/soundevent/data/geometries.py              | 36 +++++++----
 src/soundevent/data/notes.py                   |  4 +-
 src/soundevent/data/prediction_sets.py         |  4 +-
 src/soundevent/data/recording_sets.py          |  4 +-
 src/soundevent/data/recordings.py              |  5 +-
 src/soundevent/data/sequence_annotations.py    |  4 +-
 src/soundevent/data/sound_event_annotations.py |  4 +-
 src/soundevent/evaluation/affinity.py          | 20 +++++--
 src/soundevent/evaluation/encoding.py          | 59 +++++++++++--------
 src/soundevent/evaluation/match.py             |  1 -
 src/soundevent/evaluation/metrics.py           | 13 ++--
 src/soundevent/evaluation/tasks/__init__.py    |  4 +-
 .../evaluation/tasks/clip_classification.py    |  7 ++-
 .../tasks/clip_multilabel_classification.py    |  7 ++-
 src/soundevent/evaluation/tasks/common.py      |  4 +-
 .../tasks/sound_event_classification.py        | 19 ++++--
 .../evaluation/tasks/sound_event_detection.py  |  7 ++-
 src/soundevent/geometry/__init__.py            |  5 +-
 src/soundevent/geometry/features.py            | 16 +++--
 src/soundevent/geometry/html.py                |  6 +-
 src/soundevent/io/aoef/__init__.py             | 13 +++-
 src/soundevent/io/aoef/adapters.py             |  8 ++-
 src/soundevent/io/aoef/annotation_project.py   | 27 ++++++---
 src/soundevent/io/aoef/annotation_set.py       | 21 +++++--
 src/soundevent/io/aoef/clip_annotations.py     | 19 ++++--
 src/soundevent/io/aoef/clip_evaluation.py      | 13 +++-
 src/soundevent/io/aoef/clip_predictions.py     | 19 ++++--
 src/soundevent/io/aoef/dataset.py              |  4 +-
 src/soundevent/io/aoef/evaluation.py           | 47 +++++++++++----
 src/soundevent/io/aoef/evaluation_set.py       | 11 +++-
 src/soundevent/io/aoef/match.py                |  8 ++-
 src/soundevent/io/aoef/prediction_set.py       | 17 ++++--
 src/soundevent/io/aoef/recording.py            | 18 ++++--
 src/soundevent/io/aoef/recording_set.py        |  3 +-
 src/soundevent/io/aoef/sequence.py             |  4 +-
 src/soundevent/io/aoef/sequence_prediction.py  |  3 +-
 src/soundevent/io/aoef/sound_event.py          |  4 +-
 .../io/aoef/sound_event_annotation.py          |  8 ++-
 .../io/aoef/sound_event_prediction.py          | 11 +++-
 src/soundevent/io/crowsetta/__init__.py        |  5 +-
 src/soundevent/io/crowsetta/annotation.py      | 18 +++---
 src/soundevent/io/crowsetta/bbox.py            |  9 ++-
 src/soundevent/io/crowsetta/labels.py          |  4 +-
 src/soundevent/io/crowsetta/segment.py         |  5 +-
 src/soundevent/io/crowsetta/sequence.py        |  1 -
 src/soundevent/io/formats.py                   |  4 +-
 src/soundevent/io/saver.py                     |  1 -
 src/soundevent/plot/annotation.py              |  9 +--
 src/soundevent/plot/geometries.py              |  3 +-
 src/soundevent/plot/tags.py                    |  4 +-
 tests/conftest.py                              |  1 -
 tests/test_array/test_dimensions.py            |  4 +-
 tests/test_array/test_operations.py            |  2 +-
 tests/test_audio/test_filter.py                |  1 -
 tests/test_audio/test_io.py                    |  7 ++-
 tests/test_audio/test_raw.py                   |  1 -
 tests/test_audio/test_resample.py              |  1 -
 tests/test_audio/test_scaling.py               |  1 -
 tests/test_data/test_datasets.py               |  9 ++-
 tests/test_data/test_evaluated_samples.py      |  1 -
 tests/test_data/test_geometry.py               |  6 +-
 .../test_clip_classification.py                | 20 +++++--
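For context on this commit's subject: with the dedicated black check removed from CI (first hunk below), formatting and import sorting are presumably handled by ruff alone. Assuming the standard ruff CLI, the local equivalents would be roughly `ruff format src tests` for formatting and `ruff check --fix src tests` for lint fixes such as import ordering.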
 .../test_clip_multilabel_classification.py     |  1 -
 tests/test_evaluation/test_encode.py           |  6 +-
 tests/test_evaluation/test_matching.py         |  4 +-
 tests/test_evaluation/test_metrics.py          |  1 -
 .../test_sound_event_detection.py              |  4 +-
 tests/test_geometry/conftest.py                |  1 -
 tests/test_geometry/test_conversion.py         |  1 -
 tests/test_geometry/test_html.py               |  1 -
 tests/test_geometry/test_operations.py         |  1 -
 tests/test_io/conftest.py                      |  1 -
 tests/test_io/test_annotation_projects.py      | 57 ++++++++++++++----
 tests/test_io/test_aoef/conftest.py            |  1 -
 tests/test_io/test_aoef/test_api.py            | 17 +++---
 .../test_io/test_crowsetta/test_annotation.py  |  9 ++-
 tests/test_io/test_crowsetta/test_bbox.py      |  3 +-
 tests/test_io/test_crowsetta/test_import.py    | 13 ++--
 tests/test_io/test_crowsetta/test_labels.py    |  4 +-
 tests/test_io/test_crowsetta/test_segments.py  | 11 ++--
 tests/test_io/test_crowsetta/test_sequence.py  |  5 +-
 tests/test_io/test_model_runs.py               | 14 ++++-
 95 files changed, 582 insertions(+), 257 deletions(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index f04eb83..870215d 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -23,10 +23,6 @@ jobs:
           python -m pip install --upgrade pip
           python -m pip install pytest hypothesis ruff mypy black html5lib
           python -m pip install ".[all]"
-      - name: Check format is correct
-        run: |
-          black --check src
-          black --check tests
      - name: Make sure types are consistent
        run: mypy --ignore-missing-imports src
      - name: Lint with ruff
diff --git a/docs/user_guide/2_loading_audio.py b/docs/user_guide/2_loading_audio.py
index 3aedb4b..281cbb4 100644
--- a/docs/user_guide/2_loading_audio.py
+++ b/docs/user_guide/2_loading_audio.py
@@ -6,7 +6,7 @@
 [`xarray.DataArray`][xarray.DataArray] objects to hold loaded audio data.
 [`xarray.DataArray`][xarray.DataArray] objects are an extension of
 [`numpy`][numpy.ndarray] arrays, so there's no need to learn new concepts
-if you are already familiar with [`numpy`][numpy.ndarray] arrays. 
+if you are already familiar with [`numpy`][numpy.ndarray] arrays.
 
 !!! note "Why use `xarray.DataArray` objects?"
 
diff --git a/src/soundevent/arrays/dimensions.py b/src/soundevent/arrays/dimensions.py
index b877b2e..203067e 100644
--- a/src/soundevent/arrays/dimensions.py
+++ b/src/soundevent/arrays/dimensions.py
@@ -493,7 +493,9 @@ def get_dim_step(
         return attrs[DimAttrs.step.value]
 
     if not estimate_step:
-        raise ValueError(f"Step size not found in the '{dim}' dimension attributes.")
+        raise ValueError(
+            f"Step size not found in the '{dim}' dimension attributes."
+        )
 
     return estimate_dim_step(
         coord.data,
diff --git a/src/soundevent/arrays/operations.py b/src/soundevent/arrays/operations.py
index ba5a226..f01ac97 100644
--- a/src/soundevent/arrays/operations.py
+++ b/src/soundevent/arrays/operations.py
@@ -7,7 +7,11 @@
 from numpy.typing import DTypeLike
 from xarray.core.types import InterpOptions
 
-from soundevent.arrays.dimensions import create_range_dim, get_dim_range, get_dim_step
+from soundevent.arrays.dimensions import (
+    create_range_dim,
+    get_dim_range,
+    get_dim_step,
+)
 
 __all__ = [
     "center",
@@ -88,7 +92,9 @@ def crop_dim(
         stop = current_stop
 
     if start > stop:
-        raise ValueError(f"Start value {start} must be less than stop value {stop}")
+        raise ValueError(
+            f"Start value {start} must be less than stop value {stop}"
+        )
 
     if start < current_start or stop > current_stop:
         raise ValueError(
@@ -174,7 +180,9 @@ def extend_dim(
         stop = current_stop
 
     if start > stop:
-        raise ValueError(f"Start value {start} must be less than stop value {stop}")
+        raise ValueError(
+            f"Start value {start} must be less than stop value {stop}"
+        )
 
     step = get_dim_step(arr, dim)
 
@@ -304,7 +312,9 @@ def set_value_at_pos(
     start, stop = get_dim_range(array, dim)
 
     if coord < start or coord > stop:
-        raise KeyError(f"Position {coord} is outside the range of dimension {dim}.")
+        raise KeyError(
+            f"Position {coord} is outside the range of dimension {dim}."
+        )
 
     index = array.indexes[dim].get_slice_bound(coord, "right")
     indexer[dims[dim]] = index - 1
diff --git a/src/soundevent/audio/chunks.py b/src/soundevent/audio/chunks.py
index 975e8bb..3f2b2d5 100644
--- a/src/soundevent/audio/chunks.py
+++ b/src/soundevent/audio/chunks.py
@@ -111,7 +111,8 @@ def _read_chunk(riff: BinaryIO) -> Optional[Chunk]:
     if chunk_id in CHUNKS_WITH_SUBCHUNKS:
         chunk.subchunks = {
-            subchunk.chunk_id: subchunk for subchunk in _get_subchunks(riff, size - 4)
+            subchunk.chunk_id: subchunk
+            for subchunk in _get_subchunks(riff, size - 4)
         }
     else:
         riff.seek(size, os.SEEK_CUR)
diff --git a/src/soundevent/audio/filter.py b/src/soundevent/audio/filter.py
index 63ff040..6df6111 100644
--- a/src/soundevent/audio/filter.py
+++ b/src/soundevent/audio/filter.py
@@ -18,7 +18,9 @@ def _get_filter(
     order: int = 5,
 ) -> np.ndarray:
     if low_freq is None and high_freq is None:
-        raise ValueError("At least one of low_freq and high_freq must be specified.")
+        raise ValueError(
+            "At least one of low_freq and high_freq must be specified."
+        )
 
     if low_freq is None:
         # Low pass filter
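A worked example of the PCM header arithmetic that appears in the media_info.py hunks below (values chosen for illustration):

samplerate = 44_100  # frames per second
channels = 2
bit_depth = 16  # bits per sample

# Each frame holds one sample per channel.
block_align = channels * bit_depth // 8  # bytes per frame
byte_rate = samplerate * block_align  # bytes per second of audio

assert block_align == 4
assert byte_rate == 176_400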
""" - data_size = samples * channels * bit_depth // 8 byte_rate = samplerate * channels * bit_depth // 8 block_align = channels * bit_depth // 8 diff --git a/src/soundevent/audio/spectrum.py b/src/soundevent/audio/spectrum.py index 3ab9f40..57a4738 100644 --- a/src/soundevent/audio/spectrum.py +++ b/src/soundevent/audio/spectrum.py @@ -120,7 +120,9 @@ def pcen_core( raise ValueError(f"eps={eps} must be strictly positive") if time_constant <= 0: - raise ValueError(f"time_constant={time_constant} must be strictly positive") + raise ValueError( + f"time_constant={time_constant} must be strictly positive" + ) if b is None: t_frames = time_constant * sr / float(hop_length) @@ -144,7 +146,9 @@ def pcen_core( if max_size == 1: ref = S elif S.ndim == 1: - raise ValueError("Max-filtering cannot be applied to 1-dimensional input") + raise ValueError( + "Max-filtering cannot be applied to 1-dimensional input" + ) else: if max_axis is None: if S.ndim != 2: diff --git a/src/soundevent/data/annotation_sets.py b/src/soundevent/data/annotation_sets.py index 7273282..6ea30e0 100644 --- a/src/soundevent/data/annotation_sets.py +++ b/src/soundevent/data/annotation_sets.py @@ -28,4 +28,6 @@ class AnnotationSet(BaseModel): default_factory=list, repr=False, ) - created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) + created_on: datetime.datetime = Field( + default_factory=datetime.datetime.now + ) diff --git a/src/soundevent/data/annotation_tasks.py b/src/soundevent/data/annotation_tasks.py index 31ad438..3c9495c 100644 --- a/src/soundevent/data/annotation_tasks.py +++ b/src/soundevent/data/annotation_tasks.py @@ -60,11 +60,15 @@ class StatusBadge(BaseModel): state: AnnotationState owner: Optional[User] = None - created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) + created_on: datetime.datetime = Field( + default_factory=datetime.datetime.now + ) class AnnotationTask(BaseModel): uuid: UUID = Field(default_factory=uuid4, repr=False) clip: Clip status_badges: List[StatusBadge] = Field(default_factory=list) - created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) + created_on: datetime.datetime = Field( + default_factory=datetime.datetime.now + ) diff --git a/src/soundevent/data/clip_annotations.py b/src/soundevent/data/clip_annotations.py index acadefe..795c4b5 100644 --- a/src/soundevent/data/clip_annotations.py +++ b/src/soundevent/data/clip_annotations.py @@ -43,7 +43,9 @@ class ClipAnnotation(BaseModel): annotations A list of Annotation instances representing detailed annotations of sound events in the clip. - notes + + Notes + ----- A list of Note instances representing additional contextual information or remarks associated with the clip. 
""" @@ -54,4 +56,6 @@ class ClipAnnotation(BaseModel): sequences: List[SequenceAnnotation] = Field(default_factory=list) tags: List[Tag] = Field(default_factory=list) notes: List[Note] = Field(default_factory=list) - created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) + created_on: datetime.datetime = Field( + default_factory=datetime.datetime.now + ) diff --git a/src/soundevent/data/clip_evaluations.py b/src/soundevent/data/clip_evaluations.py index 380a9c8..ad812b0 100644 --- a/src/soundevent/data/clip_evaluations.py +++ b/src/soundevent/data/clip_evaluations.py @@ -95,13 +95,17 @@ def _check_matches(self): } match_targets = [ - match.target.uuid for match in self.matches if match.target is not None + match.target.uuid + for match in self.matches + if match.target is not None ] match_targets_set = set(match_targets) match_sources = [ - match.source.uuid for match in self.matches if match.source is not None + match.source.uuid + for match in self.matches + if match.source is not None ] match_sources_set = set(match_sources) diff --git a/src/soundevent/data/evaluations.py b/src/soundevent/data/evaluations.py index 4ef34ed..d55db26 100644 --- a/src/soundevent/data/evaluations.py +++ b/src/soundevent/data/evaluations.py @@ -25,7 +25,9 @@ class Evaluation(BaseModel): """Evaluation Class.""" uuid: UUID = Field(default_factory=uuid4, repr=False) - created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) + created_on: datetime.datetime = Field( + default_factory=datetime.datetime.now + ) evaluation_task: str clip_evaluations: Sequence[ClipEvaluation] = Field(default_factory=list) metrics: Sequence[Feature] = Field(default_factory=list) diff --git a/src/soundevent/data/geometries.py b/src/soundevent/data/geometries.py index be5af2c..a83a718 100644 --- a/src/soundevent/data/geometries.py +++ b/src/soundevent/data/geometries.py @@ -252,7 +252,9 @@ def _validate_time_interval(cls, v: List[Time]) -> List[Time]: after the end time). """ if len(v) != 2: - raise ValueError("The time interval must have exactly two time stamps.") + raise ValueError( + "The time interval must have exactly two time stamps." + ) if v[0] > v[1]: raise ValueError("The start time must be before the end time.") @@ -323,7 +325,9 @@ def _validate_coordinates(cls, v: List[float]) -> List[float]: raise ValueError("The time must be positive.") if frequency < 0 or frequency > MAX_FREQUENCY: - raise ValueError(f"The frequency must be between 0 and {MAX_FREQUENCY}.") + raise ValueError( + f"The frequency must be between 0 and {MAX_FREQUENCY}." + ) return v @@ -469,7 +473,8 @@ def _validate_coordinates( if frequency < 0 or frequency > MAX_FREQUENCY: raise ValueError( - f"The frequency must be between 0 and " f"{MAX_FREQUENCY}." + f"The frequency must be between 0 and " + f"{MAX_FREQUENCY}." ) return v @@ -527,7 +532,9 @@ def _validate_coordinates(cls, v: List[float]) -> List[float]: negative or the frequency is outside the valid range). """ if len(v) != 4: - raise ValueError("The bounding box must have exactly four coordinates.") + raise ValueError( + "The bounding box must have exactly four coordinates." + ) start_time, low_freq, end_time, high_freq = v @@ -551,7 +558,9 @@ def _validate_coordinates(cls, v: List[float]) -> List[float]: raise ValueError("The start time must be before the end time.") if low_freq > high_freq: - raise ValueError("The start frequency must be before the end frequency.") + raise ValueError( + "The start frequency must be before the end frequency." 
diff --git a/src/soundevent/data/geometries.py b/src/soundevent/data/geometries.py
index be5af2c..a83a718 100644
--- a/src/soundevent/data/geometries.py
+++ b/src/soundevent/data/geometries.py
@@ -252,7 +252,9 @@ def _validate_time_interval(cls, v: List[Time]) -> List[Time]:
             after the end time).
         """
         if len(v) != 2:
-            raise ValueError("The time interval must have exactly two time stamps.")
+            raise ValueError(
+                "The time interval must have exactly two time stamps."
+            )
 
         if v[0] > v[1]:
             raise ValueError("The start time must be before the end time.")
@@ -323,7 +325,9 @@ def _validate_coordinates(cls, v: List[float]) -> List[float]:
             raise ValueError("The time must be positive.")
 
         if frequency < 0 or frequency > MAX_FREQUENCY:
-            raise ValueError(f"The frequency must be between 0 and {MAX_FREQUENCY}.")
+            raise ValueError(
+                f"The frequency must be between 0 and {MAX_FREQUENCY}."
+            )
 
         return v
 
@@ -469,7 +473,8 @@ def _validate_coordinates(
 
             if frequency < 0 or frequency > MAX_FREQUENCY:
                 raise ValueError(
-                    f"The frequency must be between 0 and " f"{MAX_FREQUENCY}."
+                    f"The frequency must be between 0 and "
+                    f"{MAX_FREQUENCY}."
                 )
 
         return v
@@ -527,7 +532,9 @@ def _validate_coordinates(cls, v: List[float]) -> List[float]:
             negative or the frequency is outside the valid range).
         """
         if len(v) != 4:
-            raise ValueError("The bounding box must have exactly four coordinates.")
+            raise ValueError(
+                "The bounding box must have exactly four coordinates."
+            )
 
         start_time, low_freq, end_time, high_freq = v
 
@@ -551,7 +558,9 @@
             raise ValueError("The start time must be before the end time.")
 
         if low_freq > high_freq:
-            raise ValueError("The start frequency must be before the end frequency.")
+            raise ValueError(
+                "The start frequency must be before the end frequency."
+            )
 
         return v
 
@@ -762,7 +771,9 @@ def _validate_coordinates(
             negative or the frequency is outside the valid range).
         """
         if len(v) < 1:
-            raise ValueError("The multipolygon must have at least one polygon.")
+            raise ValueError(
+                "The multipolygon must have at least one polygon."
+            )
 
         for polygon in v:
             if len(polygon) < 1:
@@ -770,7 +781,9 @@
 
             for ring in polygon:
                 if len(ring) < 3:
-                    raise ValueError("Each ring must have at least three points.")
+                    raise ValueError(
+                        "Each ring must have at least three points."
+                    )
 
                 for time, frequency in ring:
                     if time < 0:
@@ -778,7 +791,8 @@
 
                     if frequency < 0 or frequency > MAX_FREQUENCY:
                         raise ValueError(
-                            f"The frequency must be between 0 and " f"{MAX_FREQUENCY}."
+                            f"The frequency must be between 0 and "
+                            f"{MAX_FREQUENCY}."
                         )
 
         return v
@@ -894,7 +908,7 @@ def geometry_validate(
     if not hasattr(obj, "type"):
         raise ValueError(f"Object {obj} does not have a type attribute.")
 
-    geom_type = getattr(obj, "type")
+    geom_type = obj.type
 
     if geom_type not in GEOMETRY_MAPPING:
         raise ValueError(f"Object {obj} does not have a geometry valid type.")
@@ -907,4 +921,6 @@
             from_attributes=mode == "attributes",
         )
     except ValidationError as error:
-        raise ValueError(f"Object {obj} is not a valid {geom_type}.") from error
+        raise ValueError(
+            f"Object {obj} is not a valid {geom_type}."
+        ) from error
diff --git a/src/soundevent/data/notes.py b/src/soundevent/data/notes.py
index 3ee2de6..de0de81 100644
--- a/src/soundevent/data/notes.py
+++ b/src/soundevent/data/notes.py
@@ -95,7 +95,9 @@ class Note(BaseModel):
     message: str
     created_by: Optional[User] = None
     is_issue: bool = False
-    created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
+    created_on: datetime.datetime = Field(
+        default_factory=datetime.datetime.now
+    )
 
     def __hash__(self):
         """Hash the Note object."""
diff --git a/src/soundevent/data/prediction_sets.py b/src/soundevent/data/prediction_sets.py
index 1cf3bd6..edffd2a 100644
--- a/src/soundevent/data/prediction_sets.py
+++ b/src/soundevent/data/prediction_sets.py
@@ -73,4 +73,6 @@ class PredictionSet(BaseModel):
 
     uuid: UUID = Field(default_factory=uuid4)
     clip_predictions: List[ClipPrediction] = Field(default_factory=list)
-    created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
+    created_on: datetime.datetime = Field(
+        default_factory=datetime.datetime.now
+    )
diff --git a/src/soundevent/data/recording_sets.py b/src/soundevent/data/recording_sets.py
index cb1ff5f..dde95ef 100644
--- a/src/soundevent/data/recording_sets.py
+++ b/src/soundevent/data/recording_sets.py
@@ -12,4 +12,6 @@ class RecordingSet(BaseModel):
 
     uuid: UUID = Field(default_factory=uuid4)
     recordings: List[Recording] = Field(default_factory=list, repr=False)
-    created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
+    created_on: datetime.datetime = Field(
+        default_factory=datetime.datetime.now
+    )
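As background for the `geometry_validate` hunk above: it dispatches on the object's `type` attribute through a `GEOMETRY_MAPPING` table. A skeletal version of that dispatch idea (the classes and mapping contents here are invented for the sketch):

from typing import Any, Dict, Type


class Point:
    type = "Point"


class BoundingBox:
    type = "BoundingBox"


# Maps a declared geometry type name to the class used for validation.
GEOMETRY_MAPPING: Dict[str, Type[Any]] = {
    "Point": Point,
    "BoundingBox": BoundingBox,
}


def resolve(obj: Any) -> Type[Any]:
    geom_type = getattr(obj, "type", None)
    if geom_type not in GEOMETRY_MAPPING:
        raise ValueError(f"Object {obj} does not have a valid geometry type.")
    return GEOMETRY_MAPPING[geom_type]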
""" - from soundevent.audio.media_info import compute_md5_checksum, get_media_info + from soundevent.audio.media_info import ( + compute_md5_checksum, + get_media_info, + ) media_info = get_media_info(path) diff --git a/src/soundevent/data/sequence_annotations.py b/src/soundevent/data/sequence_annotations.py index 47e2dfa..9aef0d9 100644 --- a/src/soundevent/data/sequence_annotations.py +++ b/src/soundevent/data/sequence_annotations.py @@ -25,7 +25,9 @@ class SequenceAnnotation(BaseModel): A unique identifier for the annotation. sequence The sequence being annotated. - notes + + Notes + ----- A list of notes associated with the sequence. tags The tags attached to the sequence providing semantic information. diff --git a/src/soundevent/data/sound_event_annotations.py b/src/soundevent/data/sound_event_annotations.py index a2e4316..647ca2b 100644 --- a/src/soundevent/data/sound_event_annotations.py +++ b/src/soundevent/data/sound_event_annotations.py @@ -83,7 +83,9 @@ class SoundEventAnnotation(BaseModel): being annotated. Sound events define distinct audio occurrences, such as bird calls or animal vocalizations, and are essential for categorizing the content of the audio data. - notes + + Notes + ----- A list of `Note` instances representing additional contextual information or remarks associated with the annotation. Notes can provide insights into specific characteristics of the sound event, aiding in the comprehensive understanding diff --git a/src/soundevent/evaluation/affinity.py b/src/soundevent/evaluation/affinity.py index 1de83f1..a2fe764 100644 --- a/src/soundevent/evaluation/affinity.py +++ b/src/soundevent/evaluation/affinity.py @@ -1,7 +1,11 @@ """Measures of affinity between sound events geometries.""" from soundevent import data -from soundevent.geometry import buffer_geometry, compute_bounds, geometry_to_shapely +from soundevent.geometry import ( + buffer_geometry, + compute_bounds, + geometry_to_shapely, +) __all__ = [ "compute_affinity", @@ -80,11 +84,13 @@ def compute_affinity( >>> affinity 0.75 """ - geometry1 = _prepare_geometry(geometry1, time_buffer, freq_buffer) geometry2 = _prepare_geometry(geometry2, time_buffer, freq_buffer) - if geometry1.type in TIME_GEOMETRY_TYPES or geometry2.type in TIME_GEOMETRY_TYPES: + if ( + geometry1.type in TIME_GEOMETRY_TYPES + or geometry2.type in TIME_GEOMETRY_TYPES + ): return compute_affinity_in_time(geometry1, geometry2) shp1 = geometry_to_shapely(geometry1) @@ -107,8 +113,12 @@ def compute_affinity_in_time( start_time1, _, end_time1, _ = compute_bounds(geometry1) start_time2, _, end_time2, _ = compute_bounds(geometry2) - intersection = max(0, min(end_time1, end_time2) - max(start_time1, start_time2)) - union = (end_time1 - start_time1) + (end_time2 - start_time2) - intersection + intersection = max( + 0, min(end_time1, end_time2) - max(start_time1, start_time2) + ) + union = ( + (end_time1 - start_time1) + (end_time2 - start_time2) - intersection + ) if union == 0: return 0 diff --git a/src/soundevent/evaluation/encoding.py b/src/soundevent/evaluation/encoding.py index c5676ab..306555c 100644 --- a/src/soundevent/evaluation/encoding.py +++ b/src/soundevent/evaluation/encoding.py @@ -142,10 +142,10 @@ def classification_encoding( -------- Consider the following set of tags: - >>> dog = data.Tag(key='animal', value='dog') - >>> cat = data.Tag(key='animal', value='cat') - >>> brown = data.Tag(key='color', value='brown') - >>> blue = data.Tag(key='color', value='blue') + >>> dog = data.Tag(key="animal", value="dog") + >>> cat = 
data.Tag(key="animal", value="cat") + >>> brown = data.Tag(key="color", value="brown") + >>> blue = data.Tag(key="color", value="blue") If we are interested in encoding only the 'dog' and 'brown' classes, the following examples demonstrate how the encoding works for tag list: @@ -193,10 +193,10 @@ def multilabel_encoding( -------- Consider the following set of tags: - >>> dog = data.Tag(key='animal', value='dog') - >>> cat = data.Tag(key='animal', value='cat') - >>> brown = data.Tag(key='color', value='brown') - >>> blue = data.Tag(key='color', value='blue') + >>> dog = data.Tag(key="animal", value="dog") + >>> cat = data.Tag(key="animal", value="cat") + >>> brown = data.Tag(key="color", value="brown") + >>> blue = data.Tag(key="color", value="blue") And we are only interested in encoding the following two classes: @@ -248,10 +248,10 @@ class corresponding to the input predicted tags. -------- Consider the following set of tags: - >>> dog = data.Tag(key='animal', value='dog') - >>> cat = data.Tag(key='animal', value='cat') - >>> brown = data.Tag(key='color', value='brown') - >>> blue = data.Tag(key='color', value='blue') + >>> dog = data.Tag(key="animal", value="dog") + >>> cat = data.Tag(key="animal", value="cat") + >>> brown = data.Tag(key="color", value="brown") + >>> blue = data.Tag(key="color", value="blue") And we are only interested in encoding the following two classes: @@ -259,21 +259,32 @@ class corresponding to the input predicted tags. Then the following examples show how the encoding works for predicted tags: - >>> prediction_encoding([data.PredictedTag(tag=brown, score=0.5)], encoder) + >>> prediction_encoding( + ... [data.PredictedTag(tag=brown, score=0.5)], encoder + ... ) array([0, 0.5]) - >>> multilabel_encoding([ - ... data.PredictedTag(tag=dog, score=0.2), - ... data.PredictedTag(tag=blue, score=0.9), - ... ], encoder) + >>> multilabel_encoding( + ... [ + ... data.PredictedTag(tag=dog, score=0.2), + ... data.PredictedTag(tag=blue, score=0.9), + ... ], + ... encoder, + ... ) array([0.2, 0]) - >>> multilabel_encoding([ - ... data.PredictedTag(tag=dog, score=0.2), - ... data.PredictedTag(tag=brown, score=0.5), - ... ], encoder) + >>> multilabel_encoding( + ... [ + ... data.PredictedTag(tag=dog, score=0.2), + ... data.PredictedTag(tag=brown, score=0.5), + ... ], + ... encoder, + ... ) array([0.2, 0.5]) - >>> classification_encoding([ - ... data.PredictedTag(tag=cat, score=0.7), - ... ], encoder) + >>> classification_encoding( + ... [ + ... data.PredictedTag(tag=cat, score=0.7), + ... ], + ... encoder, + ... ) array([0, 0]) """ encoded = np.zeros(encoder.num_classes, dtype=np.float32) diff --git a/src/soundevent/evaluation/match.py b/src/soundevent/evaluation/match.py index 9344368..189e8cb 100644 --- a/src/soundevent/evaluation/match.py +++ b/src/soundevent/evaluation/match.py @@ -44,7 +44,6 @@ def match_geometries( is not matched to any source geometry, the source index is None. Every source and target geometry is matched exactly once. """ - # Compute the affinity between all pairs of geometries. 
cost_matrix = np.zeros(shape=(len(source), len(target))) for (index1, geometry1), (index2, geometry2) in product( diff --git a/src/soundevent/evaluation/metrics.py b/src/soundevent/evaluation/metrics.py index 22e3c71..c1b0cf6 100644 --- a/src/soundevent/evaluation/metrics.py +++ b/src/soundevent/evaluation/metrics.py @@ -43,7 +43,9 @@ def balanced_accuracy( y_score: np.ndarray, ) -> float: num_classes = y_score.shape[1] - y_true_array = np.array([y if y is not None else num_classes for y in y_true]) + y_true_array = np.array( + [y if y is not None else num_classes for y in y_true] + ) y_score = np.c_[y_score, 1 - y_score.sum(axis=1, keepdims=True)] y_pred = y_score.argmax(axis=1) return metrics.balanced_accuracy_score( @@ -57,7 +59,9 @@ def accuracy( y_score: np.ndarray, ) -> float: num_classes = y_score.shape[1] - y_true_array = np.array([y if y is not None else num_classes for y in y_true]) + y_true_array = np.array( + [y if y is not None else num_classes for y in y_true] + ) y_score = np.c_[y_score, 1 - y_score.sum(axis=1, keepdims=True)] y_pred = y_score.argmax(axis=1) return metrics.accuracy_score( # type: ignore @@ -71,7 +75,9 @@ def top_3_accuracy( y_score: np.ndarray, ) -> float: num_classes = y_score.shape[1] - y_true_array = np.array([y if y is not None else num_classes for y in y_true]) + y_true_array = np.array( + [y if y is not None else num_classes for y in y_true] + ) y_score = np.c_[y_score, 1 - y_score.sum(axis=1, keepdims=True)] return metrics.top_k_accuracy_score( # type: ignore y_true=y_true_array, @@ -128,7 +134,6 @@ def jaccard( for each class. This function will convert the probabilities to binary predictions using the given threshold. """ - if y_true.ndim == 1: y_true = y_true[np.newaxis, :] diff --git a/src/soundevent/evaluation/tasks/__init__.py b/src/soundevent/evaluation/tasks/__init__.py index 0d1b3b3..bc14174 100644 --- a/src/soundevent/evaluation/tasks/__init__.py +++ b/src/soundevent/evaluation/tasks/__init__.py @@ -5,7 +5,9 @@ from soundevent.evaluation.tasks.sound_event_classification import ( sound_event_classification, ) -from soundevent.evaluation.tasks.sound_event_detection import sound_event_detection +from soundevent.evaluation.tasks.sound_event_detection import ( + sound_event_detection, +) __all__ = [ "clip_classification", diff --git a/src/soundevent/evaluation/tasks/clip_classification.py b/src/soundevent/evaluation/tasks/clip_classification.py index aa509ea..d68441c 100644 --- a/src/soundevent/evaluation/tasks/clip_classification.py +++ b/src/soundevent/evaluation/tasks/clip_classification.py @@ -89,7 +89,8 @@ def _evaluate_all_clips( def _compute_overall_metrics(true_classes, predicted_classes_scores): """Compute evaluation metrics based on true classes and predicted - scores.""" + scores. 
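
The three metric wrappers rewrapped above (balanced_accuracy, accuracy, top_3_accuracy) share one encoding trick: a true label of None is routed to an extra "rejection" class, and the score matrix gains a final column holding the leftover probability mass. A standalone sketch of just that step in plain NumPy:

import numpy as np

y_true = [0, None, 1]
y_score = np.array([[0.9, 0.1], [0.2, 0.3], [0.4, 0.6]])

num_classes = y_score.shape[1]
# None becomes the index of a synthetic "none of the known classes" class.
y_true_array = np.array([y if y is not None else num_classes for y in y_true])
# The appended column absorbs whatever probability the known classes lack.
y_score_ext = np.c_[y_score, 1 - y_score.sum(axis=1, keepdims=True)]
y_pred = y_score_ext.argmax(axis=1)
print(y_true_array)  # [0 2 1]
print(y_pred)        # [0 2 1]
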
+ """ evaluation_metrics = [ data.Feature( name=metric.__name__, @@ -164,6 +165,8 @@ def _compute_overall_score( evaluated_examples: Sequence[data.ClipEvaluation], ) -> float: non_none_scores = [ - example.score for example in evaluated_examples if example.score is not None + example.score + for example in evaluated_examples + if example.score is not None ] return float(np.mean(non_none_scores)) if non_none_scores else 0.0 diff --git a/src/soundevent/evaluation/tasks/clip_multilabel_classification.py b/src/soundevent/evaluation/tasks/clip_multilabel_classification.py index 6cc3249..7020b6e 100644 --- a/src/soundevent/evaluation/tasks/clip_multilabel_classification.py +++ b/src/soundevent/evaluation/tasks/clip_multilabel_classification.py @@ -98,7 +98,8 @@ def _compute_overall_metrics( predicted_classes_scores, ) -> List[data.Feature]: """Compute evaluation metrics based on true classes and predicted - scores.""" + scores. + """ return [ data.Feature( name=metric.__name__, @@ -166,6 +167,8 @@ def _compute_overall_score( evaluated_examples: Sequence[data.ClipEvaluation], ) -> float: valid_scores = [ - example.score for example in evaluated_examples if example.score is not None + example.score + for example in evaluated_examples + if example.score is not None ] return float(np.mean(valid_scores)) if valid_scores else 0.0 diff --git a/src/soundevent/evaluation/tasks/common.py b/src/soundevent/evaluation/tasks/common.py index 732f547..0e2cb58 100644 --- a/src/soundevent/evaluation/tasks/common.py +++ b/src/soundevent/evaluation/tasks/common.py @@ -7,7 +7,9 @@ def iterate_over_valid_clips( clip_predictions: Sequence[data.ClipPrediction], clip_annotations: Sequence[data.ClipAnnotation], ) -> Iterable[Tuple[data.ClipAnnotation, data.ClipPrediction]]: - annotated_clips = {example.clip.uuid: example for example in clip_annotations} + annotated_clips = { + example.clip.uuid: example for example in clip_annotations + } for predictions in clip_predictions: if predictions.clip.uuid in annotated_clips: diff --git a/src/soundevent/evaluation/tasks/sound_event_classification.py b/src/soundevent/evaluation/tasks/sound_event_classification.py index 029817c..9d7aba1 100644 --- a/src/soundevent/evaluation/tasks/sound_event_classification.py +++ b/src/soundevent/evaluation/tasks/sound_event_classification.py @@ -18,7 +18,9 @@ "sound_event_classification", ] -SOUNDEVENT_METRICS: Sequence[metrics.Metric] = (metrics.true_class_probability,) +SOUNDEVENT_METRICS: Sequence[metrics.Metric] = ( + metrics.true_class_probability, +) EXAMPLE_METRICS: Sequence[metrics.Metric] = () @@ -87,7 +89,8 @@ def _evaluate_clips( def _compute_overall_metrics(true_classes, predicted_classes_scores): """Compute evaluation metrics based on true classes and predicted - scores.""" + scores. 
+ """ evaluation_metrics = [ data.Feature( name=metric.__name__, @@ -119,7 +122,9 @@ def _evaluate_clip( if sound_event_prediction.sound_event.uuid not in _valid_sound_events: continue - annotation = _valid_sound_events[sound_event_prediction.sound_event.uuid] + annotation = _valid_sound_events[ + sound_event_prediction.sound_event.uuid + ] true_class, predicted_classes, match = _evaluate_sound_event( sound_event_prediction=sound_event_prediction, sound_event_annotation=annotation, @@ -130,7 +135,9 @@ def _evaluate_clip( predicted_classes_scores.append(predicted_classes) matches.append(match) - score = np.mean([match.score for match in matches if match.score is not None]) + score = np.mean( + [match.score for match in matches if match.score is not None] + ) return ( true_classes, @@ -187,6 +194,8 @@ def _compute_overall_score( evaluated_clip: Sequence[data.ClipEvaluation], ) -> float: non_none_scores = [ - example.score for example in evaluated_clip if example.score is not None + example.score + for example in evaluated_clip + if example.score is not None ] return float(np.mean(non_none_scores)) if non_none_scores else 0.0 diff --git a/src/soundevent/evaluation/tasks/sound_event_detection.py b/src/soundevent/evaluation/tasks/sound_event_detection.py index 09a3ba9..04c251e 100644 --- a/src/soundevent/evaluation/tasks/sound_event_detection.py +++ b/src/soundevent/evaluation/tasks/sound_event_detection.py @@ -20,7 +20,9 @@ "evaluate_clip", ] -SOUNDEVENT_METRICS: Sequence[metrics.Metric] = (metrics.true_class_probability,) +SOUNDEVENT_METRICS: Sequence[metrics.Metric] = ( + metrics.true_class_probability, +) EXAMPLE_METRICS: Sequence[metrics.Metric] = () @@ -87,7 +89,8 @@ def _evaluate_clips( def compute_overall_metrics(true_classes, predicted_classes_scores): """Compute evaluation metrics based on true classes and predicted - scores.""" + scores. 
+ """ evaluation_metrics = [ data.Feature( name=metric.__name__, diff --git a/src/soundevent/geometry/__init__.py b/src/soundevent/geometry/__init__.py index 79c7b10..7342d11 100644 --- a/src/soundevent/geometry/__init__.py +++ b/src/soundevent/geometry/__init__.py @@ -14,7 +14,10 @@ """ from soundevent.geometry.conversion import geometry_to_shapely -from soundevent.geometry.features import GeometricFeature, compute_geometric_features +from soundevent.geometry.features import ( + GeometricFeature, + compute_geometric_features, +) from soundevent.geometry.html import geometry_to_html from soundevent.geometry.operations import buffer_geometry, compute_bounds from soundevent.geometry.positions import get_geometry_point diff --git a/src/soundevent/geometry/features.py b/src/soundevent/geometry/features.py index 87b7dd3..a251bfe 100644 --- a/src/soundevent/geometry/features.py +++ b/src/soundevent/geometry/features.py @@ -154,7 +154,9 @@ def _compute_multi_point_features( Feature(name=GeometricFeature.LOW_FREQ, value=low_freq), Feature(name=GeometricFeature.HIGH_FREQ, value=high_freq), Feature(name=GeometricFeature.BANDWIDTH, value=high_freq - low_freq), - Feature(name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates)), + Feature( + name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates) + ), ] @@ -169,7 +171,9 @@ def _compute_multi_linestring_features( Feature(name=GeometricFeature.LOW_FREQ, value=low_freq), Feature(name=GeometricFeature.HIGH_FREQ, value=high_freq), Feature(name=GeometricFeature.BANDWIDTH, value=high_freq - low_freq), - Feature(name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates)), + Feature( + name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates) + ), ] @@ -184,11 +188,15 @@ def _compute_multi_polygon_features( Feature(name=GeometricFeature.LOW_FREQ, value=low_freq), Feature(name=GeometricFeature.HIGH_FREQ, value=high_freq), Feature(name=GeometricFeature.BANDWIDTH, value=high_freq - low_freq), - Feature(name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates)), + Feature( + name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates) + ), ] -_COMPUTE_FEATURES: Dict[geometries.GeometryType, Callable[[Any], List[Feature]]] = { +_COMPUTE_FEATURES: Dict[ + geometries.GeometryType, Callable[[Any], List[Feature]] +] = { geometries.TimeStamp.geom_type(): _compute_time_stamp_features, geometries.TimeInterval.geom_type(): _compute_time_interval_features, geometries.BoundingBox.geom_type(): _compute_bounding_box_features, diff --git a/src/soundevent/geometry/html.py b/src/soundevent/geometry/html.py index 7554044..e705c85 100644 --- a/src/soundevent/geometry/html.py +++ b/src/soundevent/geometry/html.py @@ -105,7 +105,11 @@ def axis_label( inner_style = "; ".join( [ "display: inline", - ("vertical-align: top" if axis == "time" else "vertical-align: bottom"), + ( + "vertical-align: top" + if axis == "time" + else "vertical-align: bottom" + ), ] ) diff --git a/src/soundevent/io/aoef/__init__.py b/src/soundevent/io/aoef/__init__.py index 1aed5d9..08659c8 100644 --- a/src/soundevent/io/aoef/__init__.py +++ b/src/soundevent/io/aoef/__init__.py @@ -34,7 +34,10 @@ from soundevent import data from soundevent.io.types import DataCollections, DataType -from .annotation_project import AnnotationProjectAdapter, AnnotationProjectObject +from .annotation_project import ( + AnnotationProjectAdapter, + AnnotationProjectObject, +) from .annotation_set import AnnotationSetAdapter, AnnotationSetObject from .dataset import 
DatasetAdapter, DatasetObject from .evaluation import EvaluationAdapter, EvaluationObject @@ -84,7 +87,9 @@ class AOEFObject(BaseModel): """Schema definition for an AOEF object.""" version: str = AOEF_VERSION - created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) + created_on: datetime.datetime = Field( + default_factory=datetime.datetime.now + ) data: Union[ EvaluationObject, DatasetObject, @@ -157,7 +162,9 @@ def load( if aoef_object.version != AOEF_VERSION: version = aoef_object.version - raise ValueError(f"Invalid AOEF version: {version} (expected {AOEF_VERSION})") + raise ValueError( + f"Invalid AOEF version: {version} (expected {AOEF_VERSION})" + ) return to_soundevent(aoef_object, audio_dir=audio_dir) diff --git a/src/soundevent/io/aoef/adapters.py b/src/soundevent/io/aoef/adapters.py index 1ea7920..ac21ca3 100644 --- a/src/soundevent/io/aoef/adapters.py +++ b/src/soundevent/io/aoef/adapters.py @@ -47,7 +47,9 @@ def to_aoef(self, obj: C) -> D: ... def to_soundevent(self, obj: D) -> C: ... -class DataAdapter(ABC, Generic[SoundEventObject, AOEFObject, SoundEventKey, AOEFKey]): +class DataAdapter( + ABC, Generic[SoundEventObject, AOEFObject, SoundEventKey, AOEFKey] +): """Base class for data adapters. A data adapter is used to convert between sound event and AOEF data @@ -64,7 +66,9 @@ def __init__(self): self._aoef_store: Dict[AOEFKey, AOEFObject] = {} @abstractmethod - def assemble_aoef(self, obj: SoundEventObject, obj_id: AOEFKey) -> AOEFObject: + def assemble_aoef( + self, obj: SoundEventObject, obj_id: AOEFKey + ) -> AOEFObject: """Create AOEF object from sound event object. Parameters diff --git a/src/soundevent/io/aoef/annotation_project.py b/src/soundevent/io/aoef/annotation_project.py index 5046f5e..ef630b9 100644 --- a/src/soundevent/io/aoef/annotation_project.py +++ b/src/soundevent/io/aoef/annotation_project.py @@ -26,18 +26,26 @@ def __init__( **kwargs, ): super().__init__(**kwargs) - self.annotation_task_adapter = annotation_task_adapter or AnnotationTaskAdapter( - self.clip_adapter, - self.user_adapter, + self.annotation_task_adapter = ( + annotation_task_adapter + or AnnotationTaskAdapter( + self.clip_adapter, + self.user_adapter, + ) ) def to_aoef( # type: ignore self, obj: data.AnnotationProject, # type: ignore ) -> AnnotationProjectObject: - tasks = [self.annotation_task_adapter.to_aoef(task) for task in obj.tasks or []] + tasks = [ + self.annotation_task_adapter.to_aoef(task) + for task in obj.tasks or [] + ] - project_tags = [self.tag_adapter.to_aoef(tag).id for tag in obj.annotation_tags] + project_tags = [ + self.tag_adapter.to_aoef(tag).id for tag in obj.annotation_tags + ] annotation_set = super().to_aoef(obj) @@ -67,11 +75,16 @@ def to_soundevent( # type: ignore annotation_set = super().to_soundevent(obj) tasks = [ - self.annotation_task_adapter.to_soundevent(task) for task in obj.tasks or [] + self.annotation_task_adapter.to_soundevent(task) + for task in obj.tasks or [] ] return data.AnnotationProject( - **{field: value for field, value in annotation_set if value is not None}, + **{ + field: value + for field, value in annotation_set + if value is not None + }, tasks=tasks, name=obj.name, description=obj.description, diff --git a/src/soundevent/io/aoef/annotation_set.py b/src/soundevent/io/aoef/annotation_set.py index 3feacb2..9ed43f6 100644 --- a/src/soundevent/io/aoef/annotation_set.py +++ b/src/soundevent/io/aoef/annotation_set.py @@ -11,7 +11,10 @@ from .note import NoteAdapter from .recording import RecordingAdapter, 
RecordingObject from .sequence import SequenceAdapter, SequenceObject -from .sequence_annotation import SequenceAnnotationAdapter, SequenceAnnotationObject +from .sequence_annotation import ( + SequenceAnnotationAdapter, + SequenceAnnotationObject, +) from .sound_event import SoundEventAdapter, SoundEventObject from .sound_event_annotation import ( SoundEventAnnotationAdapter, @@ -47,8 +50,12 @@ def __init__( sound_event_adapter: Optional[SoundEventAdapter] = None, sequence_adapter: Optional[SequenceAdapter] = None, clip_adapter: Optional[ClipAdapter] = None, - sound_event_annotations_adapter: Optional[SoundEventAnnotationAdapter] = None, - sequence_annotations_adapter: Optional[SequenceAnnotationAdapter] = None, + sound_event_annotations_adapter: Optional[ + SoundEventAnnotationAdapter + ] = None, + sequence_annotations_adapter: Optional[ + SequenceAnnotationAdapter + ] = None, clip_annotation_adapter: Optional[ClipAnnotationsAdapter] = None, ): self.user_adapter = user_adapter or UserAdapter() @@ -141,10 +148,14 @@ def to_soundevent( self.sequence_adapter.to_soundevent(sequence) for sound_event_annotation in obj.sound_event_annotations or []: - self.sound_event_annotations_adapter.to_soundevent(sound_event_annotation) + self.sound_event_annotations_adapter.to_soundevent( + sound_event_annotation + ) for sequence_annotation in obj.sequence_annotations or []: - self.sequence_annotations_adapter.to_soundevent(sequence_annotation) + self.sequence_annotations_adapter.to_soundevent( + sequence_annotation + ) annotated_clips = [ self.clip_annotation_adapter.to_soundevent(clip_annotation) diff --git a/src/soundevent/io/aoef/clip_annotations.py b/src/soundevent/io/aoef/clip_annotations.py index dc4cd19..c74cbe6 100644 --- a/src/soundevent/io/aoef/clip_annotations.py +++ b/src/soundevent/io/aoef/clip_annotations.py @@ -59,7 +59,9 @@ def assemble_aoef( ), sound_events=( [ - self.sound_event_annotation_adapter.to_aoef(annotation).uuid + self.sound_event_annotation_adapter.to_aoef( + annotation + ).uuid for annotation in obj.sound_events ] if obj.sound_events @@ -101,16 +103,25 @@ def assemble_soundevent( se_ann for annotation_id in obj.sound_events or [] if ( - se_ann := self.sound_event_annotation_adapter.from_id(annotation_id) + se_ann := self.sound_event_annotation_adapter.from_id( + annotation_id + ) ) is not None ], sequences=[ seq_ann for annotation_id in obj.sequences or [] - if (seq_ann := self.sequence_annotation_adapter.from_id(annotation_id)) + if ( + seq_ann := self.sequence_annotation_adapter.from_id( + annotation_id + ) + ) is not None ], - notes=[self.note_adapter.to_soundevent(note) for note in obj.notes or []], + notes=[ + self.note_adapter.to_soundevent(note) + for note in obj.notes or [] + ], created_on=obj.created_on or datetime.datetime.now(), ) diff --git a/src/soundevent/io/aoef/clip_evaluation.py b/src/soundevent/io/aoef/clip_evaluation.py index 73fd191..0cddee6 100644 --- a/src/soundevent/io/aoef/clip_evaluation.py +++ b/src/soundevent/io/aoef/clip_evaluation.py @@ -50,7 +50,10 @@ def assemble_aoef( annotations=annotations.uuid, predictions=predictions.uuid, matches=( - [self.match_adapter.to_aoef(match).uuid for match in obj.matches] + [ + self.match_adapter.to_aoef(match).uuid + for match in obj.matches + ] if obj.matches else None ), @@ -70,10 +73,14 @@ def assemble_soundevent( predictions = self.clip_predictions_adapter.from_id(obj.predictions) if annotations is None: - raise ValueError(f"Clip annotations with ID {obj.annotations} not found.") + raise ValueError( + 
f"Clip annotations with ID {obj.annotations} not found." + ) if predictions is None: - raise ValueError(f"Clip predictions with ID {obj.predictions} not found.") + raise ValueError( + f"Clip predictions with ID {obj.predictions} not found." + ) matches = [ match diff --git a/src/soundevent/io/aoef/clip_predictions.py b/src/soundevent/io/aoef/clip_predictions.py index cb0c358..df7dc2c 100644 --- a/src/soundevent/io/aoef/clip_predictions.py +++ b/src/soundevent/io/aoef/clip_predictions.py @@ -47,7 +47,9 @@ def assemble_aoef( clip=self.clip_adapter.to_aoef(obj.clip).uuid, sound_events=( [ - self.sound_event_prediction_adapter.to_aoef(sound_event).uuid + self.sound_event_prediction_adapter.to_aoef( + sound_event + ).uuid for sound_event in obj.sound_events ] if obj.sound_events @@ -65,7 +67,8 @@ def assemble_aoef( [ (tag.id, predicted_tag.score) for predicted_tag in obj.tags - if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) is not None + if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) + is not None ] if obj.tags else None @@ -92,13 +95,21 @@ def assemble_soundevent( sound_events=[ se_pred for sound_event in obj.sound_events or [] - if (se_pred := self.sound_event_prediction_adapter.from_id(sound_event)) + if ( + se_pred := self.sound_event_prediction_adapter.from_id( + sound_event + ) + ) is not None ], sequences=[ seq_pred for sequence in obj.sequences or [] - if (seq_pred := self.sequence_prediction_adapter.from_id(sequence)) + if ( + seq_pred := self.sequence_prediction_adapter.from_id( + sequence + ) + ) is not None ], tags=[ diff --git a/src/soundevent/io/aoef/dataset.py b/src/soundevent/io/aoef/dataset.py index c62bc59..e667bf4 100644 --- a/src/soundevent/io/aoef/dataset.py +++ b/src/soundevent/io/aoef/dataset.py @@ -35,7 +35,9 @@ def to_soundevent( # type: ignore ) -> data.Dataset: recording_set = super().to_soundevent(obj) return data.Dataset( - **{key: value for key, value in recording_set if value is not None}, + **{ + key: value for key, value in recording_set if value is not None + }, name=obj.name, description=obj.description, ) diff --git a/src/soundevent/io/aoef/evaluation.py b/src/soundevent/io/aoef/evaluation.py index 91b2dfe..e53e41f 100644 --- a/src/soundevent/io/aoef/evaluation.py +++ b/src/soundevent/io/aoef/evaluation.py @@ -14,8 +14,14 @@ from .note import NoteAdapter from .recording import RecordingAdapter, RecordingObject from .sequence import SequenceAdapter, SequenceObject -from .sequence_annotation import SequenceAnnotationAdapter, SequenceAnnotationObject -from .sequence_prediction import SequencePredictionAdapter, SequencePredictionObject +from .sequence_annotation import ( + SequenceAnnotationAdapter, + SequenceAnnotationObject, +) +from .sequence_prediction import ( + SequencePredictionAdapter, + SequencePredictionObject, +) from .sound_event import SoundEventAdapter, SoundEventObject from .sound_event_annotation import ( SoundEventAnnotationAdapter, @@ -63,11 +69,19 @@ def __init__( sound_event_adapter: Optional[SoundEventAdapter] = None, sequence_adapter: Optional[SequenceAdapter] = None, clip_adapter: Optional[ClipAdapter] = None, - sound_event_annotation_adapter: Optional[SoundEventAnnotationAdapter] = None, - sequence_annotation_adapter: Optional[SequenceAnnotationAdapter] = None, + sound_event_annotation_adapter: Optional[ + SoundEventAnnotationAdapter + ] = None, + sequence_annotation_adapter: Optional[ + SequenceAnnotationAdapter + ] = None, clip_annotations_adapter: Optional[ClipAnnotationsAdapter] = None, - 
sound_event_prediction_adapter: Optional[SoundEventPredictionAdapter] = None, - sequence_prediction_adapter: Optional[SequencePredictionAdapter] = None, + sound_event_prediction_adapter: Optional[ + SoundEventPredictionAdapter + ] = None, + sequence_prediction_adapter: Optional[ + SequencePredictionAdapter + ] = None, clip_predictions_adapter: Optional[ClipPredictionsAdapter] = None, clip_evaluation_adapter: Optional[ClipEvaluationAdapter] = None, match_adapter: Optional[MatchAdapter] = None, @@ -144,11 +158,14 @@ def __init__( self.sound_event_annotation_adapter, self.sound_event_prediction_adapter, ) - self.clip_evaluation_adapter = clip_evaluation_adapter or ClipEvaluationAdapter( - self.clip_annotations_adapter, - self.clip_predictions_adapter, - self.note_adapter, - self.match_adapter, + self.clip_evaluation_adapter = ( + clip_evaluation_adapter + or ClipEvaluationAdapter( + self.clip_annotations_adapter, + self.clip_predictions_adapter, + self.note_adapter, + self.match_adapter, + ) ) def to_aoef(self, obj: data.Evaluation) -> EvaluationObject: @@ -208,7 +225,9 @@ def to_soundevent( self.clip_adapter.to_soundevent(clip) for sound_event_annotation in obj.sound_event_annotations or []: - self.sound_event_annotation_adapter.to_soundevent(sound_event_annotation) + self.sound_event_annotation_adapter.to_soundevent( + sound_event_annotation + ) for sequence_annotation in obj.sequence_annotations or []: self.sequence_annotation_adapter.to_soundevent(sequence_annotation) @@ -217,7 +236,9 @@ def to_soundevent( self.clip_annotations_adapter.to_soundevent(clip_annotation) for sound_event_prediction in obj.sound_event_predictions or []: - self.sound_event_prediction_adapter.to_soundevent(sound_event_prediction) + self.sound_event_prediction_adapter.to_soundevent( + sound_event_prediction + ) for sequence_prediction in obj.sequence_predictions or []: self.sequence_prediction_adapter.to_soundevent(sequence_prediction) diff --git a/src/soundevent/io/aoef/evaluation_set.py b/src/soundevent/io/aoef/evaluation_set.py index 0242ed2..53b9e9f 100644 --- a/src/soundevent/io/aoef/evaluation_set.py +++ b/src/soundevent/io/aoef/evaluation_set.py @@ -35,7 +35,10 @@ def to_aoef( # type: ignore name=obj.name, description=obj.description, evaluation_tags=( - [self.tag_adapter.to_aoef(tag).id for tag in obj.evaluation_tags] + [ + self.tag_adapter.to_aoef(tag).id + for tag in obj.evaluation_tags + ] if obj.evaluation_tags else None ), @@ -47,7 +50,11 @@ def to_soundevent( # type: ignore ) -> data.EvaluationSet: annotation_set = super().to_soundevent(obj) return data.EvaluationSet( - **{field: value for field, value in annotation_set if value is not None}, + **{ + field: value + for field, value in annotation_set + if value is not None + }, name=obj.name, description=obj.description, evaluation_tags=[ diff --git a/src/soundevent/io/aoef/match.py b/src/soundevent/io/aoef/match.py index 0b76dda..fcc2a9a 100644 --- a/src/soundevent/io/aoef/match.py +++ b/src/soundevent/io/aoef/match.py @@ -36,12 +36,16 @@ def assemble_aoef( ) -> MatchObject: source = None if obj.source is not None: - prediction = self.sound_event_prediction_adapter.to_aoef(obj.source) + prediction = self.sound_event_prediction_adapter.to_aoef( + obj.source + ) source = prediction.uuid if prediction is not None else None target = None if obj.target is not None: - annotation = self.sound_event_annotation_adapter.to_aoef(obj.target) + annotation = self.sound_event_annotation_adapter.to_aoef( + obj.target + ) target = annotation.uuid if annotation is not 
None else None return MatchObject( diff --git a/src/soundevent/io/aoef/prediction_set.py b/src/soundevent/io/aoef/prediction_set.py index 58246b3..2c55188 100644 --- a/src/soundevent/io/aoef/prediction_set.py +++ b/src/soundevent/io/aoef/prediction_set.py @@ -11,7 +11,10 @@ from .note import NoteAdapter from .recording import RecordingAdapter, RecordingObject from .sequence import SequenceAdapter, SequenceObject -from .sequence_prediction import SequencePredictionAdapter, SequencePredictionObject +from .sequence_prediction import ( + SequencePredictionAdapter, + SequencePredictionObject, +) from .sound_event import SoundEventAdapter, SoundEventObject from .sound_event_prediction import ( SoundEventPredictionAdapter, @@ -47,8 +50,12 @@ def __init__( sound_event_adapter: Optional[SoundEventAdapter] = None, sequence_adapter: Optional[SequenceAdapter] = None, clip_adapter: Optional[ClipAdapter] = None, - sound_event_prediction_adapter: Optional[SoundEventPredictionAdapter] = None, - sequence_prediction_adapter: Optional[SequencePredictionAdapter] = None, + sound_event_prediction_adapter: Optional[ + SoundEventPredictionAdapter + ] = None, + sequence_prediction_adapter: Optional[ + SequencePredictionAdapter + ] = None, clip_predictions_adapter: Optional[ClipPredictionsAdapter] = None, ): self.user_adapter = user_adapter or UserAdapter() @@ -129,7 +136,9 @@ def to_soundevent(self, obj: PredictionSetObject) -> data.PredictionSet: self.clip_adapter.to_soundevent(clip) for sound_event_prediction in obj.sound_event_predictions or []: - self.sound_event_prediction_adapter.to_soundevent(sound_event_prediction) + self.sound_event_prediction_adapter.to_soundevent( + sound_event_prediction + ) for sequence_prediction in obj.sequence_predictions or []: self.sequence_prediction_adapter.to_soundevent(sequence_prediction) diff --git a/src/soundevent/io/aoef/recording.py b/src/soundevent/io/aoef/recording.py index dbfc61a..59e9021 100644 --- a/src/soundevent/io/aoef/recording.py +++ b/src/soundevent/io/aoef/recording.py @@ -34,7 +34,9 @@ class RecordingObject(BaseModel): rights: Optional[str] = None -class RecordingAdapter(DataAdapter[data.Recording, RecordingObject, UUID, UUID]): +class RecordingAdapter( + DataAdapter[data.Recording, RecordingObject, UUID, UUID] +): def __init__( self, user_adapter: UserAdapter, @@ -57,7 +59,10 @@ def assemble_aoef( notes = [self._note_adapter.to_aoef(note) for note in obj.notes] - owners = [self._user_adapter.to_aoef(owner).uuid for owner in obj.owners or []] + owners = [ + self._user_adapter.to_aoef(owner).uuid + for owner in obj.owners or [] + ] path = obj.path if self.audio_dir is not None: @@ -69,7 +74,9 @@ def assemble_aoef( duration=obj.duration, channels=obj.channels, samplerate=obj.samplerate, - time_expansion=(obj.time_expansion if obj.time_expansion != 1.0 else None), + time_expansion=( + obj.time_expansion if obj.time_expansion != 1.0 else None + ), hash=obj.hash, date=obj.date, time=obj.time, @@ -93,7 +100,10 @@ def assemble_soundevent(self, obj: RecordingObject) -> data.Recording: if (tag := self._tag_adapter.from_id(tag_id)) is not None ] - notes = [self._note_adapter.to_soundevent(note) for note in (obj.notes or [])] + notes = [ + self._note_adapter.to_soundevent(note) + for note in (obj.notes or []) + ] owners = [ user diff --git a/src/soundevent/io/aoef/recording_set.py b/src/soundevent/io/aoef/recording_set.py index 3707d6d..3e7c95a 100644 --- a/src/soundevent/io/aoef/recording_set.py +++ b/src/soundevent/io/aoef/recording_set.py @@ -47,7 +47,8 @@ def 
to_aoef( obj: data.RecordingSet, ) -> RecordingSetObject: recording_objects = [ - self.recording_adapter.to_aoef(recording) for recording in obj.recordings + self.recording_adapter.to_aoef(recording) + for recording in obj.recordings ] return RecordingSetObject( uuid=obj.uuid, diff --git a/src/soundevent/io/aoef/sequence.py b/src/soundevent/io/aoef/sequence.py index ec624b2..2c1e53e 100644 --- a/src/soundevent/io/aoef/sequence.py +++ b/src/soundevent/io/aoef/sequence.py @@ -28,7 +28,9 @@ def __init__( super().__init__() self.soundevent_adapter = soundevent_adapter - def assemble_aoef(self, obj: data.Sequence, obj_id: UUID) -> SequenceObject: + def assemble_aoef( + self, obj: data.Sequence, obj_id: UUID + ) -> SequenceObject: parent = None if obj.parent: parent = self.to_aoef(obj.parent).uuid diff --git a/src/soundevent/io/aoef/sequence_prediction.py b/src/soundevent/io/aoef/sequence_prediction.py index 1716d7d..636f3df 100644 --- a/src/soundevent/io/aoef/sequence_prediction.py +++ b/src/soundevent/io/aoef/sequence_prediction.py @@ -42,7 +42,8 @@ def assemble_aoef( [ (tag.id, predicted_tag.score) for predicted_tag in obj.tags - if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) is not None + if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) + is not None ] if obj.tags else None diff --git a/src/soundevent/io/aoef/sound_event.py b/src/soundevent/io/aoef/sound_event.py index 424a20c..9e5a175 100644 --- a/src/soundevent/io/aoef/sound_event.py +++ b/src/soundevent/io/aoef/sound_event.py @@ -18,7 +18,9 @@ class SoundEventObject(BaseModel): features: Optional[Dict[str, float]] = None -class SoundEventAdapter(DataAdapter[data.SoundEvent, SoundEventObject, UUID, UUID]): +class SoundEventAdapter( + DataAdapter[data.SoundEvent, SoundEventObject, UUID, UUID] +): def __init__( self, recording_adapter: RecordingAdapter, diff --git a/src/soundevent/io/aoef/sound_event_annotation.py b/src/soundevent/io/aoef/sound_event_annotation.py index 67733d4..afa07c3 100644 --- a/src/soundevent/io/aoef/sound_event_annotation.py +++ b/src/soundevent/io/aoef/sound_event_annotation.py @@ -22,7 +22,9 @@ class SoundEventAnnotationObject(BaseModel): class SoundEventAnnotationAdapter( - DataAdapter[data.SoundEventAnnotation, SoundEventAnnotationObject, UUID, UUID] + DataAdapter[ + data.SoundEventAnnotation, SoundEventAnnotationObject, UUID, UUID + ] ): def __init__( self, @@ -66,7 +68,9 @@ def assemble_soundevent( sound_event = self.sound_event_adapter.from_id(obj.sound_event) if sound_event is None: - raise ValueError(f"Sound event with ID {obj.sound_event} not found.") + raise ValueError( + f"Sound event with ID {obj.sound_event} not found." 
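
The adapters in these hunks share a lookup-or-fail shape: from_id returns None for unknown keys, and the caller turns that None into a descriptive ValueError, as in the lines above. A compact sketch of the contract with a hypothetical registry class (not the library's exact API):

from typing import Dict, Optional
from uuid import UUID, uuid4

class Registry:  # hypothetical stand-in for a DataAdapter's id store
    def __init__(self) -> None:
        self._store: Dict[UUID, str] = {}

    def add(self, key: UUID, value: str) -> None:
        self._store[key] = value

    def from_id(self, key: UUID) -> Optional[str]:
        # Missing keys yield None rather than raising a KeyError.
        return self._store.get(key)

registry = Registry()
registry.add(uuid4(), "known sound event")

sound_event = registry.from_id(uuid4())
if sound_event is None:
    print("Sound event not found.")  # the adapters raise ValueError here
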
+ ) return data.SoundEventAnnotation( uuid=obj.uuid, diff --git a/src/soundevent/io/aoef/sound_event_prediction.py b/src/soundevent/io/aoef/sound_event_prediction.py index 007f4ae..175e06a 100644 --- a/src/soundevent/io/aoef/sound_event_prediction.py +++ b/src/soundevent/io/aoef/sound_event_prediction.py @@ -18,7 +18,9 @@ class SoundEventPredictionObject(BaseModel): class SoundEventPredictionAdapter( - DataAdapter[data.SoundEventPrediction, SoundEventPredictionObject, UUID, UUID] + DataAdapter[ + data.SoundEventPrediction, SoundEventPredictionObject, UUID, UUID + ] ): def __init__( self, @@ -42,7 +44,8 @@ def assemble_aoef( [ (tag.id, predicted_tag.score) for predicted_tag in obj.tags - if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) is not None + if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) + is not None ] if obj.tags else None @@ -56,7 +59,9 @@ def assemble_soundevent( sound_event = self.sound_event_adapter.from_id(obj.sound_event) if sound_event is None: - raise ValueError(f"Sound event with ID {obj.sound_event} not found.") + raise ValueError( + f"Sound event with ID {obj.sound_event} not found." + ) return data.SoundEventPrediction( uuid=obj.uuid or uuid4(), diff --git a/src/soundevent/io/crowsetta/__init__.py b/src/soundevent/io/crowsetta/__init__.py index 63fc6a2..a933a8f 100644 --- a/src/soundevent/io/crowsetta/__init__.py +++ b/src/soundevent/io/crowsetta/__init__.py @@ -8,7 +8,10 @@ annotation_from_clip_annotation, annotation_to_clip_annotation, ) -from soundevent.io.crowsetta.bbox import bbox_from_annotation, bbox_to_annotation +from soundevent.io.crowsetta.bbox import ( + bbox_from_annotation, + bbox_to_annotation, +) from soundevent.io.crowsetta.labels import ( label_from_tag, label_from_tags, diff --git a/src/soundevent/io/crowsetta/annotation.py b/src/soundevent/io/crowsetta/annotation.py index 6a8e91e..7af1372 100644 --- a/src/soundevent/io/crowsetta/annotation.py +++ b/src/soundevent/io/crowsetta/annotation.py @@ -5,7 +5,10 @@ import crowsetta from soundevent import data -from soundevent.io.crowsetta.bbox import bbox_from_annotation, bbox_to_annotation +from soundevent.io.crowsetta.bbox import ( + bbox_from_annotation, + bbox_to_annotation, +) from soundevent.io.crowsetta.sequence import ( sequence_from_annotations, sequence_to_annotations, @@ -85,7 +88,8 @@ def annotation_from_clip_annotation( if annotation_fmt != "seq": raise ValueError( - "annotation_fmt must be either 'bbox' or 'seq', " f"not {annotation_fmt}." + "annotation_fmt must be either 'bbox' or 'seq', " + f"not {annotation_fmt}." ) return crowsetta.Annotation( @@ -148,7 +152,6 @@ def annotation_to_clip_annotation( data.ClipAnnotation A ClipAnnotation representing the converted Crowsetta annotation. """ - if tags is None: tags = [] @@ -172,7 +175,8 @@ def annotation_to_clip_annotation( if path is not None and path != recording.path: raise ValueError( - "The path of the annotation does not match the path of the " "recording." + "The path of the annotation does not match the path of the " + "recording." 
) sound_event_annotations = [] @@ -190,9 +194,9 @@ def annotation_to_clip_annotation( ) ) - crowsetta_sequences: Union[List[crowsetta.Sequence], crowsetta.Sequence] = getattr( - annot, "seq", [] - ) + crowsetta_sequences: Union[ + List[crowsetta.Sequence], crowsetta.Sequence + ] = getattr(annot, "seq", []) if not isinstance(crowsetta_sequences, list): crowsetta_sequences = [crowsetta_sequences] diff --git a/src/soundevent/io/crowsetta/bbox.py b/src/soundevent/io/crowsetta/bbox.py index 3854a9c..78e6a98 100644 --- a/src/soundevent/io/crowsetta/bbox.py +++ b/src/soundevent/io/crowsetta/bbox.py @@ -23,7 +23,10 @@ def convert_geometry_to_bbox( "because the sound event geometry is not a BoundingBox." ) - if geometry.type in ["TimeInterval", "TimeStamp"] and raise_on_time_geometries: + if ( + geometry.type in ["TimeInterval", "TimeStamp"] + and raise_on_time_geometries + ): raise ValueError( "Cannot convert to a crowsetta bbox because " "the sound event geometry is a TimeInterval or TimeStamp " @@ -159,7 +162,9 @@ def bbox_to_annotation( low_freq = low_freq * recording.time_expansion high_freq = high_freq * recording.time_expansion - geometry = data.BoundingBox(coordinates=[start_time, low_freq, end_time, high_freq]) + geometry = data.BoundingBox( + coordinates=[start_time, low_freq, end_time, high_freq] + ) tags = label_to_tags(bbox.label, **kwargs) diff --git a/src/soundevent/io/crowsetta/labels.py b/src/soundevent/io/crowsetta/labels.py index 832d4fb..546d3a5 100644 --- a/src/soundevent/io/crowsetta/labels.py +++ b/src/soundevent/io/crowsetta/labels.py @@ -9,7 +9,7 @@ customize the conversion process using various options. """ -from typing import Callable, List, Optional, Sequence, Union +from typing import Callable, Dict, List, Optional, Sequence, Union from soundevent import data @@ -30,7 +30,7 @@ def label_to_tags( label: str, tag_fn: Optional[LabelToTagFn] = None, tag_mapping: Optional[LabelToTagMap] = None, - key_mapping: Optional[dict[str, str]] = None, + key_mapping: Optional[Dict[str, str]] = None, key: Optional[str] = None, fallback: str = "crowsetta", empty_labels: Sequence[str] = (EMPTY_LABEL,), diff --git a/src/soundevent/io/crowsetta/segment.py b/src/soundevent/io/crowsetta/segment.py index 259cf3d..64ab9f7 100644 --- a/src/soundevent/io/crowsetta/segment.py +++ b/src/soundevent/io/crowsetta/segment.py @@ -1,6 +1,6 @@ """crowsetta.segment module.""" -from typing import List, Optional +from typing import List, Optional, Tuple import crowsetta @@ -17,7 +17,7 @@ def convert_geometry_to_interval( geometry: data.Geometry, cast_to_segment: bool = False, -) -> tuple[float, float]: +) -> Tuple[float, float]: if geometry.type != "TimeInterval": if not cast_to_segment: raise ValueError( @@ -176,7 +176,6 @@ def segment_to_annotation( containing a SoundEvent with the time interval, associated tags, notes, and creator information. """ - if notes is None: notes = [] diff --git a/src/soundevent/io/crowsetta/sequence.py b/src/soundevent/io/crowsetta/sequence.py index 0dfd6f4..57c43d0 100644 --- a/src/soundevent/io/crowsetta/sequence.py +++ b/src/soundevent/io/crowsetta/sequence.py @@ -52,7 +52,6 @@ def sequence_from_annotations( ValueError If an annotation cannot be converted and `ignore_errors` is False. 
""" - segments = [] for annotation in annotations: diff --git a/src/soundevent/io/formats.py b/src/soundevent/io/formats.py index a23ae6e..8f90b14 100644 --- a/src/soundevent/io/formats.py +++ b/src/soundevent/io/formats.py @@ -36,4 +36,6 @@ def infer_format(path: PathLike) -> str: if inferrer(path): return format_ - raise ValueError(f"Cannot infer format of file {path}, or format not supported.") + raise ValueError( + f"Cannot infer format of file {path}, or format not supported." + ) diff --git a/src/soundevent/io/saver.py b/src/soundevent/io/saver.py index 9de0ba5..22b088a 100644 --- a/src/soundevent/io/saver.py +++ b/src/soundevent/io/saver.py @@ -42,7 +42,6 @@ def save( Format to save the data in. If `None`, the format will be inferred from the file extension. """ - if format is None: format = infer_format(path) diff --git a/src/soundevent/plot/annotation.py b/src/soundevent/plot/annotation.py index fc17e24..0998aba 100644 --- a/src/soundevent/plot/annotation.py +++ b/src/soundevent/plot/annotation.py @@ -21,7 +21,6 @@ def plot_annotation( **kwargs, ) -> Axes: """Plot an annotation.""" - geometry = annotation.sound_event.geometry if geometry is None: @@ -70,12 +69,12 @@ def get_tags_position( float Frequency position for tag plotting in Hertz. """ - func = _TAG_POSITION_FUNCTIONS.get(geometry.type, None) if func is None: raise NotImplementedError( - f"Plotting tags for geometry of type {geometry.type} " "is not implemented." + f"Plotting tags for geometry of type {geometry.type} " + "is not implemented." ) return func(geometry, bounds) @@ -116,7 +115,9 @@ def _get_tags_position_bounding_box( _TAG_POSITION_FUNCTIONS: Dict[ data.GeometryType, - Callable[[data.Geometry, Tuple[float, float, float, float]], Tuple[float, float]], + Callable[ + [data.Geometry, Tuple[float, float, float, float]], Tuple[float, float] + ], ] = { data.BoundingBox.geom_type(): _get_tags_position_bounding_box, } diff --git a/src/soundevent/plot/geometries.py b/src/soundevent/plot/geometries.py index 607c0af..f595ff2 100644 --- a/src/soundevent/plot/geometries.py +++ b/src/soundevent/plot/geometries.py @@ -58,7 +58,8 @@ def _plot_bounding_box_geometry( ) -> Axes: if not isinstance(geometry, data.BoundingBox): raise ValueError( - f"Expected geometry of type {data.BoundingBox}, " f"got {type(geometry)}." + f"Expected geometry of type {data.BoundingBox}, " + f"got {type(geometry)}." 
) start_time, low_freq, end_time, high_freq = geometry.coordinates diff --git a/src/soundevent/plot/tags.py b/src/soundevent/plot/tags.py index 96581a8..73cc25b 100644 --- a/src/soundevent/plot/tags.py +++ b/src/soundevent/plot/tags.py @@ -29,7 +29,9 @@ def __init__( self._tags: Dict[data.Tag, str] = {} colormap = get_cmap(cmap) - self._colors = cycle([colormap(x) for x in np.linspace(0, 1, num_colors)]) + self._colors = cycle( + [colormap(x) for x in np.linspace(0, 1, num_colors)] + ) def get_color(self, tag: data.Tag) -> str: """Get color for tag.""" diff --git a/tests/conftest.py b/tests/conftest.py index 1440eca..4321d51 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,7 +10,6 @@ import numpy as np import pytest import soundfile as sf - from soundevent import data if sys.version_info < (3, 9): diff --git a/tests/test_array/test_dimensions.py b/tests/test_array/test_dimensions.py index 598d295..ab31c39 100644 --- a/tests/test_array/test_dimensions.py +++ b/tests/test_array/test_dimensions.py @@ -183,7 +183,9 @@ def test_create_frequency_dim_from_array_sets_attrs(): def test_create_frequency_dim_from_array_estimates_step(): """Test create_frequency_dim_from_array function.""" arr = np.array([1, 2, 3]) - frequency_dim = arrays.create_frequency_dim_from_array(arr, estimate_step=True) + frequency_dim = arrays.create_frequency_dim_from_array( + arr, estimate_step=True + ) assert frequency_dim.attrs["step"] == 1 diff --git a/tests/test_array/test_operations.py b/tests/test_array/test_operations.py index b64e2dd..b931fe5 100644 --- a/tests/test_array/test_operations.py +++ b/tests/test_array/test_operations.py @@ -1,7 +1,7 @@ """Test suite for the soundevent.arrays.operations module.""" -import pytest import numpy as np +import pytest import xarray as xr from soundevent.arrays import operations as ops diff --git a/tests/test_audio/test_filter.py b/tests/test_audio/test_filter.py index f43c912..5d488bb 100644 --- a/tests/test_audio/test_filter.py +++ b/tests/test_audio/test_filter.py @@ -6,7 +6,6 @@ import pytest import xarray as xr from scipy import signal - from soundevent import audio diff --git a/tests/test_audio/test_io.py b/tests/test_audio/test_io.py index 460e3b5..6bcbac4 100644 --- a/tests/test_audio/test_io.py +++ b/tests/test_audio/test_io.py @@ -1,9 +1,8 @@ -from typing import Optional from pathlib import Path +from typing import Optional import numpy as np import pytest - from soundevent.audio.io import audio_to_bytes, load_audio @@ -27,7 +26,9 @@ def test_audio_to_bytes_has_correct_length( dtype: np.dtype, ): samples = int(duration * samplerate) - array = np.random.random(size=[int(duration * samplerate), channels]).astype(dtype) + array = np.random.random( + size=[int(duration * samplerate), channels] + ).astype(dtype) bytes_per_sample = (bit_depth // 8) * channels expected_bytes = samples * bytes_per_sample diff --git a/tests/test_audio/test_raw.py b/tests/test_audio/test_raw.py index 5faa871..6abe601 100644 --- a/tests/test_audio/test_raw.py +++ b/tests/test_audio/test_raw.py @@ -1,7 +1,6 @@ """Test suite for the RawData class.""" import soundfile as sf - from soundevent.audio.chunks import parse_into_chunks from soundevent.audio.raw import RawData diff --git a/tests/test_audio/test_resample.py b/tests/test_audio/test_resample.py index 413176b..97c94e9 100644 --- a/tests/test_audio/test_resample.py +++ b/tests/test_audio/test_resample.py @@ -3,7 +3,6 @@ import numpy as np import pytest import xarray as xr - from soundevent import audio diff --git 
a/tests/test_audio/test_scaling.py b/tests/test_audio/test_scaling.py index 0f3e40c..4b684ae 100644 --- a/tests/test_audio/test_scaling.py +++ b/tests/test_audio/test_scaling.py @@ -3,7 +3,6 @@ import numpy as np import pytest import xarray as xr - from soundevent import data from soundevent.audio import ( clamp_amplitude, diff --git a/tests/test_data/test_datasets.py b/tests/test_data/test_datasets.py index a72938e..66e607e 100644 --- a/tests/test_data/test_datasets.py +++ b/tests/test_data/test_datasets.py @@ -3,7 +3,6 @@ from pathlib import Path import pytest - from soundevent import data @@ -52,7 +51,9 @@ def test_create_dataset_ignores_non_audio_files(tmp_path: Path): def test_create_dataset_fails_with_non_existing_directory(): """Test that we can create a dataset from audio files.""" with pytest.raises(ValueError): - data.Dataset.from_directory(Path("non-existing-directory"), name="test") + data.Dataset.from_directory( + Path("non-existing-directory"), name="test" + ) def test_create_dataset_fails_if_path_is_file(tmp_path: Path): @@ -76,7 +77,9 @@ def test_create_dataset_without_recursive(tmp_path: Path, random_wav): """Test that we can create a dataset from audio files.""" (tmp_path / "test1").mkdir() random_wav(path=tmp_path / "test1" / "test1.wav") - dataset = data.Dataset.from_directory(tmp_path, recursive=False, name="test") + dataset = data.Dataset.from_directory( + tmp_path, recursive=False, name="test" + ) assert len(dataset.recordings) == 0 diff --git a/tests/test_data/test_evaluated_samples.py b/tests/test_data/test_evaluated_samples.py index 00e197b..0789d24 100644 --- a/tests/test_data/test_evaluated_samples.py +++ b/tests/test_data/test_evaluated_samples.py @@ -2,7 +2,6 @@ import pytest from pydantic import ValidationError - from soundevent import data diff --git a/tests/test_data/test_geometry.py b/tests/test_data/test_geometry.py index ac0e45e..cbad0d9 100644 --- a/tests/test_data/test_geometry.py +++ b/tests/test_data/test_geometry.py @@ -5,7 +5,6 @@ from typing import List import pytest - from soundevent import data @@ -184,7 +183,9 @@ def test_load_multilinestring_from_dict(): def test_load_multilinestring_from_attributes(): """Test that a MultiLineString can be loaded from attributes.""" - obj = data.MultiLineString(coordinates=[[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) + obj = data.MultiLineString( + coordinates=[[[0, 1], [2, 3]], [[4, 5], [6, 7]]] + ) geom = data.geometry_validate(obj, mode="attributes") assert isinstance(geom, data.MultiLineString) assert geom.coordinates == [[[0, 1], [2, 3]], [[4, 5], [6, 7]]] @@ -278,7 +279,6 @@ def test_invalid_time_interval_fails(): def test_invalid_bounding_box_fails(): """Test that an invalid bounds fails.""" - # No negative time with pytest.raises(ValueError): data.BoundingBox(coordinates=[-1, 0, 0, 1]) diff --git a/tests/test_evaluation/test_clip_classification.py b/tests/test_evaluation/test_clip_classification.py index 48790e4..11523dd 100644 --- a/tests/test_evaluation/test_clip_classification.py +++ b/tests/test_evaluation/test_clip_classification.py @@ -4,7 +4,6 @@ from typing import List import pytest - from soundevent import data from soundevent.evaluation import clip_classification @@ -156,7 +155,9 @@ def test_evaluation_has_balanced_accuracy( tags=evaluation_tags, ) - balanced_accuracy = data.find_feature(evaluation.metrics, name="balanced_accuracy") + balanced_accuracy = data.find_feature( + evaluation.metrics, name="balanced_accuracy" + ) assert balanced_accuracy is not None assert 
math.isclose(balanced_accuracy.value, 0.5, rel_tol=1e-6) @@ -173,7 +174,9 @@ def test_evaluation_has_top_3_accuracy( tags=evaluation_tags, ) - top_3_accuracy = data.find_feature(evaluation.metrics, name="top_3_accuracy") + top_3_accuracy = data.find_feature( + evaluation.metrics, name="top_3_accuracy" + ) assert top_3_accuracy is not None assert math.isclose(top_3_accuracy.value, 1.0, rel_tol=1e-6) @@ -212,7 +215,8 @@ def test_overall_score_is_the_mean_of_the_scores_of_all_evaluated_clips( evaluation_tags: List[data.Tag], ): """Test that the overall score is the mean of the scores of all evaluated - examples.""" + examples. + """ evaluation = clip_classification( clip_annotations=annotation_set.clip_annotations, clip_predictions=prediction_set.clip_predictions, @@ -240,7 +244,11 @@ def test_each_example_score_is_the_probability_of_the_true_class( assert len(evaluation.clip_evaluations[1].metrics) == 1 assert evaluation.clip_evaluations[0].score is not None - assert math.isclose(evaluation.clip_evaluations[0].score, 0.9, rel_tol=1e-6) + assert math.isclose( + evaluation.clip_evaluations[0].score, 0.9, rel_tol=1e-6 + ) assert evaluation.clip_evaluations[1].score is not None - assert math.isclose(evaluation.clip_evaluations[1].score, 0.1, rel_tol=1e-6) + assert math.isclose( + evaluation.clip_evaluations[1].score, 0.1, rel_tol=1e-6 + ) diff --git a/tests/test_evaluation/test_clip_multilabel_classification.py b/tests/test_evaluation/test_clip_multilabel_classification.py index 6b74fcb..b22c627 100644 --- a/tests/test_evaluation/test_clip_multilabel_classification.py +++ b/tests/test_evaluation/test_clip_multilabel_classification.py @@ -3,7 +3,6 @@ from typing import List import pytest - from soundevent import data from soundevent.evaluation import clip_multilabel_classification diff --git a/tests/test_evaluation/test_encode.py b/tests/test_evaluation/test_encode.py index 1d7bb50..0bcadb5 100644 --- a/tests/test_evaluation/test_encode.py +++ b/tests/test_evaluation/test_encode.py @@ -4,7 +4,6 @@ import numpy as np import pytest - from soundevent import data from soundevent.evaluation import ( classification_encoding, @@ -16,7 +15,9 @@ @pytest.fixture -def tags(random_tags: Callable[[int], Sequence[data.Tag]]) -> Sequence[data.Tag]: +def tags( + random_tags: Callable[[int], Sequence[data.Tag]], +) -> Sequence[data.Tag]: """Tags for testing.""" return random_tags(10) @@ -35,7 +36,6 @@ def test_classification_encoding( encoder: Encoder, ): """Test encoding objects with tags.""" - encoded = classification_encoding( tags=[tags[3]], encoder=encoder, diff --git a/tests/test_evaluation/test_matching.py b/tests/test_evaluation/test_matching.py index 8ce744d..0c858da 100644 --- a/tests/test_evaluation/test_matching.py +++ b/tests/test_evaluation/test_matching.py @@ -96,7 +96,9 @@ def test_multi_linestring_is_supported(): def test_multi_polygon_is_supported(): - multi_polygon = data.MultiPolygon(coordinates=[[[[1, 2], [4, 3], [5, 6], [1, 2]]]]) + multi_polygon = data.MultiPolygon( + coordinates=[[[[1, 2], [4, 3], [5, 6], [1, 2]]]] + ) matches = list(match_geometries([multi_polygon], [multi_polygon])) assert len(matches) == 1 source_index, target_index, affinity = matches[0] diff --git a/tests/test_evaluation/test_metrics.py b/tests/test_evaluation/test_metrics.py index 3597d27..c447d2f 100644 --- a/tests/test_evaluation/test_metrics.py +++ b/tests/test_evaluation/test_metrics.py @@ -1,7 +1,6 @@ """Test suite for soundevent.evaluation.metrics.py.""" import numpy as np - from soundevent.evaluation import 
metrics diff --git a/tests/test_evaluation/test_sound_event_detection.py b/tests/test_evaluation/test_sound_event_detection.py index a798832..09080e5 100644 --- a/tests/test_evaluation/test_sound_event_detection.py +++ b/tests/test_evaluation/test_sound_event_detection.py @@ -28,7 +28,9 @@ def test_can_evaluate_nips_data(): assert isinstance(evaluation, data.Evaluation) # check that all clips have been evaluated - assert len(evaluation.clip_evaluations) == len(evaluation_set.clip_annotations) + assert len(evaluation.clip_evaluations) == len( + evaluation_set.clip_annotations + ) # check that all metrics are present assert len(evaluation.metrics) == 4 diff --git a/tests/test_geometry/conftest.py b/tests/test_geometry/conftest.py index d28a055..1d69a60 100644 --- a/tests/test_geometry/conftest.py +++ b/tests/test_geometry/conftest.py @@ -1,7 +1,6 @@ """Common fixtures for testing geometry functions.""" import pytest - from soundevent import data diff --git a/tests/test_geometry/test_conversion.py b/tests/test_geometry/test_conversion.py index a53398a..0ff44e4 100644 --- a/tests/test_geometry/test_conversion.py +++ b/tests/test_geometry/test_conversion.py @@ -1,7 +1,6 @@ """Test Suite for geometry conversion functions.""" import shapely - from soundevent import data, geometry diff --git a/tests/test_geometry/test_html.py b/tests/test_geometry/test_html.py index dd019cf..39d217a 100644 --- a/tests/test_geometry/test_html.py +++ b/tests/test_geometry/test_html.py @@ -1,7 +1,6 @@ """Test that geometries get converted to HTML.""" import html5lib - from soundevent import data from soundevent.geometry.html import geometry_to_html diff --git a/tests/test_geometry/test_operations.py b/tests/test_geometry/test_operations.py index e400214..c555aff 100644 --- a/tests/test_geometry/test_operations.py +++ b/tests/test_geometry/test_operations.py @@ -4,7 +4,6 @@ from typing import List import pytest - from soundevent import data from soundevent.data.geometries import BaseGeometry from soundevent.geometry.operations import buffer_geometry, compute_bounds diff --git a/tests/test_io/conftest.py b/tests/test_io/conftest.py index 725e532..f1de546 100644 --- a/tests/test_io/conftest.py +++ b/tests/test_io/conftest.py @@ -3,7 +3,6 @@ from typing import Callable, List import pytest - from soundevent import data diff --git a/tests/test_io/test_annotation_projects.py b/tests/test_io/test_annotation_projects.py index 580b487..85a6874 100644 --- a/tests/test_io/test_annotation_projects.py +++ b/tests/test_io/test_annotation_projects.py @@ -40,7 +40,9 @@ def test_saved_annotation_project_is_saved_to_json_file( assert path.exists() -def test_saved_annotation_project_has_correct_info(monkeypatch, tmp_path: Path) -> None: +def test_saved_annotation_project_has_correct_info( + monkeypatch, tmp_path: Path +) -> None: """Test that the saved annotation project has the correct info.""" # Arrange annotation_project = data.AnnotationProject( @@ -173,7 +175,10 @@ def test_can_recover_task_status( # Assert assert recovered == annotation_project - assert recovered.tasks[0].status_badges[0].state == data.AnnotationState.completed + assert ( + recovered.tasks[0].status_badges[0].state + == data.AnnotationState.completed + ) def test_can_recover_user_that_completed_task( @@ -280,7 +285,9 @@ def test_can_recover_task_simple_annotation( clip_annotations=[ data.ClipAnnotation( clip=clip, - sound_events=[data.SoundEventAnnotation(sound_event=sound_event)], + sound_events=[ + data.SoundEventAnnotation(sound_event=sound_event) + ], ) ], 
tasks=[data.AnnotationTask(clip=clip)], @@ -294,7 +301,8 @@ def test_can_recover_task_simple_annotation( # Assert assert recovered == annotation_project assert ( - recovered.clip_annotations[0].sound_events[0].sound_event.geometry is not None + recovered.clip_annotations[0].sound_events[0].sound_event.geometry + is not None ) assert sound_event.geometry is not None assert ( @@ -302,7 +310,9 @@ def test_can_recover_task_simple_annotation( == sound_event.geometry.type ) assert ( - recovered.clip_annotations[0].sound_events[0].sound_event.geometry.coordinates + recovered.clip_annotations[0] + .sound_events[0] + .sound_event.geometry.coordinates == sound_event.geometry.coordinates ) @@ -342,8 +352,13 @@ def test_can_recover_task_annotation_with_tags( # Assert assert recovered == annotation_project - assert recovered.clip_annotations[0].sound_events[0].tags[0].key == "species" - assert recovered.clip_annotations[0].sound_events[0].tags[0].value == "test_species" + assert ( + recovered.clip_annotations[0].sound_events[0].tags[0].key == "species" + ) + assert ( + recovered.clip_annotations[0].sound_events[0].tags[0].value + == "test_species" + ) def test_can_recover_annotation_creator( @@ -394,7 +409,9 @@ def test_can_recover_annotation_creation_date( data.ClipAnnotation( clip=clip, sound_events=[ - data.SoundEventAnnotation(sound_event=sound_event, created_on=date) + data.SoundEventAnnotation( + sound_event=sound_event, created_on=date + ) ], ), ], @@ -447,8 +464,14 @@ def test_can_recover_annotation_notes( # Assert assert recovered == annotation_project - assert recovered.clip_annotations[0].sound_events[0].notes[0].message == "test_note" - assert recovered.clip_annotations[0].sound_events[0].notes[0].created_by == user + assert ( + recovered.clip_annotations[0].sound_events[0].notes[0].message + == "test_note" + ) + assert ( + recovered.clip_annotations[0].sound_events[0].notes[0].created_by + == user + ) def test_can_recover_sound_event_features( @@ -490,11 +513,17 @@ def test_can_recover_sound_event_features( # Assert assert recovered == annotation_project assert ( - recovered.clip_annotations[0].sound_events[0].sound_event.features[0].name + recovered.clip_annotations[0] + .sound_events[0] + .sound_event.features[0] + .name == "duration" ) assert ( - recovered.clip_annotations[0].sound_events[0].sound_event.features[0].value + recovered.clip_annotations[0] + .sound_events[0] + .sound_event.features[0] + .value == 1.0 ) @@ -535,7 +564,9 @@ def test_recording_paths_are_stored_as_relative_if_audio_dir_is_provided( def test_can_parse_nips4plus(tmp_path: Path): """Test that NIPS4BPlus annotations can be parsed.""" - original_path = BASE_DIR / "docs" / "user_guide" / "nips4b_plus_sample.json" + original_path = ( + BASE_DIR / "docs" / "user_guide" / "nips4b_plus_sample.json" + ) path = tmp_path / "test.json" # Act diff --git a/tests/test_io/test_aoef/conftest.py b/tests/test_io/test_aoef/conftest.py index 0073f4b..a2a1421 100644 --- a/tests/test_io/test_aoef/conftest.py +++ b/tests/test_io/test_aoef/conftest.py @@ -1,7 +1,6 @@ from pathlib import Path import pytest - from soundevent.io.aoef.annotation_project import AnnotationProjectAdapter from soundevent.io.aoef.annotation_set import AnnotationSetAdapter from soundevent.io.aoef.annotation_task import AnnotationTaskAdapter diff --git a/tests/test_io/test_aoef/test_api.py b/tests/test_io/test_aoef/test_api.py index b671410..e6b2117 100644 --- a/tests/test_io/test_aoef/test_api.py +++ b/tests/test_io/test_aoef/test_api.py @@ -5,9 +5,7 @@ from 
pathlib import Path import pytest - -from soundevent import data -from soundevent import io +from soundevent import data, io def test_load_fails_if_file_does_not_exist(): @@ -33,7 +31,8 @@ def test_load_fails_if_file_is_not_a_json_file(tmp_path): def test_load_fails_if_collection_type_is_not_supported(tmp_path): """Test that the load function fails if the collection type is not - supported.""" + supported. + """ # Arrange path = tmp_path / "collection_type_not_supported.json" path.write_text( @@ -53,7 +52,8 @@ def test_load_fails_if_collection_type_is_not_supported(tmp_path): def test_load_fails_if_aoef_version_is_not_supported(tmp_path): """Test that the load function fails if the aoef version is not - supported.""" + supported. + """ # Arrange path = tmp_path / "aoef_version_not_supported.json" path.write_text( @@ -71,7 +71,9 @@ def test_load_fails_if_aoef_version_is_not_supported(tmp_path): io.load(path) -def test_save_creates_parent_directories(tmp_path: Path, dataset: data.Dataset): +def test_save_creates_parent_directories( + tmp_path: Path, dataset: data.Dataset +): """Test that the save function creates parent directories.""" # Arrange path = tmp_path / "parent" / "child" / "test.json" @@ -91,7 +93,8 @@ def test_save_fails_if_trying_to_save_unsupported_collection_type( clip_evaluation: data.ClipEvaluation, ): """Test that the save function fails if trying to save an unsupported - collection type.""" + collection type. + """ # Arrange path = tmp_path / "unsupported_collection_type.json" diff --git a/tests/test_io/test_crowsetta/test_annotation.py b/tests/test_io/test_crowsetta/test_annotation.py index 07cc7f0..2ed8eb3 100644 --- a/tests/test_io/test_crowsetta/test_annotation.py +++ b/tests/test_io/test_crowsetta/test_annotation.py @@ -5,7 +5,6 @@ import crowsetta import pytest - import soundevent.io.crowsetta as crowsetta_io from soundevent import data from soundevent.io.crowsetta.segment import create_crowsetta_segment @@ -35,7 +34,9 @@ def clip_annotation(recording: data.Recording) -> data.ClipAnnotation: data.SoundEventAnnotation( sound_event=data.SoundEvent( recording=recording, - geometry=data.BoundingBox(coordinates=[0.5, 0.5, 1.5, 1.5]), + geometry=data.BoundingBox( + coordinates=[0.5, 0.5, 1.5, 1.5] + ), features=[data.Feature(name="test", value=1.0)], ), tags=[data.Tag(key="animal", value="cat")], @@ -44,7 +45,9 @@ def clip_annotation(recording: data.Recording) -> data.ClipAnnotation: data.SoundEventAnnotation( sound_event=data.SoundEvent( recording=recording, - geometry=data.LineString(coordinates=[[0.5, 0.5], [1.5, 1.5]]), + geometry=data.LineString( + coordinates=[[0.5, 0.5], [1.5, 1.5]] + ), features=[data.Feature(name="test", value=1.0)], ), tags=[data.Tag(key="animal", value="cat")], diff --git a/tests/test_io/test_crowsetta/test_bbox.py b/tests/test_io/test_crowsetta/test_bbox.py index 9935b48..f9aba0a 100644 --- a/tests/test_io/test_crowsetta/test_bbox.py +++ b/tests/test_io/test_crowsetta/test_bbox.py @@ -2,9 +2,8 @@ import crowsetta import pytest - -from soundevent import data import soundevent.io.crowsetta as crowsetta_io +from soundevent import data @pytest.fixture diff --git a/tests/test_io/test_crowsetta/test_import.py b/tests/test_io/test_crowsetta/test_import.py index 16e688c..8266c6f 100644 --- a/tests/test_io/test_crowsetta/test_import.py +++ b/tests/test_io/test_crowsetta/test_import.py @@ -7,9 +7,8 @@ from pathlib import Path import crowsetta - -from soundevent import data import soundevent.io.crowsetta as crowsetta_io +from soundevent import data 
@pytest.mark.skipif( @@ -52,9 +51,9 @@ def test_can_import_all_example_formats( from_file_kwargs = {"audio_path": recording.path} to_annot_kwargs = {"samplerate": recording.samplerate} - annotation = scribe.from_file(example.annot_path, **from_file_kwargs).to_annot( - **to_annot_kwargs - ) + annotation = scribe.from_file( + example.annot_path, **from_file_kwargs + ).to_annot(**to_annot_kwargs) if isinstance(annotation, list): annotation = annotation[0] @@ -62,7 +61,9 @@ def test_can_import_all_example_formats( assert isinstance(annotation, crowsetta.Annotation) if annotation.notated_path is not None: - recording = recording.model_copy(update=dict(path=annotation.notated_path)) + recording = recording.model_copy( + update=dict(path=annotation.notated_path) + ) clip_annotation = crowsetta_io.annotation_to_clip_annotation( annotation, diff --git a/tests/test_io/test_crowsetta/test_labels.py b/tests/test_io/test_crowsetta/test_labels.py index 26b2a15..5316aa4 100644 --- a/tests/test_io/test_crowsetta/test_labels.py +++ b/tests/test_io/test_crowsetta/test_labels.py @@ -136,7 +136,9 @@ def test_label_to_tags_with_key_mapping(): def test_label_to_tags_with_key_mapping_fallback(): key_mapping = {"bat": "animal"} - tag = crowsetta_io.label_to_tags("dog", key_mapping=key_mapping, fallback="pet") + tag = crowsetta_io.label_to_tags( + "dog", key_mapping=key_mapping, fallback="pet" + ) assert tag == [data.Tag(key="pet", value="dog")] diff --git a/tests/test_io/test_crowsetta/test_segments.py b/tests/test_io/test_crowsetta/test_segments.py index e8bacfe..79ead19 100644 --- a/tests/test_io/test_crowsetta/test_segments.py +++ b/tests/test_io/test_crowsetta/test_segments.py @@ -2,9 +2,8 @@ import crowsetta import pytest - -from soundevent import data import soundevent.io.crowsetta as crowsetta_io +from soundevent import data from soundevent.io.crowsetta.segment import ( create_crowsetta_segment, ) @@ -95,7 +94,9 @@ def test_segment_from_annotation( def test_segment_from_annotation_fails_if_not_a_time_interval( sound_event_annotation: data.SoundEventAnnotation, ): - sound_event_annotation.sound_event.geometry = data.Point(coordinates=[0.5, 1]) + sound_event_annotation.sound_event.geometry = data.Point( + coordinates=[0.5, 1] + ) with pytest.raises(ValueError): crowsetta_io.segment_from_annotation( sound_event_annotation, @@ -106,7 +107,9 @@ def test_segment_from_annotation_fails_if_not_a_time_interval( def test_segment_from_annotation_casts_to_segment( sound_event_annotation: data.SoundEventAnnotation, ): - sound_event_annotation.sound_event.geometry = data.Point(coordinates=[0.5, 1]) + sound_event_annotation.sound_event.geometry = data.Point( + coordinates=[0.5, 1] + ) segment = crowsetta_io.segment_from_annotation( sound_event_annotation, cast_to_segment=True, diff --git a/tests/test_io/test_crowsetta/test_sequence.py b/tests/test_io/test_crowsetta/test_sequence.py index d02e5f6..5a652f8 100644 --- a/tests/test_io/test_crowsetta/test_sequence.py +++ b/tests/test_io/test_crowsetta/test_sequence.py @@ -5,7 +5,6 @@ import crowsetta import numpy as np import pytest - import soundevent.io.crowsetta as crowsetta_io from soundevent import data from soundevent.io.crowsetta.segment import create_crowsetta_segment @@ -170,5 +169,7 @@ def test_sequence_to_annotations( recording, ) assert len(annotations) == 2 - assert all(isinstance(ann, data.SoundEventAnnotation) for ann in annotations) + assert all( + isinstance(ann, data.SoundEventAnnotation) for ann in annotations + ) assert all(ann.sound_event.recording == 
recording for ann in annotations) diff --git a/tests/test_io/test_model_runs.py b/tests/test_io/test_model_runs.py index 8b33b49..d9716fd 100644 --- a/tests/test_io/test_model_runs.py +++ b/tests/test_io/test_model_runs.py @@ -138,7 +138,9 @@ def test_can_recover_processed_clip_tags( # Assert assert model_run == recovered assert recovered.clip_predictions[0].tags[0].tag.key == "species" - assert recovered.clip_predictions[0].tags[0].tag.value == "Myotis lucifugus" + assert ( + recovered.clip_predictions[0].tags[0].tag.value == "Myotis lucifugus" + ) assert recovered.clip_predictions[0].tags[0].score == 0.9 @@ -209,7 +211,10 @@ def test_can_recover_simple_predicted_sound_event( # Assert assert recovered.clip_predictions[0].sound_events[0].score == 0.9 - assert recovered.clip_predictions[0].sound_events[0].sound_event == sound_event + assert ( + recovered.clip_predictions[0].sound_events[0].sound_event + == sound_event + ) assert model_run == recovered @@ -249,7 +254,10 @@ def test_can_recover_predicted_sound_event_with_predicted_tags( recovered = io.load(path, type="model_run") # Assert - assert recovered.clip_predictions[0].sound_events[0].tags[0].tag.key == "species" + assert ( + recovered.clip_predictions[0].sound_events[0].tags[0].tag.key + == "species" + ) assert ( recovered.clip_predictions[0].sound_events[0].tags[0].tag.value == "Myotis lucifugus" From ceed73ca0630530c9998efe4e2e33bb8d8da7dca Mon Sep 17 00:00:00 2001 From: mbsantiago Date: Fri, 10 May 2024 11:01:21 +0100 Subject: [PATCH 5/7] Update Makefile --- Makefile | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 479c1cb..81c22a6 100644 --- a/Makefile +++ b/Makefile @@ -23,15 +23,13 @@ install: ## Install the project in dev mode. .PHONY: fmt fmt: ## Format code using black & isort. - $(ENV_PREFIX)isort $(PROJECT_NAME)/ - $(ENV_PREFIX)black $(PROJECT_NAME)/ - $(ENV_PREFIX)black tests/ + $(ENV_PREFIX)ruff format $(PROJECT_NAME)/ + $(ENV_PREFIX)ruff format tests/ .PHONY: lint lint: ## Run ruff, black, mypy linters. 
-	$(ENV_PREFIX)ruff $(PROJECT_NAME)/
-	$(ENV_PREFIX)black --check $(PROJECT_NAME)/
-	$(ENV_PREFIX)black --check tests/
+	$(ENV_PREFIX)ruff check $(PROJECT_NAME)/
+	$(ENV_PREFIX)ruff check tests/
 	$(ENV_PREFIX)mypy $(PROJECT_NAME)/ --config-file pyproject.toml
 
 .PHONY: test-watch

From 08360c08d15627d451257aa2970d90fc54aa0f2b Mon Sep 17 00:00:00 2001
From: mbsantiago
Date: Fri, 10 May 2024 11:12:16 +0100
Subject: [PATCH 6/7] migrated type checking to pyright

---
 .github/workflows/test.yml            |  6 +++---
 Makefile                              | 12 +++++++++---
 pyproject.toml                        |  3 +--
 requirements-dev.lock                 | 17 +++++------------
 src/soundevent/data/geometries.py     |  2 +-
 src/soundevent/io/crowsetta/labels.py |  4 ++--
 6 files changed, 21 insertions(+), 23 deletions(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 870215d..444794b 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -21,11 +21,11 @@ jobs:
       run: |
         sudo apt-get update && sudo apt-get install libsndfile1
         python -m pip install --upgrade pip
-        python -m pip install pytest hypothesis ruff mypy black html5lib
+        python -m pip install pytest hypothesis ruff pyright html5lib
         python -m pip install ".[all]"
     - name: Make sure types are consistent
-      run: mypy --ignore-missing-imports src
+      run: pyright src
     - name: Lint with ruff
-      run: ruff src
+      run: ruff check src
     - name: Test with pytest
       run: pytest tests
diff --git a/Makefile b/Makefile
index 81c22a6..eb00387 100644
--- a/Makefile
+++ b/Makefile
@@ -26,11 +26,17 @@ fmt: ## Format code using black & isort.
 	$(ENV_PREFIX)ruff format $(PROJECT_NAME)/
 	$(ENV_PREFIX)ruff format tests/
 
-.PHONY: lint
-lint: ## Run ruff, black, mypy linters.
+.PHONY: lint-pyright
+lint-pyright:
+	$(ENV_PREFIX)pyright $(PROJECT_NAME)/
+
+.PHONY: lint-ruff
+lint-ruff:
 	$(ENV_PREFIX)ruff check $(PROJECT_NAME)/
 	$(ENV_PREFIX)ruff check tests/
-	$(ENV_PREFIX)mypy $(PROJECT_NAME)/ --config-file pyproject.toml
+
+.PHONY: lint
+lint: lint-pyright lint-ruff
 
 .PHONY: test-watch
 test-watch: ## Run tests and generate coverage report.
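Besides swapping mypy for pyright, this patch also replaces the PEP 585 built-in generics in src/soundevent/io/crowsetta/labels.py (`dict[...]`) with their `typing` equivalents, as shown in the labels.py hunk further below. A minimal illustration of why, assuming the package still has to import on Python 3.8 (the alias name here is a placeholder, not the library's):

    # Subscripting built-in containers (PEP 585) works at runtime only on
    # Python >= 3.9, so a module-level alias that must evaluate on 3.8
    # needs typing.Dict instead of dict.
    from typing import Dict, List, Union

    LabelMap = Dict[str, Union[List[str], str]]    # fine on Python 3.8+
    # LabelMap = dict[str, Union[List[str], str]]  # TypeError on Python 3.8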
diff --git a/pyproject.toml b/pyproject.toml index 9d0772d..81d49aa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -41,8 +41,6 @@ rye = { dev-dependencies = [ "icecream>=2.1.3", "pytest>=7.4.0", "coverage[toml]>=7.3.2", - "black>=23.3.0", - "mypy>=1.4.1", "pytest-coverage>=0.0", "mkdocs>=1.2.4", "importlib-metadata>=4.3", @@ -57,6 +55,7 @@ rye = { dev-dependencies = [ "pytest-watch>=4.2.0", "pytest-testmon>=2.0.12", "html5lib>=1.1", + "pyright>=1.1.362", ] } [tool.pytest.ini_options] diff --git a/requirements-dev.lock b/requirements-dev.lock index 42a3053..161a14f 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -23,7 +23,6 @@ babel==2.14.0 # via mkdocs-material birdsong-recognition-dataset==0.3.2.post1 # via crowsetta -black==24.4.2 cachetools==5.3.3 # via tox certifi==2024.2.2 @@ -35,7 +34,6 @@ chardet==5.2.0 charset-normalizer==3.3.2 # via requests click==8.1.7 - # via black # via mkdocs # via mkdocstrings colorama==0.4.6 @@ -143,11 +141,10 @@ mkdocstrings-python==1.10.0 # via mkdocstrings multimethod==1.10 # via pandera -mypy==1.10.0 mypy-extensions==1.0.0 - # via black - # via mypy # via typing-inspect +nodeenv==1.8.0 + # via pyright numpy==1.24.4 # via birdsong-recognition-dataset # via contourpy @@ -161,7 +158,6 @@ numpy==1.24.4 # via shapely # via xarray packaging==24.0 - # via black # via matplotlib # via mkdocs # via mkdocs-gallery @@ -179,12 +175,10 @@ pandas==2.0.3 pandera==0.18.3 # via crowsetta pathspec==0.12.1 - # via black # via mkdocs pillow==10.3.0 # via matplotlib platformdirs==4.2.1 - # via black # via mkdocs-get-deps # via mkdocstrings # via tox @@ -211,6 +205,7 @@ pyparsing==3.1.2 # via matplotlib pyproject-api==1.6.1 # via tox +pyright==1.1.362 pytest==8.1.2 # via pytest-cov # via pytest-testmon @@ -248,6 +243,8 @@ scipy==1.10.1 # via evfuncs # via scikit-learn # via soundevent +setuptools==69.5.1 + # via nodeenv shapely==2.0.4 # via soundevent six==1.16.0 @@ -263,9 +260,7 @@ soundfile==0.12.1 threadpoolctl==3.4.0 # via scikit-learn tomli==2.0.1 - # via black # via coverage - # via mypy # via pyproject-api # via pytest # via tox @@ -276,9 +271,7 @@ typeguard==4.2.1 # via pandera typing-extensions==4.11.0 # via annotated-types - # via black # via mkdocstrings - # via mypy # via pydantic # via pydantic-core # via typeguard diff --git a/src/soundevent/data/geometries.py b/src/soundevent/data/geometries.py index a83a718..83d6a54 100644 --- a/src/soundevent/data/geometries.py +++ b/src/soundevent/data/geometries.py @@ -908,7 +908,7 @@ def geometry_validate( if not hasattr(obj, "type"): raise ValueError(f"Object {obj} does not have a type attribute.") - geom_type = obj.type + geom_type = obj.type # type: ignore if geom_type not in GEOMETRY_MAPPING: raise ValueError(f"Object {obj} does not have a geometry valid type.") diff --git a/src/soundevent/io/crowsetta/labels.py b/src/soundevent/io/crowsetta/labels.py index 546d3a5..03ca830 100644 --- a/src/soundevent/io/crowsetta/labels.py +++ b/src/soundevent/io/crowsetta/labels.py @@ -23,7 +23,7 @@ LabelToTagFn = Callable[[str], Union[List[data.Tag], data.Tag]] -LabelToTagMap = dict[str, Union[List[data.Tag], data.Tag]] +LabelToTagMap = Dict[str, Union[List[data.Tag], data.Tag]] def label_to_tags( @@ -110,7 +110,7 @@ def label_to_tags( def label_from_tag( tag: data.Tag, label_fn: Optional[Callable[[data.Tag], str]] = None, - label_mapping: Optional[dict[data.Tag, str]] = None, + label_mapping: Optional[Dict[data.Tag, str]] = None, value_only: bool = False, separator: str = ":", ) -> str: From 
4c1bef599b51b96c05dc5371699cf502c686ce05 Mon Sep 17 00:00:00 2001 From: mbsantiago Date: Fri, 10 May 2024 11:49:59 +0100 Subject: [PATCH 7/7] Removed stringent docstring rule --- pyproject.toml | 1 + src/soundevent/audio/spectrum.py | 4 +- src/soundevent/evaluation/encoding.py | 6 +-- src/soundevent/evaluation/metrics.py | 6 +-- .../evaluation/tasks/clip_classification.py | 4 +- .../tasks/clip_multilabel_classification.py | 4 +- .../tasks/sound_event_classification.py | 4 +- .../evaluation/tasks/sound_event_detection.py | 4 +- src/soundevent/io/crowsetta/annotation.py | 7 ++++ src/soundevent/io/crowsetta/bbox.py | 8 ++++ src/soundevent/io/crowsetta/sequence.py | 7 ++++ src/soundevent/io/loader.py | 6 +++ src/soundevent/io/saver.py | 7 ++++ src/soundevent/plot/geometries.py | 1 + src/soundevent/plot/sound_event.py | 0 src/soundevent/types.py | 39 +++++++++++++++++++ .../test_clip_classification.py | 4 +- tests/test_evaluation/test_encode.py | 2 +- tests/test_io/test_aoef/test_api.py | 12 ++---- .../test_io/test_crowsetta/test_annotation.py | 2 +- tests/test_io/test_crowsetta/test_bbox.py | 2 +- tests/test_io/test_crowsetta/test_import.py | 9 ++--- tests/test_io/test_crowsetta/test_labels.py | 2 +- tests/test_io/test_crowsetta/test_segments.py | 2 +- tests/test_io/test_crowsetta/test_sequence.py | 2 +- 25 files changed, 102 insertions(+), 43 deletions(-) delete mode 100644 src/soundevent/plot/sound_event.py diff --git a/pyproject.toml b/pyproject.toml index 81d49aa..cf162bc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -71,6 +71,7 @@ docstring-code-line-length = 60 [tool.ruff.lint] select = ["E4", "E7", "E9", "F", "B", "Q", "I", "D"] +ignore = ["D1"] [tool.ruff.lint.pydocstyle] convention = "numpy" diff --git a/src/soundevent/audio/spectrum.py b/src/soundevent/audio/spectrum.py index 57a4738..f3e8f94 100644 --- a/src/soundevent/audio/spectrum.py +++ b/src/soundevent/audio/spectrum.py @@ -76,10 +76,12 @@ def amplitude_to_db( def db_to_power(value: float) -> float: + """Convert dB to power.""" return np.power(10.0, value * 0.1) def db_to_amplitude(value: float) -> float: + """Convert dB to amplitude.""" return db_to_power(value) ** 0.5 @@ -100,7 +102,7 @@ def pcen_core( max_axis: Optional[int] = None, zi: Optional[np.ndarray] = None, ) -> np.ndarray: - """Per-channel energy normalization (PCEN) + """Per-channel energy normalization (PCEN). Notes ----- diff --git a/src/soundevent/evaluation/encoding.py b/src/soundevent/evaluation/encoding.py index 306555c..df92d2a 100644 --- a/src/soundevent/evaluation/encoding.py +++ b/src/soundevent/evaluation/encoding.py @@ -117,7 +117,7 @@ def classification_encoding( tags: Sequence[data.Tag], encoder: Encoder, ) -> Optional[int]: - """Encodes a list of tags into an integer value. + """Encode a list of tags into an integer value. This function is commonly used for mapping a list of tags to a compact integer representation, typically representing classes associated with @@ -171,7 +171,7 @@ def multilabel_encoding( tags: Sequence[data.Tag], encoder: Encoder, ) -> np.ndarray: - """Encodes a list of tags into a binary multilabel array. + """Encode a list of tags into a binary multilabel array. Parameters ---------- @@ -226,7 +226,7 @@ def prediction_encoding( tags: Sequence[data.PredictedTag], encoder: Encoder, ) -> np.ndarray: - """Encodes a list of predicted tags into a floating-point array of scores. + """Encode a list of predicted tags into a floating-point array of scores. 
Parameters ---------- diff --git a/src/soundevent/evaluation/metrics.py b/src/soundevent/evaluation/metrics.py index c1b0cf6..8ada9f2 100644 --- a/src/soundevent/evaluation/metrics.py +++ b/src/soundevent/evaluation/metrics.py @@ -154,8 +154,7 @@ def average_precision( y_true: np.ndarray, y_score: np.ndarray, ) -> float: - """Compute the average precision score for the given true and predicted - labels. + """Compute the average precision score for the given true and predicted labels. Parameters ---------- @@ -189,8 +188,7 @@ def mean_average_precision( y_true: np.ndarray, y_score: np.ndarray, ) -> float: - """Compute the mean average precision score for the given true and - predicted labels. + """Compute the mean average precision score for the given true and predicted labels. Parameters ---------- diff --git a/src/soundevent/evaluation/tasks/clip_classification.py b/src/soundevent/evaluation/tasks/clip_classification.py index d68441c..2b908a9 100644 --- a/src/soundevent/evaluation/tasks/clip_classification.py +++ b/src/soundevent/evaluation/tasks/clip_classification.py @@ -88,9 +88,7 @@ def _evaluate_all_clips( def _compute_overall_metrics(true_classes, predicted_classes_scores): - """Compute evaluation metrics based on true classes and predicted - scores. - """ + """Compute evaluation metrics based on true classes and predicted scores.""" evaluation_metrics = [ data.Feature( name=metric.__name__, diff --git a/src/soundevent/evaluation/tasks/clip_multilabel_classification.py b/src/soundevent/evaluation/tasks/clip_multilabel_classification.py index 7020b6e..71208fc 100644 --- a/src/soundevent/evaluation/tasks/clip_multilabel_classification.py +++ b/src/soundevent/evaluation/tasks/clip_multilabel_classification.py @@ -97,9 +97,7 @@ def _compute_overall_metrics( true_classes, predicted_classes_scores, ) -> List[data.Feature]: - """Compute evaluation metrics based on true classes and predicted - scores. - """ + """Compute evaluation metrics based on true classes and predicted scores.""" return [ data.Feature( name=metric.__name__, diff --git a/src/soundevent/evaluation/tasks/sound_event_classification.py b/src/soundevent/evaluation/tasks/sound_event_classification.py index 9d7aba1..05d9ab3 100644 --- a/src/soundevent/evaluation/tasks/sound_event_classification.py +++ b/src/soundevent/evaluation/tasks/sound_event_classification.py @@ -88,9 +88,7 @@ def _evaluate_clips( def _compute_overall_metrics(true_classes, predicted_classes_scores): - """Compute evaluation metrics based on true classes and predicted - scores. - """ + """Compute evaluation metrics based on true classes and predicted scores.""" evaluation_metrics = [ data.Feature( name=metric.__name__, diff --git a/src/soundevent/evaluation/tasks/sound_event_detection.py b/src/soundevent/evaluation/tasks/sound_event_detection.py index 04c251e..e8dc40a 100644 --- a/src/soundevent/evaluation/tasks/sound_event_detection.py +++ b/src/soundevent/evaluation/tasks/sound_event_detection.py @@ -88,9 +88,7 @@ def _evaluate_clips( def compute_overall_metrics(true_classes, predicted_classes_scores): - """Compute evaluation metrics based on true classes and predicted - scores. 
- """ + """Compute evaluation metrics based on true classes and predicted scores.""" evaluation_metrics = [ data.Feature( name=metric.__name__, diff --git a/src/soundevent/io/crowsetta/annotation.py b/src/soundevent/io/crowsetta/annotation.py index 7af1372..3fc5c5e 100644 --- a/src/soundevent/io/crowsetta/annotation.py +++ b/src/soundevent/io/crowsetta/annotation.py @@ -1,3 +1,10 @@ +"""Module for converting between ClipAnnotation and Crowsetta annotation formats. + +This module provides functions to facilitate the interoperability between sound +event data represented in the SoundEvent library's `ClipAnnotation` format and +the `crowsetta.Annotation` format used by the Crowsetta tool. +""" + import os from pathlib import Path from typing import List, Literal, Optional, Union diff --git a/src/soundevent/io/crowsetta/bbox.py b/src/soundevent/io/crowsetta/bbox.py index 78e6a98..b61a5f3 100644 --- a/src/soundevent/io/crowsetta/bbox.py +++ b/src/soundevent/io/crowsetta/bbox.py @@ -1,3 +1,11 @@ +"""Module for converting between SoundEvent annotations and Crowsetta bounding boxes. + +This module provides functions to seamlessly convert between +`SoundEventAnnotation` objects, containing sound event information with +bounding box geometries, and `crowsetta.BBox` objects used by the Crowsetta +tool. +""" + from typing import List, Optional, Tuple from crowsetta import BBox diff --git a/src/soundevent/io/crowsetta/sequence.py b/src/soundevent/io/crowsetta/sequence.py index 57c43d0..a5b7359 100644 --- a/src/soundevent/io/crowsetta/sequence.py +++ b/src/soundevent/io/crowsetta/sequence.py @@ -1,3 +1,10 @@ +"""Module for converting between SoundEvent annotations and Crowsetta sequences. + +This module facilitates the conversion between sequences of +`SoundEventAnnotation` objects, used within the SoundEvent library, and +`crowsetta.Sequence` objects employed by the Crowsetta tool. +""" + from typing import List, Optional, Sequence import crowsetta diff --git a/src/soundevent/io/loader.py b/src/soundevent/io/loader.py index a40ee5d..c5c7d8c 100644 --- a/src/soundevent/io/loader.py +++ b/src/soundevent/io/loader.py @@ -1,3 +1,9 @@ +"""Module for loading sound event data in various formats. + +This module provides a flexible `load` function to load different types of sound +event data. +""" + import sys from typing import Dict, Optional, overload diff --git a/src/soundevent/io/saver.py b/src/soundevent/io/saver.py index 22b088a..2eb4033 100644 --- a/src/soundevent/io/saver.py +++ b/src/soundevent/io/saver.py @@ -1,3 +1,10 @@ +"""Module for saving sound event data in various formats. + +This module provides a versatile `save` function for storing different types of +sound event data. Data can be saved in formats compatible with the +`soundevent.io.load` function. 
+""" + from typing import Dict, Optional from soundevent import data diff --git a/src/soundevent/plot/geometries.py b/src/soundevent/plot/geometries.py index f595ff2..53f6ca7 100644 --- a/src/soundevent/plot/geometries.py +++ b/src/soundevent/plot/geometries.py @@ -34,6 +34,7 @@ def plot_geometry( ax: Optional[Axes] = None, **kwargs, ) -> Axes: + """Plot a geometry in the given ax.""" if ax is None: ax = create_axes(**kwargs) diff --git a/src/soundevent/plot/sound_event.py b/src/soundevent/plot/sound_event.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/soundevent/types.py b/src/soundevent/types.py index 88a3ffe..0a73e87 100644 --- a/src/soundevent/types.py +++ b/src/soundevent/types.py @@ -16,16 +16,42 @@ def encode( self, sound_event_annotation: data.SoundEventAnnotation, ) -> Optional[str]: + """Encode a sound event annotation into a class label. + + The user should implement this method to encode a sound event annotation + into a class label. If the sound event annotation does not have a class + label, the method should return None. This is helpful in case + the user wants to ignore some sound event annotations. + """ pass @abstractmethod def decode(self, label: str) -> List[data.Tag]: + """Decode a class label into a list of tags. + + The user should implement this method to decode a class label into a + list of tags. This is helpful when the user wants to convert a class + label into a list of tags to reconstruct the sound event annotation. + """ pass def transform( self, sound_event_annotation: data.SoundEventAnnotation, ) -> Optional[int]: + """Transform a sound event annotation into a class index. + + Parameters + ---------- + sound_event_annotation : data.SoundEventAnnotation + A sound event annotation. + + Returns + ------- + Optional[int] + The class index of the sound event annotation. + If no class is provided, returns None. + """ class_name = self.encode(sound_event_annotation) if class_name not in self.class_labels: @@ -34,6 +60,18 @@ def transform( return self.class_labels.index(class_name) def inverse_transform(self, class_index: int) -> List[data.Tag]: + """Inverse transform a class index into a list of tags. + + Parameters + ---------- + class_index : int + The class index. + + Returns + ------- + List[data.Tag] + A list of tags that represent the class index. + """ if class_index < 0 or class_index >= len(self.class_labels): return [] @@ -42,4 +80,5 @@ def inverse_transform(self, class_index: int) -> List[data.Tag]: @property def num_classes(self) -> int: + """Return the number of classes.""" return len(self.class_labels) diff --git a/tests/test_evaluation/test_clip_classification.py b/tests/test_evaluation/test_clip_classification.py index 11523dd..7cea7a5 100644 --- a/tests/test_evaluation/test_clip_classification.py +++ b/tests/test_evaluation/test_clip_classification.py @@ -214,9 +214,7 @@ def test_overall_score_is_the_mean_of_the_scores_of_all_evaluated_clips( prediction_set: data.PredictionSet, evaluation_tags: List[data.Tag], ): - """Test that the overall score is the mean of the scores of all evaluated - examples. 
- """ + """Test that the overall score is the mean of the scores of all evaluated examples.""" evaluation = clip_classification( clip_annotations=annotation_set.clip_annotations, clip_predictions=prediction_set.clip_predictions, diff --git a/tests/test_evaluation/test_encode.py b/tests/test_evaluation/test_encode.py index 0bcadb5..b5c309e 100644 --- a/tests/test_evaluation/test_encode.py +++ b/tests/test_evaluation/test_encode.py @@ -26,7 +26,7 @@ def tags( def encoder( tags: Sequence[data.Tag], ) -> Encoder: - """Encoder for testing.""" + """Encode for testing.""" target_tags = tags[:5] return create_tag_encoder(target_tags) diff --git a/tests/test_io/test_aoef/test_api.py b/tests/test_io/test_aoef/test_api.py index e6b2117..f878731 100644 --- a/tests/test_io/test_aoef/test_api.py +++ b/tests/test_io/test_aoef/test_api.py @@ -30,9 +30,7 @@ def test_load_fails_if_file_is_not_a_json_file(tmp_path): def test_load_fails_if_collection_type_is_not_supported(tmp_path): - """Test that the load function fails if the collection type is not - supported. - """ + """Test that the load function fails if the collection type is not supported.""" # Arrange path = tmp_path / "collection_type_not_supported.json" path.write_text( @@ -51,9 +49,7 @@ def test_load_fails_if_collection_type_is_not_supported(tmp_path): def test_load_fails_if_aoef_version_is_not_supported(tmp_path): - """Test that the load function fails if the aoef version is not - supported. - """ + """Test that the load function fails if the aoef version is not supported.""" # Arrange path = tmp_path / "aoef_version_not_supported.json" path.write_text( @@ -92,9 +88,7 @@ def test_save_fails_if_trying_to_save_unsupported_collection_type( tmp_path: Path, clip_evaluation: data.ClipEvaluation, ): - """Test that the save function fails if trying to save an unsupported - collection type. 
- """ + """Test that the save function fails if trying to save an unsupported collection type.""" # Arrange path = tmp_path / "unsupported_collection_type.json" diff --git a/tests/test_io/test_crowsetta/test_annotation.py b/tests/test_io/test_crowsetta/test_annotation.py index 2ed8eb3..dad002c 100644 --- a/tests/test_io/test_crowsetta/test_annotation.py +++ b/tests/test_io/test_crowsetta/test_annotation.py @@ -1,4 +1,4 @@ -"""Test suite for the soundevent.io.crowsetta.annotation module""" +"""Test suite for the soundevent.io.crowsetta.annotation module.""" import datetime from pathlib import Path diff --git a/tests/test_io/test_crowsetta/test_bbox.py b/tests/test_io/test_crowsetta/test_bbox.py index f9aba0a..6ec80b8 100644 --- a/tests/test_io/test_crowsetta/test_bbox.py +++ b/tests/test_io/test_crowsetta/test_bbox.py @@ -1,4 +1,4 @@ -"""Test suite for the soundevent.io.crowsetta.bbox module""" +"""Test suite for the soundevent.io.crowsetta.bbox module.""" import crowsetta import pytest diff --git a/tests/test_io/test_crowsetta/test_import.py b/tests/test_io/test_crowsetta/test_import.py index 8266c6f..949db8d 100644 --- a/tests/test_io/test_crowsetta/test_import.py +++ b/tests/test_io/test_crowsetta/test_import.py @@ -1,15 +1,14 @@ import sys import warnings - -import pytest - -warnings.filterwarnings("ignore", category=UserWarning, module="crowsetta") -from pathlib import Path +from pathlib import Path # noqa: E402 import crowsetta +import pytest import soundevent.io.crowsetta as crowsetta_io from soundevent import data +warnings.filterwarnings("ignore", category=UserWarning, module="crowsetta") + @pytest.mark.skipif( sys.version_info < (3, 9), diff --git a/tests/test_io/test_crowsetta/test_labels.py b/tests/test_io/test_crowsetta/test_labels.py index 5316aa4..64774fe 100644 --- a/tests/test_io/test_crowsetta/test_labels.py +++ b/tests/test_io/test_crowsetta/test_labels.py @@ -1,4 +1,4 @@ -"""Test Suite for the soundevent.io.crowsetta.labels module""" +"""Test Suite for the soundevent.io.crowsetta.labels module.""" import soundevent.io.crowsetta as crowsetta_io from soundevent import data diff --git a/tests/test_io/test_crowsetta/test_segments.py b/tests/test_io/test_crowsetta/test_segments.py index 79ead19..96078e7 100644 --- a/tests/test_io/test_crowsetta/test_segments.py +++ b/tests/test_io/test_crowsetta/test_segments.py @@ -1,4 +1,4 @@ -"""Test suite for the soundevent.io.crowsetta.segments module""" +"""Test suite for the soundevent.io.crowsetta.segments module.""" import crowsetta import pytest diff --git a/tests/test_io/test_crowsetta/test_sequence.py b/tests/test_io/test_crowsetta/test_sequence.py index 5a652f8..e324a7a 100644 --- a/tests/test_io/test_crowsetta/test_sequence.py +++ b/tests/test_io/test_crowsetta/test_sequence.py @@ -1,4 +1,4 @@ -"""Test suite for the soundevent.io.crowsetta.sequence module""" +"""Test suite for the soundevent.io.crowsetta.sequence module.""" from typing import List