Skip to content

Commit 59ea359

Browse files
committed
Add extra linter checks and make all those checks pass.
1 parent 94827ac commit 59ea359

File tree

14 files changed

+203
-313
lines changed

14 files changed

+203
-313
lines changed

examples/mrbles.ipynb

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@
5555
"outputs": [],
5656
"source": [
5757
"# Setup the pipeline. You should rarely need to modify these values outside of `num_codes`\n",
58-
"# unless you're switching between different image binning modes (since that changes the pixel sizes).\n",
58+
"# unless you're switching between different image binning modes (since that changes pixel sizes).\n",
5959
"xp = mg.mrbles( # The type of experiment we're processing, in this case mrbles.\n",
6060
" \"data/xp1.ome.tif\", # The path to the image we collected.\n",
6161
" darkfield=\"data/darkfield.tiff\", # The location of the darkfield correction image.\n",
@@ -103,7 +103,9 @@
103103
}
104104
],
105105
"source": [
106-
"# Now let's check the image corresponding to the 620 channel to make sure magnify correctly processed our experiment.\n",
106+
"# Now let's check the image corresponding to the 620 channel to make sure magnify correctly\n",
107+
"# processed our experiment.\n",
108+
"\n",
107109
"# First display the image.\n",
108110
"plt.imshow(xp.image.sel(channel=\"620\"))\n",
109111
"\n",
@@ -163,7 +165,7 @@
163165
}
164166
],
165167
"source": [
166-
"# We can also visualize the lanthanide ratios for each bead. We see here that we get distinct clusters.\n",
168+
"# We can also visualize the lanthanide ratios for each bead. We see that we get distinct clusters.\n",
167169
"plt.scatter(xp.ln_ratio.sel(ln=\"dy\"), xp.ln_ratio.sel(ln=\"sm\"), s=1.0, alpha=0.3)\n",
168170
"plt.xlabel(\"Dy Ratio\")\n",
169171
"plt.ylabel(\"Sm Ratio\")"
@@ -299,10 +301,13 @@
299301
},
300302
"outputs": [],
301303
"source": [
302-
"# Setup the pipeline. Here we needed to change darkfield, flatfield, min_bead_radius, and max_bead_radius to account for the 2x2 binning.\n",
303-
"# The filepath supports globs (https://en.wikipedia.org/wiki/Glob_(programming)) so * expands to match anything that matches the pattern.\n",
304-
"# (row), (col) are equivalent to * but they also save the segment of the filename they match in the resulting experiment.\n",
305-
"# In this case we're telling the pipeline that the filename specified the tile column and row.\n",
304+
"# Setup the pipeline. Here we needed to change darkfield, flatfield, min_bead_radius,\n",
305+
"# and max_bead_radius to account for the 2x2 binning.\n",
306+
"# The filepath supports globs (https://en.wikipedia.org/wiki/Glob_(programming)) so * expands to\n",
307+
"# match anything that matches the pattern.\n",
308+
"# (row), (col) are equivalent to * but they also save the segment of the filename they match\n",
309+
"# in the resulting experiment. In this case we're telling the pipeline that the filename specified\n",
310+
"# the tile column and row.\n",
306311
"xp = mg.mrbles(\n",
307312
" \"data/06.08.23 1-50 mix/mixed all tile 2b2_2/*Pos(col)_(row).ome.tif\",\n",
308313
" darkfield=\"data/darkfield2x2.tiff\",\n",

pyproject.toml

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,5 +53,22 @@ build-backend = "uv_build"
5353
line-length = 100
5454
target-version = "py312"
5555

56+
[tool.ruff.lint]
57+
select = [
58+
# pycodestyle
59+
"E",
60+
"W",
61+
# Pyflakes
62+
"F",
63+
# pyupgrade
64+
"UP",
65+
# flake8-bugbear
66+
"B",
67+
# flake8-simplify
68+
"SIM",
69+
# isort
70+
"I",
71+
]
72+
5673
[tool.ruff.format]
5774
docstring-code-format = true

src/magnify/filter.py

Lines changed: 4 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -14,18 +14,14 @@ def filter_expression(
1414
search_channel: str | list[str] | None = None,
1515
min_contrast: int | None = None,
1616
):
17-
if search_channel is None:
18-
search_channels = assay.channel
19-
else:
20-
search_channels = utils.to_list(search_channel)
21-
17+
search_channels = assay.channel if search_channel is None else utils.to_list(search_channel)
2218
valid = xr.zeros_like(assay.valid, dtype=bool)
2319
for channel in search_channels:
2420
subassay = assay.isel(time=0).sel(channel=channel)
2521
fg = subassay.roi.where(subassay.fg).median(dim=["roi_x", "roi_y"]).compute()
2622
bg = subassay.roi.where(subassay.bg).median(dim=["roi_x", "roi_y"]).compute()
2723
if min_contrast is None:
28-
# Compute the intensity differences between every pair of backgrounds on the first timestep.
24+
# Compute intensity differences between every pair of backgrounds on the first timestep.
2925
bg_n = bg.to_numpy().flatten()
3026
diffs = bg_n[:, np.newaxis] - bg_n[np.newaxis, :]
3127
offdiag = np.ones_like(diffs, dtype=bool) & (~np.eye(len(diffs), dtype=bool))
@@ -47,11 +43,7 @@ def filter_nonround(
4743
min_roundness: float = 0.75,
4844
search_channel: str | list[str] | None = None,
4945
):
50-
if search_channel is None:
51-
search_channels = assay.channel
52-
else:
53-
search_channels = utils.to_list(search_channel)
54-
46+
search_channels = assay.channel if search_channel is None else utils.to_list(search_channel)
5547
valid = assay.valid.to_numpy()
5648
for channel in search_channels:
5749
subassay = assay.isel(time=0).sel(channel=channel)
@@ -72,11 +64,7 @@ def filter_nonround(
7264

7365
@registry.component("filter_leaky")
7466
def filter_leaky_buttons(assay: xr.Dataset, search_channel: str | list[str] | None = None):
75-
if search_channel is None:
76-
search_channels = assay.channel
77-
else:
78-
search_channels = utils.to_list(search_channel)
79-
67+
search_channels = assay.channel if search_channel is None else utils.to_list(search_channel)
8068
tag = assay.tag.to_numpy()
8169
valid = assay.valid.to_numpy()
8270
rows = assay.mark_row.to_numpy()

src/magnify/find.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -145,13 +145,12 @@ def __call__(self, assay: xr.Dataset) -> xr.Dataset:
145145
# Skip this timestep since we've already processed it.
146146
continue
147147

148-
if t < self.search_timesteps[0]:
149-
# Backfill timesteps that come before the first searched timestep.
150-
copy_t = self.search_timesteps[0]
151-
else:
152-
# Re-use the button locations of the timestep just before this one since it's either
153-
# a searched timestep or a timestep we just copied locations into.
154-
copy_t = t - 1
148+
# Either backfill timesteps that come before the first searched timestep or
149+
# re-use the button locations of the timestep just before this one since it's either
150+
# a searched timestep or a timestep we just copied locations into.
151+
copy_t = (
152+
self.search_timesteps[0] if t < self.search_timesteps[0] else t - 1
153+
)
155154

156155
# Preload all images for this timestep so we only read from disk once and
157156
# convert all relevant data to numpy arrays since iterating through xarrays is slow.
@@ -183,7 +182,8 @@ def __call__(self, assay: xr.Dataset) -> xr.Dataset:
183182
assay["fg"] = assay.fg.persist()
184183
assay["bg"] = assay.bg.persist()
185184
assay = assay.stack(mark=("mark_row", "mark_col"), create_index=True).transpose("mark", ...)
186-
# Rechunk the array to chunk along markers since users will usually want to slice along that dimension.
185+
# Rechunk the array to chunk along markers since users will usually want
186+
# to slice along that dimension.
187187
mark_chunk_size = min(
188188
math.ceil(chunk_bytes / (roi_bytes * assay.sizes["time"] * assay.sizes["channel"])),
189189
num_rows,
@@ -739,7 +739,7 @@ def regress_clusters(
739739
)
740740
# Re-estimate intercepts using a weighted mean of global and local estimates.
741741
# This reduces outlier effects while still allowing uneven intercepts from image stitching.
742-
for i, (x, y) in enumerate(cluster_points):
742+
for i, (x, _y) in enumerate(cluster_points):
743743
if ideal_num_points[i] != 0 and not_nan[i]:
744744
weight = min(len(x), ideal_num_points[i]) / ideal_num_points[i]
745745
intercepts[i] = weight * intercepts[i] + (1 - weight) * (intercept_m * i + intercept_b)

src/magnify/identify.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -173,7 +173,8 @@ def fit_1d(points, codes, counts, N=100):
173173
if proportions[i] > 1:
174174
covs[i] += np.cov(X_r[tag_idxs == i], rowvar=False)
175175

176-
# Set all component variances to be the same since initializing individual covariances can lead to huge terms.
176+
# Set all component variances to be identical since initializing individual
177+
# covariances can lead to huge terms.
177178
covs[:] = np.median(covs, axis=0)
178179
# Initialize the uniform component.
179180
proportions[-1] = 1e-10
@@ -184,7 +185,7 @@ def fit_1d(points, codes, counts, N=100):
184185

185186
tag_names = np.append(tag_names, "outlier")
186187
# Run the Expectation-Maximization algorithm.
187-
for i in range(50):
188+
for _ in range(50):
188189
# E-step: Compute the probability of each point belonging to each component.
189190
diff = X[:, np.newaxis, :] - means[np.newaxis, :, :]
190191
# Work in log space most of the time to avoid numerical issues.

src/magnify/pipeline.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from typing import Callable
1+
from collections.abc import Callable
22

33
import xarray as xr
44
from numpy.typing import ArrayLike
@@ -55,7 +55,7 @@ def func(xp):
5555
raise ValueError("Only one of after, before, first, and last can be set.")
5656

5757
# Check that the new component's name is unique
58-
if self.components and name in next(zip(*self.components)):
58+
if self.components and name in next(zip(*self.components, strict=True)):
5959
raise ValueError(f"A component with the name '{name}' already exists in the pipeline.")
6060

6161
# Find where to insert the component.
@@ -80,7 +80,7 @@ def func(xp):
8080
def remove_pipe(self, name: str) -> None:
8181
if not self.components:
8282
raise ValueError(f"Cannot remove pipe '{name}': pipeline has no components")
83-
component_names = list(zip(*self.components))[0]
83+
component_names = list(zip(*self.components, strict=True))[0]
8484
if name not in component_names:
8585
raise ValueError(f"Component '{name}' not found in pipeline")
8686
idx = component_names.index(name)

src/magnify/plot/__init__.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,11 @@
11
__all__ = [
22
"imshow",
33
"mrbles_clusters",
4-
"ndplot",
5-
"relplot",
64
"roishow",
75
"set_style",
86
]
97
from magnify.plot.image import imshow, roishow
108
from magnify.plot.mrbles import mrbles_clusters
11-
from magnify.plot.ndplot import ndplot
12-
from magnify.plot.relation import relplot
139
from magnify.plot.style import set_style
1410

1511
set_style()

src/magnify/plot/image.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ def roishow(xp: xr.Dataset):
1717
roi = np.zeros((counts.max(), len(tags)) + xp.roi.isel(mark=0).shape)
1818
fg = np.zeros((counts.max(), len(tags)) + xp.roi.isel(mark=0, channel=0).shape, dtype=bool)
1919
bg = np.zeros_like(fg)
20-
for i, (tag, group) in enumerate(xp.roi.groupby("tag")):
20+
for i, (_tag, group) in enumerate(xp.roi.groupby("tag")):
2121
roi[: group.sizes["mark"], i] = group
2222
fg[: group.sizes["mark"], i] = group.fg.isel(channel=0)
2323
bg[: group.sizes["mark"], i] = group.bg.isel(channel=0)

src/magnify/plot/ndplot.py

Lines changed: 0 additions & 101 deletions
This file was deleted.

0 commit comments

Comments
 (0)