Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
51 commits
Select commit Hold shift + click to select a range
00f74cc
Modified parser, and flash matching to support merging flashes
Apr 8, 2025
6fd7425
Modified parser, and flash matching to support merging flashes
Apr 12, 2025
6ed50d6
Remove old flash merge function
Apr 12, 2025
2d654f1
Add option to store information from original flashes into the matche…
Apr 14, 2025
09adc36
Remove unused lines
bear-is-asleep Apr 14, 2025
56a19b5
Update spine/io/parse/optical.py
bear-is-asleep Apr 14, 2025
2f49998
Update spine/data/optical.py
bear-is-asleep Apr 14, 2025
da74198
Remove unused lines
bear-is-asleep Apr 14, 2025
c060fd2
Fix bug with trying to fill values when flash is none
Apr 14, 2025
b95ba60
Merge branch 'feature/bearc_merge_flashes' of https://github.com/bear…
Apr 14, 2025
64925fe
Correctly assign new flash id to flashes in dictionary
Apr 14, 2025
14d072a
Correctly store flash information
Apr 15, 2025
f8d43b6
Update optical display to show PD ID
bear-is-asleep Apr 17, 2025
e1c201b
Updates for using merged flashes
Apr 17, 2025
a0aeacf
Remove print statements
bear-is-asleep Apr 18, 2025
4d59cb0
Remove print statements
bear-is-asleep Apr 18, 2025
73a1962
Add option to specify width of window to merge flashes
May 8, 2025
57d6ced
Merge branch 'feature/bearc_merge_flashes' of https://github.com/bear…
May 8, 2025
2c86707
Fixes bug for computing gradient
bear-is-asleep Jun 27, 2025
c676f52
Add attribute for storing start/end dedx of tracks
Jun 30, 2025
fbce4a6
Add attribute for storing start/end dedx of tracks
Jun 30, 2025
eea655a
Merge branch 'develop' into feature/bearc_merge_flashes
francois-drielsma Jul 2, 2025
2d096c3
Merge pull request #12 from bear-is-asleep/feature/bearc_merge_flashes
francois-drielsma Jul 2, 2025
4d13cfc
Refactoring of the flash merging code
francois-drielsma Jul 15, 2025
804a93f
Merge pull request #101 from francois-drielsma/develop
francois-drielsma Jul 15, 2025
d1ca4e5
Merge pull request #14 from bear-is-asleep/feature/track_dedx
francois-drielsma Jul 15, 2025
3e34041
FlashParser was not on the list of imports...
francois-drielsma Jul 16, 2025
901b7d2
Unified dE/dx computation in a single post-processor
francois-drielsma Jul 16, 2025
94252af
Merge pull request #102 from francois-drielsma/develop
francois-drielsma Jul 16, 2025
8537a5c
Bug fix in feature tensor collation
francois-drielsma Jul 16, 2025
682c8c5
Fallback on average charge when collection charge is missing
francois-drielsma Jul 16, 2025
6380ab0
Merge pull request #103 from francois-drielsma/develop
francois-drielsma Jul 16, 2025
e495c00
Merge pull request #18 from bear-is-asleep/feature/cathode_crosser
francois-drielsma Jul 16, 2025
c160786
Use the cathode thickness to offset objects at the cathode instead of…
francois-drielsma Jul 22, 2025
dd4650e
Move object match reset at the base level
francois-drielsma Jul 22, 2025
c73ec0f
Fix typo
francois-drielsma Jul 22, 2025
8b70030
Merge pull request #104 from francois-drielsma/develop
francois-drielsma Jul 22, 2025
4ad1b90
Merge pull request #16 from bear-is-asleep/patch-3
francois-drielsma Jul 22, 2025
fa41c16
Cosmetic fix
francois-drielsma Jul 22, 2025
7c769d2
Merge pull request #105 from francois-drielsma/develop
francois-drielsma Jul 22, 2025
553f8a2
Merge pull request #19 from bear-is-asleep/patch-2
francois-drielsma Jul 22, 2025
792ef1d
Enable passing hovertext to optical geo drawer
francois-drielsma Jul 23, 2025
08f7eb5
Merge pull request #106 from francois-drielsma/develop
francois-drielsma Jul 23, 2025
e8b1abd
In the CC merging code, prevent particles from entering the physical …
francois-drielsma Jul 29, 2025
f27a630
Move flash PE vector resizing upstream of flash merging code
francois-drielsma Jul 29, 2025
6f2b6a4
Simple logic bug in the flash indexing when merging flashes
francois-drielsma Jul 29, 2025
0f134b7
Removed spurious print statement
francois-drielsma Jul 29, 2025
b018cbe
Fix issue with PE array passed to OpT0Finder
francois-drielsma Jul 30, 2025
3847c8f
Update version.py
francois-drielsma Jul 30, 2025
78ecf0e
Merge pull request #107 from francois-drielsma/develop
francois-drielsma Jul 30, 2025
20a19c2
Update neutrino.py
francois-drielsma Aug 6, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion spine/data/neutrino.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ class Neutrino(PosDataBase):
pdg_code : int
PDG code of the neutrino
lepton_pdg_code : int
PDF code of the outgoing lepton
PDG code of the outgoing lepton
current_type : int
Enumerated current type of the neutrino interaction
interaction_mode : int
Expand Down
65 changes: 64 additions & 1 deletion spine/data/optical.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ class Flash(PosDataBase):
id : int
Index of the flash in the list
volume_id : int
Index of the optical volume in which the flahs was recorded
Index of the optical volume in which the flash was recorded
time : float
Time with respect to the trigger in microseconds
time_width : float
Expand Down Expand Up @@ -111,3 +111,66 @@ def from_larcv(cls, flash):
time_abs=flash.absTime(), time_width=flash.timeWidth(),
total_pe=flash.TotalPE(), pe_per_ch=pe_per_ch,
center=center, width=width)

    def merge(self, other):
        """Merge another flash into this one.

        The merging strategy proceeds as follows:
        - The earlier flash takes precedence over the later flash as far as
          all timing-related information is concerned (flash time, etc.)
        - The combined flash centroid is produced by taking the weighted average
          of the two existing flash centroids
        - The PE values in each light collection system are added together, so
          is the total PE value of the combined flash

        Parameters
        ----------
        other : Flash
            Flash to merge into this one
        """
        # Check that the position units are the same
        assert self.units == other.units, (
                "The units of the flash to be merged do not match.")

        # Determine the flash window end points (to merge time widths later)
        end_i, end_j = self.time + self.time_width, other.time + other.time_width

        # If the other flash happened first, update the timing information
        # (the earlier of the two flashes dictates all time-related attributes)
        if self.time > other.time:
            self.time = other.time
            self.time_abs = other.time_abs
            self.on_beam_time = other.on_beam_time
            self.frame = other.frame
            self.in_beam_frame = other.in_beam_frame

        # Take the union of the two time widths as the new combined width
        self.time_width = max(end_i, end_j) - self.time

        # Take the weighted average of the centroids to compute the new one
        # (per-coordinate inverse-variance weights, 1/width^2)
        valid_mask = (self.width > 0.) & (other.width > 0.)

        # NOTE(review): the weights below are evaluated for *every* coordinate,
        # including those outside valid_mask. A width of exactly 0 triggers a
        # division by zero, and a negative sentinel width (e.g. -1) still
        # contributes a finite weight to the center average, so `center` can be
        # skewed for coordinates later flagged invalid — confirm intended.
        w_i, w_j = 1./self.width**2, 1./other.width**2
        self.center = (w_i*self.center + w_j*other.center)/(w_i + w_j)

        # Combined width of two independent measurements; coordinates where
        # either input width was non-positive are flagged with a -1 sentinel
        self.width = 1./np.sqrt(w_i + w_j)
        self.width[~valid_mask] = -1.

        # Compute the new total PE and fast light component to total ratio
        t_i, t_j = self.total_pe, other.total_pe
        self.total_pe = t_i + t_j

        # PE-weighted average of the two fast-to-total ratios
        # NOTE(review): divides by t_i + t_j — fails if both flashes carry
        # zero total PE; confirm that case cannot occur upstream.
        r_i, r_j = self.fast_to_total, other.fast_to_total
        self.fast_to_total = (r_i*t_i + r_j*t_j)/(t_i + t_j)

        # Merge the PE count in each PMT (pad to the longer of the two PE
        # vectors so channels present in only one flash are preserved)
        pe_per_ch = np.zeros(
                max(len(self.pe_per_ch), len(other.pe_per_ch)),
                dtype=self.pe_per_ch.dtype)
        pe_per_ch[:len(self.pe_per_ch)] += self.pe_per_ch
        pe_per_ch[:len(other.pe_per_ch)] += other.pe_per_ch

        self.pe_per_ch = pe_per_ch

        # The new volume ID is invalid if the two original volumes differ
        if self.volume_id != other.volume_id:
            self.volume_id = -1
5 changes: 5 additions & 0 deletions spine/data/out/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,11 @@ class OutBase(PosDataBase):
# Attributes that must not be stored to file when storing lite files
_lite_skip_attrs = ('index',)

def reset_match(self):
"""Resets the reco/truth matching information for the object."""
self.is_matched = False
self.match_ids = np.empty(0, dtype=np.int64)

@property
def size(self):
"""Total number of voxels that make up the object.
Expand Down
22 changes: 22 additions & 0 deletions spine/data/out/interaction.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,28 @@ def __str__(self):

return info

def reset_flash_match(self, typed=True):
"""Reset all the flash matching attributes.

Parameters
----------
typed : bool, default True
If `True`, the underlying arrays are reset to typed empty arrays
"""
self.is_flash_matched = False
self.flash_total_pe = -1.
self.flash_type_pe = -1.
if typed:
self.flash_ids = np.empty(0, dtype=np.int32)
self.flash_volume_ids = np.empty(0, dtype=np.int32)
self.flash_times = np.empty(0, dtype=np.float32)
self.flash_scores = np.empty(0, dtype=np.float32)
else:
self.flash_ids = []
self.flash_volume_ids = []
self.flash_times = []
self.flash_scores = []

@property
def primary_particles(self):
"""List of primary particles associated with this interaction.
Expand Down
10 changes: 3 additions & 7 deletions spine/data/out/particle.py
Original file line number Diff line number Diff line change
Expand Up @@ -196,13 +196,6 @@ def p(self):
def p(self, p):
pass

def unmatch(self):
"""
Unmatch the particle from its reco or truth particle match.
"""
self.match_ids = []
self.is_matched = False


@dataclass(eq=False)
@inherit_docstring(RecoBase, ParticleBase)
Expand All @@ -224,6 +217,8 @@ class RecoParticle(ParticleBase, RecoBase):
interaction vertex position in cm
start_dedx : float
dE/dx around a user-defined neighborhood of the start point in MeV/cm
end_dedx : float
dE/dx around a user-defined neighborhood of the end point in MeV/cm
start_straightness : float
Explained variance ratio of the beginning of the particle
directional_spread : float
Expand All @@ -238,6 +233,7 @@ class RecoParticle(ParticleBase, RecoBase):
ppn_points: np.ndarray = None
vertex_distance: float = -1.
start_dedx: float = -1.
end_dedx: float = -1.
start_straightness: float = -1.
directional_spread: float = -1.
axial_spread: float = -np.inf
Expand Down
2 changes: 1 addition & 1 deletion spine/io/collate.py
Original file line number Diff line number Diff line change
Expand Up @@ -253,7 +253,7 @@ def stack_feat_tensors(self, batch, key):
# Dispatch
if not self.split or sources is None:
tensor = np.concatenate([sample[key].features for sample in batch])
counts = [len(sample[key]) for sample in batch]
counts = [len(sample[key].features) for sample in batch]

else:
batch_size = len(batch)
Expand Down
26 changes: 24 additions & 2 deletions spine/io/parse/misc.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
- :class:`Meta2DParser`
- :class:`Meta3DParser`
- :class:`RunInfoParser`
- :class:`OpFlashParser`
- :class:`CRTHitParser`
- :class:`TriggerParser`
"""
Expand All @@ -13,6 +12,7 @@
from spine.data import Meta, RunInfo, Flash, CRTHit, Trigger

from spine.utils.conditional import larcv
from spine.utils.optical import FlashMerger

from .base import ParserBase
from .data import ParserObjectList
Expand Down Expand Up @@ -67,7 +67,7 @@ def __init__(self, projection_id=None, **kwargs):
projection_id : int, optional
Projection ID to get the 2D image from (if fetching from 2D)
**kwargs : dict, optional
Data product arguments to be passed to the `process` function
data product arguments to be passed to the `process` function
"""
# Initialize the parent class
super().__init__(**kwargs)
Expand Down Expand Up @@ -186,6 +186,24 @@ class FlashParser(ParserBase):
# Type of object(s) returned by the parser
returns = 'object_list'

def __init__(self, merge=None, **kwargs):
"""Initialize the flash parser.

Parameters
----------
merge : dict, optional
Flash merging configuration
**kwargs : dict, optional
data product arguments to be passed to the `process` function
"""
# Initialize the parent class
super().__init__(**kwargs)

# Initialize the flash merging class, if needed
self.merger = None
if merge is not None:
self.merger = FlashMerger(**merge)

def __call__(self, trees):
"""Parse one entry.

Expand Down Expand Up @@ -238,6 +256,10 @@ def process(self, flash_event=None, flash_event_list=None):
flashes.append(flash)
idx += 1

# If requested, merge flashes which match in time
if self.merger is not None:
flashes, _ = merger(flashes)

return ParserObjectList(flashes, Flash())


Expand Down
12 changes: 4 additions & 8 deletions spine/io/parse/sparse.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
from spine.data import Meta

from spine.utils.globals import GHOST_SHP, SHAPE_PREC
from spine.utils.ghost import compute_rescaled_charge
from spine.utils.ghost import ChargeRescaler
from spine.utils.conditional import larcv

from .base import ParserBase
Expand Down Expand Up @@ -457,9 +457,8 @@ def __init__(self, dtype, collection_only=False, collection_id=2, **kwargs):
# Initialize the parent class
super().__init__(dtype, **kwargs)

# Store the revelant attributes
self.collection_only = collection_only
self.collection_id = collection_id
# Initialize the charge rescaler
self.rescaler = ChargeRescaler(collection_only, collection_id)

def __call__(self, trees):
"""Parse one entry.
Expand Down Expand Up @@ -497,10 +496,7 @@ def process_rescale(self, sparse_event_list):

# Use individual hit informations to compute a rescaled charge
deghost_mask = np.where(tensor.features[:, -1] < GHOST_SHP)[0]
charges = compute_rescaled_charge(
tensor.features[deghost_mask, :-1],
collection_only=self.collection_only,
collection_id=self.collection_id)
charges = self.rescaler.process_single(tensor.features[deghost_mask, :-1])

tensor.features = charges[:, None]

Expand Down
11 changes: 8 additions & 3 deletions spine/model/full_chain.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,13 +17,14 @@
# TODO: raname it something more generic like ParticleClusterImageClassifier?

from spine.data import TensorBatch, IndexBatch, RunInfo

from spine.utils.logger import logger
from spine.utils.globals import (
COORD_COLS, VALUE_COL, CLUST_COL, SHAPE_COL, SHOWR_SHP, TRACK_SHP,
MICHL_SHP, DELTA_SHP, GHOST_SHP)
from spine.utils.ghost import ChargeRescaler
from spine.utils.calib import CalibrationManager
from spine.utils.ppn import ParticlePointPredictor
from spine.utils.ghost import compute_rescaled_charge_batch
from spine.utils.cluster.label import ClusterLabelAdapter
from spine.utils.gnn.cluster import (
form_clusters_batch, get_cluster_label_batch)
Expand Down Expand Up @@ -164,6 +165,11 @@ def __init__(self, chain, uresnet_deghost=None, uresnet=None,
"`uresnet_deghost` configuration block.")
self.uresnet_deghost = UResNetSegmentation(uresnet_deghost)

# Initialize the charge rescaling process (adapt to ghost predictions)
if self.charge_rescaling is not None:
self.charge_rescaler = ChargeRescaler(
collection_only=self.charge_rescaling == 'collection')

# Initialize the semantic segmentation model (+ point proposal)
if self.segmentation is not None and self.segmentation == 'uresnet':
assert (uresnet is not None) ^ (uresnet_ppn is not None), (
Expand Down Expand Up @@ -363,8 +369,7 @@ def run_deghosting(self, data, sources=None, seg_label=None,

# Rescale the charge, if requested
if self.charge_rescaling is not None:
charges = compute_rescaled_charge_batch(
data_adapt, self.charge_rescaling == 'collection')
charges = self.charge_rescaler(data_adapt)
tensor_deghost = data_adapt.tensor[:, :-6]
tensor_deghost[:, VALUE_COL] = charges
data_adapt.data = tensor_deghost
Expand Down
Loading