From 2149aa08c0e447e95c7bf7150eee7757d860a70c Mon Sep 17 00:00:00 2001 From: Michael Hanna Date: Tue, 21 Oct 2025 12:58:35 +0000 Subject: [PATCH 01/18] Release: 2025-10-21 Squashed changes from arbitrary-logit-attribution branch. --- circuit_tracer/attribution/attribute.py | 30 +++++++++++++++------- circuit_tracer/graph.py | 4 +-- circuit_tracer/utils/create_graph_files.py | 15 +++++++++-- 3 files changed, 36 insertions(+), 13 deletions(-) diff --git a/circuit_tracer/attribution/attribute.py b/circuit_tracer/attribution/attribute.py index 72431267..9d1e98f9 100644 --- a/circuit_tracer/attribution/attribute.py +++ b/circuit_tracer/attribution/attribute.py @@ -93,6 +93,7 @@ def attribute( prompt: str | torch.Tensor | list[int], model: ReplacementModel, *, + quantity_to_attribute: list[tuple[str, float, torch.Tensor]] | None = None, max_n_logits: int = 10, desired_logit_prob: float = 0.95, batch_size: int = 512, @@ -106,6 +107,9 @@ def attribute( Args: prompt: Text, token ids, or tensor - will be tokenized if str. model: Frozen ``ReplacementModel`` + quantity_to_attribute: Custom target nodes for attribution. Each tuple contains + (token_id, probability, vector). If None, automatically + selects top logits based on desired_logit_prob. max_n_logits: Max number of logit nodes. desired_logit_prob: Keep logits until cumulative prob >= this value. batch_size: How many source nodes to process per backward pass. 
@@ -137,6 +141,7 @@ def attribute( return _run_attribution( model=model, prompt=prompt, + quantity_to_attribute=quantity_to_attribute, max_n_logits=max_n_logits, desired_logit_prob=desired_logit_prob, batch_size=batch_size, @@ -158,6 +163,7 @@ def attribute( def _run_attribution( model, prompt, + quantity_to_attribute, max_n_logits, desired_logit_prob, batch_size, @@ -201,15 +207,21 @@ def _run_attribution( n_layers, n_pos, _ = activation_matrix.shape total_active_feats = activation_matrix._nnz() - logit_idx, logit_p, logit_vecs = compute_salient_logits( - ctx.logits[0, -1], - model.unembed.W_U, - max_n_logits=max_n_logits, - desired_logit_prob=desired_logit_prob, - ) - logger.info( - f"Selected {len(logit_idx)} logits with cumulative probability {logit_p.sum().item():.4f}" - ) + if quantity_to_attribute is not None: + logit_idx, logit_p, logit_vecs = zip(*quantity_to_attribute) + logit_p = torch.tensor(logit_p) + logit_vecs = torch.stack(logit_vecs) + else: + logit_idx, logit_p, logit_vecs = compute_salient_logits( + ctx.logits[0, -1], + model.unembed.W_U, + max_n_logits=max_n_logits, + desired_logit_prob=desired_logit_prob, + ) + logger.info( + f"Selected {len(logit_idx)} logits with cumulative probability \ + {logit_p.sum().item():.4f}" + ) if offload: offload_handles += offload_modules([model.unembed, model.embed], offload) diff --git a/circuit_tracer/graph.py b/circuit_tracer/graph.py index 5f1a391d..47076a88 100644 --- a/circuit_tracer/graph.py +++ b/circuit_tracer/graph.py @@ -7,7 +7,7 @@ class Graph: input_string: str input_tokens: torch.Tensor - logit_tokens: torch.Tensor + logit_tokens: torch.Tensor | list[str] active_features: torch.Tensor adjacency_matrix: torch.Tensor selected_features: torch.Tensor @@ -23,7 +23,7 @@ def __init__( active_features: torch.Tensor, adjacency_matrix: torch.Tensor, cfg: HookedTransformerConfig, - logit_tokens: torch.Tensor, + logit_tokens: torch.Tensor | list[str], logit_probabilities: torch.Tensor, selected_features: 
torch.Tensor, activation_values: torch.Tensor, diff --git a/circuit_tracer/utils/create_graph_files.py b/circuit_tracer/utils/create_graph_files.py index 2258f866..33e1faea 100644 --- a/circuit_tracer/utils/create_graph_files.py +++ b/circuit_tracer/utils/create_graph_files.py @@ -32,6 +32,7 @@ def create_nodes(graph: Graph, node_mask, tokenizer, cumulative_scores): layers = graph.cfg.n_layers error_end_idx = n_features + graph.n_pos * layers token_end_idx = error_end_idx + len(graph.input_tokens) + logit_node_counter = 0 for node_idx in node_mask.nonzero().squeeze().tolist(): if node_idx in range(n_features): @@ -53,10 +54,20 @@ def create_nodes(graph: Graph, node_mask, tokenizer, cumulative_scores): ) elif node_idx in range(token_end_idx, len(cumulative_scores)): pos = node_idx - token_end_idx + + logit_token = graph.logit_tokens[pos] + if isinstance(logit_token, torch.Tensor): + vocab_idx = logit_token + token = tokenizer.decode(logit_token) + else: + token = logit_token + vocab_idx = logit_node_counter + logit_node_counter += 1 + nodes[node_idx] = Node.logit_node( pos=graph.n_pos - 1, - vocab_idx=graph.logit_tokens[pos], - token=tokenizer.decode(graph.logit_tokens[pos]), + vocab_idx=vocab_idx, + token=tokenizer.decode(token), target_logit=pos == 0, token_prob=graph.logit_probabilities[pos].item(), num_layers=layers, From b6bf15ffd2e279780f4768cd438c852f024f4051 Mon Sep 17 00:00:00 2001 From: Daniel Dale Date: Wed, 5 Nov 2025 16:01:14 -0800 Subject: [PATCH 02/18] A proposed API enhancement (`AttributionTargets`) that encapsulates the functionality proposed in PR https://github.com/safety-research/circuit-tracer/pull/44 and provides a unified foundation for future attribution target enhancements. 
--- circuit_tracer/attribution/attribute.py | 96 ++--- circuit_tracer/attribution/targets.py | 432 +++++++++++++++++++++ circuit_tracer/graph.py | 123 +++++- circuit_tracer/utils/create_graph_files.py | 15 +- demos/attribute_demo.ipynb | 4 +- tests/test_attribution_clt.py | 2 +- tests/test_attribution_targets.py | 384 ++++++++++++++++++ tests/test_attributions_gemma.py | 4 +- tests/test_graph.py | 157 +++++++- 9 files changed, 1127 insertions(+), 90 deletions(-) create mode 100644 circuit_tracer/attribution/targets.py create mode 100644 tests/test_attribution_targets.py diff --git a/circuit_tracer/attribution/attribute.py b/circuit_tracer/attribution/attribute.py index 9d1e98f9..d0698253 100644 --- a/circuit_tracer/attribution/attribute.py +++ b/circuit_tracer/attribution/attribute.py @@ -27,45 +27,13 @@ import torch from tqdm import tqdm +from circuit_tracer.attribution.targets import AttributionTargets from circuit_tracer.graph import Graph from circuit_tracer.replacement_model import ReplacementModel from circuit_tracer.utils import get_default_device from circuit_tracer.utils.disk_offload import offload_modules -@torch.no_grad() -def compute_salient_logits( - logits: torch.Tensor, - unembed_proj: torch.Tensor, - *, - max_n_logits: int = 10, - desired_logit_prob: float = 0.95, -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """Pick the smallest logit set whose cumulative prob >= *desired_logit_prob*. - - Args: - logits: ``(d_vocab,)`` vector (single position). - unembed_proj: ``(d_model, d_vocab)`` unembedding matrix. - max_n_logits: Hard cap *k*. - desired_logit_prob: Cumulative probability threshold *p*. - - Returns: - Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - * logit_indices - ``(k,)`` vocabulary ids. - * logit_probs - ``(k,)`` softmax probabilities. - * demeaned_vecs - ``(k, d_model)`` unembedding columns, demeaned. 
- """ - - probs = torch.softmax(logits, dim=-1) - top_p, top_idx = torch.topk(probs, max_n_logits) - cutoff = int(torch.searchsorted(torch.cumsum(top_p, 0), desired_logit_prob)) + 1 - top_p, top_idx = top_p[:cutoff], top_idx[:cutoff] - - cols = unembed_proj[:, top_idx] - demeaned = cols - unembed_proj.mean(dim=-1, keepdim=True) - return top_idx, top_p, demeaned.T - - def compute_partial_influences(edge_matrix, logit_p, row_to_node_index, max_iter=128, device=None): """Compute partial influences using power iteration method.""" device = device or get_default_device() @@ -93,7 +61,9 @@ def attribute( prompt: str | torch.Tensor | list[int], model: ReplacementModel, *, - quantity_to_attribute: list[tuple[str, float, torch.Tensor]] | None = None, + attribution_targets: ( + list[tuple[str, float, torch.Tensor] | int | str] | torch.Tensor | None + ) = None, max_n_logits: int = 10, desired_logit_prob: float = 0.95, batch_size: int = 512, @@ -107,11 +77,19 @@ def attribute( Args: prompt: Text, token ids, or tensor - will be tokenized if str. model: Frozen ``ReplacementModel`` - quantity_to_attribute: Custom target nodes for attribution. Each tuple contains - (token_id, probability, vector). If None, automatically - selects top logits based on desired_logit_prob. - max_n_logits: Max number of logit nodes. - desired_logit_prob: Keep logits until cumulative prob >= this value. 
+ attribution_targets: Flexible attribution target specification in one of several formats: + - None: Auto-select salient logits based on probability threshold + - torch.Tensor: Tensor of token indices + - list[tuple[str, float, torch.Tensor] | int | str]: List where + each element can be: + * int or str: Token ID/string (auto-computes probability & vector, + returns tensor of indices) + * tuple[str, float, torch.Tensor]: Fully specified logit spec with + arbitrary string tokens (or functions thereof) that may not be in + vocabulary + max_n_logits: Max number of logit nodes (used when attribution_targets is None). + desired_logit_prob: Keep logits until cumulative prob >= this value + (used when attribution_targets is None). batch_size: How many source nodes to process per backward pass. max_feature_nodes: Max number of feature nodes to include in the graph. offload: Method for offloading model parameters to save memory. @@ -141,7 +119,7 @@ def attribute( return _run_attribution( model=model, prompt=prompt, - quantity_to_attribute=quantity_to_attribute, + attribution_targets=attribution_targets, max_n_logits=max_n_logits, desired_logit_prob=desired_logit_prob, batch_size=batch_size, @@ -163,7 +141,7 @@ def attribute( def _run_attribution( model, prompt, - quantity_to_attribute, + attribution_targets, max_n_logits, desired_logit_prob, batch_size, @@ -207,27 +185,26 @@ def _run_attribution( n_layers, n_pos, _ = activation_matrix.shape total_active_feats = activation_matrix._nnz() - if quantity_to_attribute is not None: - logit_idx, logit_p, logit_vecs = zip(*quantity_to_attribute) - logit_p = torch.tensor(logit_p) - logit_vecs = torch.stack(logit_vecs) - else: - logit_idx, logit_p, logit_vecs = compute_salient_logits( - ctx.logits[0, -1], - model.unembed.W_U, - max_n_logits=max_n_logits, - desired_logit_prob=desired_logit_prob, - ) + targets = AttributionTargets( + attribution_targets=attribution_targets, + logits=ctx.logits[0, -1], + unembed_proj=model.unembed.W_U, 
+ tokenizer=model.tokenizer, + max_n_logits=max_n_logits, + desired_logit_prob=desired_logit_prob, + ) + + if attribution_targets is None: logger.info( - f"Selected {len(logit_idx)} logits with cumulative probability \ - {logit_p.sum().item():.4f}" + f"Selected {len(targets)} logits with cumulative probability " + f"{targets.logit_probabilities.sum().item():.4f}" ) if offload: offload_handles += offload_modules([model.unembed, model.embed], offload) logit_offset = len(feat_layers) + (n_layers + 1) * n_pos - n_logits = len(logit_idx) + n_logits = len(targets) total_nodes = logit_offset + n_logits max_feature_nodes = min(max_feature_nodes or total_active_feats, total_active_feats) @@ -242,8 +219,8 @@ def _run_attribution( # Phase 3: logit attribution logger.info("Phase 3: Computing logit attributions") phase_start = time.time() - for i in range(0, len(logit_idx), batch_size): - batch = logit_vecs[i : i + batch_size] + for i in range(0, len(targets), batch_size): + batch = targets.logit_vectors[i : i + batch_size] rows = ctx.compute_batch( layers=torch.full((batch.shape[0],), n_layers), positions=torch.full((batch.shape[0],), n_pos - 1), @@ -269,7 +246,7 @@ def _run_attribution( pending = torch.arange(total_active_feats) else: influences = compute_partial_influences( - edge_matrix[:st], logit_p, row_to_node_index[:st] + edge_matrix[:st], targets.logit_probabilities, row_to_node_index[:st] ) feature_rank = torch.argsort(influences[:total_active_feats], descending=True).cpu() queue_size = min(update_interval * batch_size, max_feature_nodes - n_visited) @@ -314,8 +291,7 @@ def _run_attribution( graph = Graph( input_string=model.tokenizer.decode(input_ids), input_tokens=input_ids, - logit_tokens=logit_idx, - logit_probabilities=logit_p, + attribution_targets=targets, active_features=activation_matrix.indices().T, activation_values=activation_matrix.values(), selected_features=selected_features, diff --git a/circuit_tracer/attribution/targets.py 
b/circuit_tracer/attribution/targets.py new file mode 100644 index 00000000..b46c6d05 --- /dev/null +++ b/circuit_tracer/attribution/targets.py @@ -0,0 +1,432 @@ +"""Attribution target specification and processing. + +This module provides the AttributionTargets container class and LogitTarget record +structure for specifying and processing attribution targets in the format required +for attribution graph computation. + +Key concepts: +- AttributionTargets: High-level container that encapsulates target specifications +- LogitTarget: Low-level data transfer object (DTO) storing token metadata +- Virtual indices: Technique for representing out-of-vocabulary (OOV) tokens using + synthetic indices >= vocab_size. Required to support arbitrary string token (or functions thereof) + attribution functionality. +""" + +from collections.abc import Sequence +from typing import NamedTuple + +import torch + + +class LogitTarget(NamedTuple): + """Data transfer object (DTO) for logit attribution targets. + + A lightweight record structure containing token metadata for attribution. + + Attributes: + token_str: String representation of the token (decoded from vocab or arbitrary) + vocab_idx: Vocabulary index - either a real token ID (< vocab_size) or + a virtual index for OOV tokens (>= vocab_size) + """ + + token_str: str + vocab_idx: int + + +class AttributionTargets: + """Container for processed attribution target specifications. + + High-level data structure that encapsulates target identifiers, softmax probabilities, + and demeaned unembedding vectors needed for attribution graph computation. + + Supports multiple input formats for flexible target specification: + - None: Auto-select salient logits by probability threshold + - torch.Tensor: Specific vocabulary indices (i.e. 
token_ids) + - list: Mixed targets (tuples for OOV tokens, ints/strs for valid token_ids) + + Attributes: + logit_targets: List of LogitTarget records with token strings and vocab indices + logit_probabilities: Softmax probabilities for each target (k,) + logit_vectors: Demeaned unembedding vectors (k, d_model) + """ + + def __init__( + self, + attribution_targets: ( + Sequence[tuple[str, float, torch.Tensor] | int | str] | torch.Tensor | None + ), + logits: torch.Tensor, + unembed_proj: torch.Tensor, + tokenizer, + *, + max_n_logits: int = 10, + desired_logit_prob: float = 0.95, + ): + """Build attribution targets from user specification. + + Args: + attribution_targets: Target specification in one of several formats: + - None: Auto-select salient logits based on probability threshold + - torch.Tensor: Tensor of vocabulary token IDs + - list[tuple[str, float, torch.Tensor] | int | str]: List where + each element can be: + * int or str: Token ID/string (auto-computes probability & vector) + * tuple[str, float, torch.Tensor]: Fully specified target logit with arbitrary + string token (or function thereof) (may use virtual index for OOV tokens) + logits: ``(d_vocab,)`` logit vector for single position + unembed_proj: ``(d_model, d_vocab)`` unembedding matrix + tokenizer: Tokenizer for string→int conversion + max_n_logits: Max targets when auto-selecting (salient mode) + desired_logit_prob: Probability threshold for salient mode + """ + # Store tokenizer ref for decoding vocab indices to token strings + self.tokenizer = tokenizer + ctor_shared = {"logits": logits, "unembed_proj": unembed_proj, "tokenizer": tokenizer} + + # Dispatch to appropriate constructor based on input type + if attribution_targets is None: + salient_ctor = {"max_n_logits": max_n_logits, "desired_logit_prob": desired_logit_prob} + attr_spec = self._from_salient(**salient_ctor, **ctor_shared) + elif isinstance(attribution_targets, torch.Tensor): + attr_spec = 
self._from_indices(indices=attribution_targets, **ctor_shared) + elif isinstance(attribution_targets, list): + if not attribution_targets: + raise ValueError("attribution_targets list cannot be empty") + attr_spec = self._from_list(target_list=attribution_targets, **ctor_shared) + else: + raise TypeError( + f"attribution_targets must be None, torch.Tensor, or list, " + f"got {type(attribution_targets)}" + ) + self.logit_targets, self.logit_probabilities, self.logit_vectors = attr_spec + + def __len__(self) -> int: + """Number of attribution targets.""" + return len(self.logit_targets) + + def __repr__(self) -> str: + """String representation showing key info.""" + if len(self.logit_targets) > 3: + targets_preview = self.logit_targets[:3] + suffix = "..." + else: + targets_preview = self.logit_targets + suffix = "" + return f"AttributionTargets(n={len(self)}, targets={targets_preview}{suffix})" + + @property + def tokens(self) -> list[str]: + """Get token strings for all targets. + + Returns: + List of token strings (decoded vocab tokens or arbitrary strings) + """ + return [target.token_str for target in self.logit_targets] + + @property + def vocab_size(self) -> int: + """Vocabulary size from the tokenizer. + + Returns: + Vocabulary size for determining virtual vs real indices + """ + return self.tokenizer.vocab_size + + @property + def vocab_indices(self) -> list[int]: + """All vocabulary indices including virtual indices (>= vocab_size). + Vocab indices are a generalization of token IDs that can represent: + - Real vocab indices (< vocab_size) for token_ids valid in the current tokenizer vocab space + - Virtual indices (>= vocab_size) for arbitrary string tokens (or functions thereof) + + Use has_virtual_indices to check if any virtual indices are present. + Use token_ids to get a tensor of only real vocabulary indices. 
+ + Returns: + List of vocabulary indices (including virtual indices) + """ + return [target.vocab_idx for target in self.logit_targets] + + @property + def has_virtual_indices(self) -> bool: + """Check if any targets use virtual indices (OOV tokens). + + Virtual indices (vocab_idx >= vocab_size) are a technique for representing + arbitrary string tokens not in the model's vocabulary. + + Returns: + True if virtual indices are present, False otherwise + """ + vocab_size = self.tokenizer.vocab_size + return any(t.vocab_idx >= vocab_size for t in self.logit_targets) + + @property + def token_ids(self) -> torch.Tensor: + """Tensor of valid vocabulary indices (< vocab_size only). + + Returns a torch.Tensor of vocab indices on the same device as other tensors, + suitable for indexing into logit vectors or embeddings. This property will + raise a ValueError if any targets use virtual indices (arbitrary strings). + + Raises: + ValueError: If any targets have virtual indices (vocab_idx >= vocab_size) + + Returns: + torch.Tensor: Long tensor of vocabulary indices + """ + if self.has_virtual_indices: + raise ValueError( + "Cannot create token_ids tensor: some targets use virtual indices " + "(arbitrary strings not in vocabulary). Check has_virtual_indices " + "before accessing token_ids, or use vocab_indices to get all indices." + ) + return torch.tensor( + self.vocab_indices, dtype=torch.long, device=self.logit_probabilities.device + ) + + def to(self, device: str | torch.device) -> "AttributionTargets": + """Transfer AttributionTargets to specified device. + + Only moves torch.Tensor attributes (logit_probabilities, logit_vectors); + logit_targets list stays unchanged. 
+ + Args: + device: Target device (e.g., "cuda", "cpu") + + Returns: + Self with tensors on new device + """ + self.logit_probabilities = self.logit_probabilities.to(device) + self.logit_vectors = self.logit_vectors.to(device) + return self + + @staticmethod + def _from_salient( + logits: torch.Tensor, + unembed_proj: torch.Tensor, + max_n_logits: int, + desired_logit_prob: float, + tokenizer, + ) -> tuple[list[LogitTarget], torch.Tensor, torch.Tensor]: + """Auto-select salient logits by cumulative probability. + + Picks the smallest set of logits whose cumulative probability + exceeds the threshold, up to max_n_logits. + + Args: + logits: ``(d_vocab,)`` logit vector + unembed_proj: ``(d_model, d_vocab)`` unembedding matrix + max_n_logits: Hard cap on number of logits + desired_logit_prob: Cumulative probability threshold + tokenizer: Tokenizer for decoding vocab indices to strings + + Returns: + Tuple of (logit_targets, probabilities, vectors) where logit_targets + contains LogitTarget instances with actual vocab indices + """ + probs = torch.softmax(logits, dim=-1) + top_p, top_idx = torch.topk(probs, max_n_logits) + cutoff = int(torch.searchsorted(torch.cumsum(top_p, 0), desired_logit_prob)) + 1 + indices, probs, vecs = AttributionTargets._compute_logit_vecs( + top_idx[:cutoff], logits, unembed_proj + ) + logit_targets = [ + LogitTarget(token_str=tokenizer.decode(idx), vocab_idx=idx) for idx in indices.tolist() + ] + return logit_targets, probs, vecs + + @staticmethod + def _from_indices( + indices: torch.Tensor, + logits: torch.Tensor, + unembed_proj: torch.Tensor, + tokenizer, + ) -> tuple[list[LogitTarget], torch.Tensor, torch.Tensor]: + """Construct from specific vocabulary indices. 
+ + Args: + indices: ``(k,)`` tensor of vocabulary indices + logits: ``(d_vocab,)`` logit vector + unembed_proj: ``(d_model, d_vocab)`` unembedding matrix + tokenizer: Tokenizer for decoding vocab indices to strings + + Returns: + Tuple of (logit_targets, probabilities, vectors) where logit_targets + contains LogitTarget instances with actual vocab indices + + Raises: + ValueError: If any index is out of vocabulary range + """ + vocab_size = logits.shape[0] + + # Validate all indices are within vocab range + if (indices < 0).any() or (indices >= vocab_size).any(): + invalid = indices[(indices < 0) | (indices >= vocab_size)] + raise ValueError( + f"Token indices must be in range [0, {vocab_size}), " + f"but found invalid indices: {invalid.tolist()}" + ) + + indices_out, probs, vecs = AttributionTargets._compute_logit_vecs( + indices, logits, unembed_proj + ) + + # Create LogitTarget instances with decoded token strings + logit_targets = [ + LogitTarget(token_str=tokenizer.decode(idx), vocab_idx=idx) + for idx in indices_out.tolist() + ] + return logit_targets, probs, vecs + + @staticmethod + def _from_list( + target_list: Sequence[tuple[str, float, torch.Tensor] | int | str], + logits: torch.Tensor, + unembed_proj: torch.Tensor, + tokenizer, + ) -> tuple[list[LogitTarget], torch.Tensor, torch.Tensor]: + """Construct from mixed list of targets. 
+ + Supports heterogeneous list where each element can be: + - int: Vocabulary index (auto-compute prob/vec) + - str: Token string (tokenize, auto-compute) + - tuple[str, float, Tensor]: Fully specified arbitrary string or function thereof + + Args: + targets: List of mixed target specifications + logits: ``(d_vocab,)`` logit vector + unembed_proj: ``(d_model, d_vocab)`` unembedding matrix + tokenizer: Tokenizer for string→int conversion + + Returns: + Tuple of (logit_targets, probabilities, vectors) + """ + return AttributionTargets._process_target_list(target_list, logits, unembed_proj, tokenizer) + + @staticmethod + def _compute_logit_vecs( + indices: torch.Tensor, + logits: torch.Tensor, + unembed_proj: torch.Tensor, + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Compute probabilities and demeaned vectors for indices. + + Args: + indices: ``(k,)`` vocabulary indices to compute vectors for + logits: ``(d_vocab,)`` logit vector for single position + unembed_proj: ``(d_model, d_vocab)`` unembedding matrix + + Returns: + Tuple of: + * indices - ``(k,)`` vocabulary ids (same as input) + * probabilities - ``(k,)`` softmax probabilities + * demeaned_vecs - ``(k, d_model)`` unembedding columns, demeaned + """ + probs = torch.softmax(logits, dim=-1) + selected_probs = probs[indices] + cols = unembed_proj[:, indices] + demeaned = cols - unembed_proj.mean(dim=-1, keepdim=True) + return indices, selected_probs, demeaned.T + + @staticmethod + def _process_target_list( + targets: Sequence[tuple[str, float, torch.Tensor] | int | str], + logits: torch.Tensor, + unembed_proj: torch.Tensor, + tokenizer, + ) -> tuple[list[LogitTarget], torch.Tensor, torch.Tensor]: + """Process mixed target list into LogitTarget instances, probabilities, vectors. 
+ + Supports flexible mixed-mode targets where each element can be: + - int: Token ID (computes probability and vector, uses actual vocab index) + - str: Token string (tokenizes, computes probability and vector, uses actual token_id) + - tuple[str, float, torch.Tensor]: Arbitrary string or function thereof with custom prob/vec + (uses virtual index) + + Args: + targets: List of attribution targets in any combination of the above formats + logits: ``(d_vocab,)`` vector for computing probabilities + unembed_proj: ``(d_model, d_vocab)`` unembedding matrix for computing vectors + tokenizer: Tokenizer to use for string token conversion and to get vocab_size + + Returns: + Tuple of: + * logit_targets - List of LogitTarget instances where: + - For int/str tokens: vocab_idx is actual vocab index, token_str is decoded + - For tuple targets: vocab_idx is virtual (vocab_size + position), + token_str is the arbitrary string or function thereof + * probabilities - ``(k,)`` probabilities + * vectors - ``(k, d_model)`` demeaned vectors + + Raises: + ValueError: If str token cannot be encoded or int token is out of vocab range + """ + vocab_size = logits.shape[0] + + def validate_token_id(token_id: int, original_token: int | str) -> None: + """Validate that token_id is within valid vocabulary range.""" + if not (0 <= token_id < vocab_size): + raise ValueError( + f"Token {original_token!r} resolved to index {token_id}, which is " + f"out of vocabulary range [0, {vocab_size})" + ) + + def token_to_idx(token: int | str) -> int: + """Convert token (int or str) to token index with validation.""" + if isinstance(token, str): + try: + ids = tokenizer.encode(token, add_special_tokens=False) + except Exception as e: + raise ValueError( + f"Failed to encode string token {token!r} using tokenizer: {e}" + ) from e + + if not ids: + raise ValueError( + f"String token {token!r} encoded to empty token sequence. " + f"Cannot determine valid token ID." 
+ ) + + token_id = ids[-1] + validate_token_id(token_id, token) + return token_id + else: + validate_token_id(token, token) + return token + + logit_targets, probs, vecs = [], [], [] + + for position, target in enumerate(targets): + if isinstance(target, tuple): + # Fully specified tuple: (str_token, probability, vector) + # This is an arbitrary string or function of one, so we use virtual indices + if len(target) != 3: + raise ValueError( + f"Tuple targets must have exactly 3 elements " + f"(token_str, probability, vector), got {len(target)}" + ) + token_str, prob, vec = target + if not isinstance(token_str, str): + raise ValueError( + f"Tuple targets must have str as first element, got {type(token_str)}" + ) + + # Use virtual index for arbitrary string/function thereof + virtual_idx = vocab_size + position + logit_targets.append(LogitTarget(token_str=token_str, vocab_idx=virtual_idx)) + probs.append(prob) + vecs.append(vec) + else: + # Single token (int | str) - compute probability and vector, use valid token_ids + idx = token_to_idx(target) + idx_tensor = torch.tensor([idx], dtype=torch.long) + _, prob_tensor, vec_tensor = AttributionTargets._compute_logit_vecs( + idx_tensor, logits, unembed_proj + ) + + token_str = tokenizer.decode(idx) + logit_targets.append(LogitTarget(token_str=token_str, vocab_idx=idx)) + probs.append(prob_tensor.item()) + vecs.append(vec_tensor.squeeze(0)) + + return logit_targets, torch.tensor(probs), torch.stack(vecs) diff --git a/circuit_tracer/graph.py b/circuit_tracer/graph.py index 47076a88..d4fd7bcf 100644 --- a/circuit_tracer/graph.py +++ b/circuit_tracer/graph.py @@ -1,20 +1,27 @@ +"""Graph data structures for attribution results.""" + from typing import NamedTuple +import warnings import torch from transformer_lens import HookedTransformerConfig +from circuit_tracer.attribution.targets import AttributionTargets, LogitTarget + class Graph: input_string: str input_tokens: torch.Tensor - logit_tokens: torch.Tensor | list[str] + 
logit_targets: list[LogitTarget] active_features: torch.Tensor adjacency_matrix: torch.Tensor selected_features: torch.Tensor activation_values: torch.Tensor logit_probabilities: torch.Tensor + vocab_size: int cfg: HookedTransformerConfig scan: str | list[str] | None + n_pos: int def __init__( self, @@ -23,11 +30,13 @@ def __init__( active_features: torch.Tensor, adjacency_matrix: torch.Tensor, cfg: HookedTransformerConfig, - logit_tokens: torch.Tensor | list[str], - logit_probabilities: torch.Tensor, selected_features: torch.Tensor, activation_values: torch.Tensor, scan: str | list[str] | None = None, + attribution_targets: AttributionTargets | None = None, + logit_targets: list[LogitTarget] | torch.Tensor | None = None, + logit_probabilities: torch.Tensor | None = None, + vocab_size: int | None = None, ): """ A graph object containing the adjacency matrix describing the direct effect of each @@ -39,30 +48,62 @@ def __init__( Args: input_string (str): The input string attributed. - input_tokens (List[str]): The input tokens attributed. + input_tokens (torch.Tensor): The input tokens attributed. active_features (torch.Tensor): A tensor of shape (n_active_features, 3) containing the indices (layer, pos, feature_idx) of the non-zero features of the model on the given input string. adjacency_matrix (torch.Tensor): The adjacency matrix. Organized as [active_features, error_nodes, embed_nodes, logit_nodes], where there are model.cfg.n_layers * len(input_tokens) error nodes, len(input_tokens) embed - nodes, len(logit_tokens) logit nodes. The rows represent target nodes, while + nodes, len(logit_targets) logit nodes. The rows represent target nodes, while columns represent source nodes. cfg (HookedTransformerConfig): The cfg of the model. - logit_tokens (List[str]): The logit tokens attributed from. - logit_probabilities (torch.Tensor): The probabilities of each logit token, given - the input string. 
+ selected_features (torch.Tensor): Indices into active_features for selected nodes. + activation_values (torch.Tensor): Activation values for selected features. scan (Optional[Union[str,List[str]]], optional): The identifier of the transcoders used in the graph. Without a scan, the graph cannot be uploaded (since we won't know what transcoders were used). Defaults to None + attribution_targets (Optional[AttributionTargets]): Attribution targets container. + When provided, logit_targets, logit_probabilities, and vocab_size are + extracted from it. + logit_targets (Optional[Union[List[LogitTarget], torch.Tensor]]): Either a list + of LogitTarget records or a tensor of token_ids. When using tensor + format, token_str fields will be empty strings. + logit_probabilities (Optional[torch.Tensor]): Logit probabilities. Required if + attribution_targets is not provided. + vocab_size (Optional[int]): Vocabulary size for determining virtual indices. + If not provided, defaults to cfg.d_vocab. """ + if attribution_targets is not None: + if logit_targets is not None or logit_probabilities is not None: + raise ValueError( + "Cannot specify both attribution_targets and " + "(logit_targets, logit_probabilities). Use one or the other." 
+ ) + self.logit_targets = attribution_targets.logit_targets + self.logit_probabilities = attribution_targets.logit_probabilities + self.vocab_size = attribution_targets.vocab_size + elif logit_targets is not None and logit_probabilities is not None: + if isinstance(logit_targets, torch.Tensor): + # When reconstructing from tensor, token_str is not available + self.logit_targets = [ + LogitTarget(token_str="", vocab_idx=int(idx)) for idx in logit_targets.tolist() + ] + else: + self.logit_targets = logit_targets + self.logit_probabilities = logit_probabilities + self.vocab_size = vocab_size if vocab_size is not None else cfg.d_vocab + else: + raise ValueError( + "Must provide either attribution_targets or both logit_targets and " + "logit_probabilities" + ) + self.input_string = input_string self.adjacency_matrix = adjacency_matrix self.cfg = cfg self.n_pos = len(input_tokens) self.active_features = active_features - self.logit_tokens = logit_tokens - self.logit_probabilities = logit_probabilities self.input_tokens = input_tokens if scan is None: print("Graph loaded without scan to identify it. Uploading will not be possible.") @@ -78,9 +119,62 @@ def to(self, device): """ self.adjacency_matrix = self.adjacency_matrix.to(device) self.active_features = self.active_features.to(device) - self.logit_tokens = self.logit_tokens.to(device) + # logit_targets is list[LogitTarget], no device transfer needed self.logit_probabilities = self.logit_probabilities.to(device) + @property + def vocab_indices(self) -> list[int]: + """All vocabulary indices including virtual indices (>= vocab_size). + + Provides the same interface as AttributionTargets.vocab_indices. + """ + return [target.vocab_idx for target in self.logit_targets] + + @property + def has_virtual_indices(self) -> bool: + """Check if any targets use virtual indices (OOV tokens). 
+ + Virtual indices (vocab_idx >= vocab_size) are a technique used to represent + arbitrary string tokens (or functions thereof) not in the tokenizer's vocabulary. + """ + return any(t.vocab_idx >= self.vocab_size for t in self.logit_targets) + + @property + def logit_token_ids(self) -> torch.Tensor: + """Tensor of logit target token IDs (< vocab_size only). + + Returns token IDs for logit targets on the same device as other graph tensors. + Provides the same interface as AttributionTargets.token_ids. + + Raises: + ValueError: If any targets have virtual indices + """ + if self.has_virtual_indices: + raise ValueError( + "Cannot create logit_token_ids tensor: some targets use virtual indices. " + "Use vocab_indices to get all indices including virtual ones." + ) + return torch.tensor( + self.vocab_indices, dtype=torch.long, device=self.logit_probabilities.device + ) + + @property + def logit_tokens(self) -> torch.Tensor: + """Get logit target token IDs tensor (legacy compatibility). + + .. deprecated:: + Use `logit_token_ids` property instead. This is an alias for backward compatibility. + + Raises: + ValueError: If any targets have virtual indices + """ + warnings.warn( + "logit_tokens property is deprecated. 
Use logit_token_ids property instead.", + DeprecationWarning, + stacklevel=2, + ) + return self.logit_token_ids + def to_pt(self, path: str): """Saves the graph at the given path @@ -92,8 +186,9 @@ def to_pt(self, path: str): "adjacency_matrix": self.adjacency_matrix, "cfg": self.cfg, "active_features": self.active_features, - "logit_tokens": self.logit_tokens, + "logit_targets": self.logit_targets, "logit_probabilities": self.logit_probabilities, + "vocab_size": self.vocab_size, "input_tokens": self.input_tokens, "selected_features": self.selected_features, "activation_values": self.activation_values, @@ -194,7 +289,7 @@ def prune_graph( # Extract dimensions n_tokens = len(graph.input_tokens) - n_logits = len(graph.logit_tokens) + n_logits = len(graph.logit_targets) n_features = len(graph.selected_features) logit_weights = torch.zeros( @@ -271,7 +366,7 @@ def compute_graph_scores(graph: Graph) -> tuple[float, float]: reconstruction where all computation flows through interpretable features. Lower scores indicate more reliance on error nodes, suggesting incomplete feature coverage. 
""" - n_logits = len(graph.logit_tokens) + n_logits = len(graph.logit_targets) n_tokens = len(graph.input_tokens) n_features = len(graph.selected_features) error_start = n_features diff --git a/circuit_tracer/utils/create_graph_files.py b/circuit_tracer/utils/create_graph_files.py index 33e1faea..bdcff6db 100644 --- a/circuit_tracer/utils/create_graph_files.py +++ b/circuit_tracer/utils/create_graph_files.py @@ -32,7 +32,6 @@ def create_nodes(graph: Graph, node_mask, tokenizer, cumulative_scores): layers = graph.cfg.n_layers error_end_idx = n_features + graph.n_pos * layers token_end_idx = error_end_idx + len(graph.input_tokens) - logit_node_counter = 0 for node_idx in node_mask.nonzero().squeeze().tolist(): if node_idx in range(n_features): @@ -55,19 +54,15 @@ def create_nodes(graph: Graph, node_mask, tokenizer, cumulative_scores): elif node_idx in range(token_end_idx, len(cumulative_scores)): pos = node_idx - token_end_idx - logit_token = graph.logit_tokens[pos] - if isinstance(logit_token, torch.Tensor): - vocab_idx = logit_token - token = tokenizer.decode(logit_token) - else: - token = logit_token - vocab_idx = logit_node_counter - logit_node_counter += 1 + # vocab_idx can be either a valid token_id (< vocab_size) or a virtual + # index (>= vocab_size) for arbitrary strings/functions thereof. The virtual indices + # encode the position in the list as: vocab_size + position. + token, vocab_idx = graph.logit_targets[pos] nodes[node_idx] = Node.logit_node( pos=graph.n_pos - 1, vocab_idx=vocab_idx, - token=tokenizer.decode(token), + token=token, target_logit=pos == 0, token_prob=graph.logit_probabilities[pos].item(), num_layers=layers, diff --git a/demos/attribute_demo.ipynb b/demos/attribute_demo.ipynb index 0223ce53..19b0349f 100644 --- a/demos/attribute_demo.ipynb +++ b/demos/attribute_demo.ipynb @@ -273,7 +273,7 @@ "id": "IGnU9l1zmS8m" }, "source": [ - "Earlier, you created a graph object. 
Its adjacency matrix / edge weights are stored in `graph.adjacency_matrix` in a dense format; rows are target nodes and columns are source nodes. The first `len(graph.real_features)` entries of the matrix represent features; the `i`th entry corresponds to the `i`th feature in `graph.real_features`, given in `(layer, position, feature_idx)` format. The next `graph.cfg.n_layers * graph.n_pos` entries are error_nodes. The next `graph.n_pos` entries are token nodes. The final `len(graph.logit_tokens)` entries are logit nodes.\n", +        "Earlier, you created a graph object. Its adjacency matrix / edge weights are stored in `graph.adjacency_matrix` in a dense format; rows are target nodes and columns are source nodes. The first `len(graph.real_features)` entries of the matrix represent features; the `i`th entry corresponds to the `i`th feature in `graph.real_features`, given in `(layer, position, feature_idx)` format. The next `graph.cfg.n_layers * graph.n_pos` entries are error_nodes. The next `graph.n_pos` entries are token nodes. The final `len(graph.logit_targets)` entries are logit nodes.\n", "\n", "The value of the cell `graph.adjacency_matrix[target, source]` is the direct effect of the source node on the target node. That is, it tells you how much the target node's value would change if the source node were set to 0, while holding the attention patterns, layernorm denominators, and other feature activations constant. 
Thus, if the target node is a feature, this tells you how much the target feature would change; if the target node is a logit, this tells you how much the (de-meaned) value of the logit would change.\n", "\n", @@ -338,4 +338,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/tests/test_attribution_clt.py b/tests/test_attribution_clt.py index b79a99a4..166136c4 100644 --- a/tests/test_attribution_clt.py +++ b/tests/test_attribution_clt.py @@ -53,7 +53,7 @@ def verify_feature_edges( s = graph.input_tokens adjacency_matrix = graph.adjacency_matrix.to(get_default_device()) active_features = graph.active_features.to(get_default_device()) - logit_tokens = graph.logit_tokens.to(get_default_device()) + logit_tokens = graph.logit_token_ids total_active_features = active_features.size(0) logits, activation_cache = model.get_activations(s, apply_activation_function=False) diff --git a/tests/test_attribution_targets.py b/tests/test_attribution_targets.py new file mode 100644 index 00000000..a035b2c7 --- /dev/null +++ b/tests/test_attribution_targets.py @@ -0,0 +1,384 @@ +"""Unit tests for AttributionTargets class.""" + +import torch +import pytest + +from circuit_tracer.attribution.targets import AttributionTargets + + +class MockTokenizer: + """Mock tokenizer for testing.""" + + vocab_size = 100 # Define vocab size for testing + + def encode(self, text, add_special_tokens=False): + # Simple mock: return token indices within valid range (0-99) + if not text: + return [] + # Use hash to generate consistent indices within range + return [hash(text) % 100] + + def decode(self, token_id): + """Decode a single token ID to a string.""" + # Simple mock: return string representation prefixed with "tok_" + if isinstance(token_id, int): + return f"tok_{token_id}" + return str(token_id) + + +@pytest.fixture +def mock_data(): + """Create mock logits and unembedding projection.""" + vocab_size = 100 + d_model = 64 + + # Create reproducible random data 
+ torch.manual_seed(42) + logits = torch.randn(vocab_size) + unembed_proj = torch.randn(d_model, vocab_size) + tokenizer = MockTokenizer() + + return logits, unembed_proj, tokenizer + + +@pytest.mark.parametrize( + "targets_list,expected_len,expected_key_types,expected_keys,test_id", + [ + ( + [("arbitrary_token", 0.5, torch.randn(64)), 5, ("another", 0.3, torch.randn(64))], + 3, + # LogitTarget instances have both str and int, but check token_str type + ["str", "int", "str"], + ["arbitrary_token", None, "another"], # None for dynamic int keys + "mixed", + ), + ( + [ + ("token1", 0.4, torch.randn(64)), + ("token2", 0.3, torch.randn(64)), + ("token3", 0.3, torch.randn(64)), + ], + 3, + ["str", "str", "str"], + ["token1", "token2", "token3"], + "all_tuples", + ), + ( + ["hello", "world", "test"], + 3, + ["int", "int", "int"], # Strings get tokenized to ints + [None, None, None], # Dynamic keys + "all_strings", + ), + ], + ids=["mixed", "all_tuples", "all_strings"], +) +def test_attribution_targets_list_mode( + mock_data, targets_list, expected_len, expected_key_types, expected_keys, test_id +): + """Test AttributionTargets with list input (most flexible mode).""" + logits, unembed_proj, tokenizer = mock_data + + targets = AttributionTargets( + attribution_targets=targets_list, + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + # Verify basic structure + from circuit_tracer.attribution.targets import LogitTarget + + assert isinstance(targets.logit_targets, list) + assert len(targets) == expected_len + assert all(isinstance(t, LogitTarget) for t in targets.logit_targets) + assert targets.logit_probabilities.shape == (expected_len,) + assert targets.logit_vectors.shape == (expected_len, 64) + + # Verify token_str and vocab_idx based on expected types + for i, expected_type in enumerate(expected_key_types): + target = targets.logit_targets[i] + assert isinstance(target.token_str, str), f"Target {i} token_str should be str" + assert 
isinstance(target.vocab_idx, int), f"Target {i} vocab_idx should be int" + + # Check token_str matches expected_keys when provided + expected_key = expected_keys[i] + if expected_key is not None: + assert target.token_str == expected_key, f"Target {i} token_str mismatch" + + # Check vocab_idx type based on whether this was an arbitrary string/ + # function thereof (tuple) + if expected_type == "str": # Was a tuple with arbitrary string + # Should have virtual index >= vocab_size + assert target.vocab_idx >= tokenizer.vocab_size, f"Target {i} should have virtual index" + else: # Was int or tokenized string + # Should have real vocab index < vocab_size + assert target.vocab_idx < tokenizer.vocab_size, ( + f"Target {i} should have real vocab index" + ) + + # Test-specific assertions + if test_id == "mixed": + # First and third elements from tuples should have provided probs + assert abs(targets.logit_probabilities[0].item() - 0.5) < 1e-6 + assert abs(targets.logit_probabilities[2].item() - 0.3) < 1e-6 + elif test_id == "all_tuples": + assert torch.allclose(targets.logit_probabilities, torch.tensor([0.4, 0.3, 0.3])) + elif test_id == "all_strings": + # All should be tokenized - check via tokens property + assert all(len(t) > 0 for t in targets.tokens) + + +@pytest.mark.parametrize( + "attribution_targets,max_n_logits,desired_prob,test_id", + [ + (None, 5, 0.8, "salient"), + (torch.tensor([5, 10, 15]), None, None, "specific_indices"), + ], + ids=["salient", "specific_indices"], +) +def test_attribution_targets_auto_modes( + mock_data, attribution_targets, max_n_logits, desired_prob, test_id +): + """Test AttributionTargets with automatic modes (None and Tensor).""" + logits, unembed_proj, tokenizer = mock_data + + kwargs = {} + if max_n_logits is not None: + kwargs["max_n_logits"] = max_n_logits + if desired_prob is not None: + kwargs["desired_logit_prob"] = desired_prob + + targets = AttributionTargets( + attribution_targets=attribution_targets, + logits=logits, + 
unembed_proj=unembed_proj, + tokenizer=tokenizer, + **kwargs, + ) + + # Verify basic structure - all targets should be LogitTarget instances + from circuit_tracer.attribution.targets import LogitTarget + + assert isinstance(targets.logit_targets, list) + assert all(isinstance(t, LogitTarget) for t in targets.logit_targets) + # All should have real vocab indices (< vocab_size) + assert all(t.vocab_idx < tokenizer.vocab_size for t in targets.logit_targets) + + if test_id == "salient": + assert len(targets) <= max_n_logits + assert len(targets) >= 1 + # Probabilities should sum to at least desired_prob (or hit max_n_logits) + prob_sum = targets.logit_probabilities.sum().item() + assert prob_sum >= desired_prob or len(targets) == max_n_logits + elif test_id == "specific_indices": + # Check vocab_idx matches expected + assert [t.vocab_idx for t in targets.logit_targets] == [5, 10, 15] + assert targets.logit_probabilities.shape == (3,) + assert targets.logit_vectors.shape == (3, 64) + + +@pytest.mark.parametrize( + "targets_list,error_match", + [ + ( + [("token", 0.5)], # Only 2 elements, should be 3 + "exactly 3 elements", + ), + ( + [(5, 0.5, torch.randn(64))], # int instead of str + "str as first element", + ), + ( + [], # Empty list + "cannot be empty", + ), + ], + ids=["invalid_tuple_length", "invalid_tuple_token_type", "empty_list"], +) +def test_attribution_targets_errors(mock_data, targets_list, error_match): + """Test AttributionTargets error handling.""" + logits, unembed_proj, tokenizer = mock_data + + with pytest.raises(ValueError, match=error_match): + AttributionTargets( + attribution_targets=targets_list, # type: ignore + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + +def test_attribution_targets_consistency(mock_data): + """Test that the same inputs produce consistent results.""" + logits, unembed_proj, tokenizer = mock_data + + targets_list = [5, "hello", ("custom", 0.5, torch.randn(64))] + + targets1 = AttributionTargets( + 
attribution_targets=targets_list, + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + targets2 = AttributionTargets( + attribution_targets=targets_list, + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + assert targets1.logit_targets == targets2.logit_targets + + +def test_attribution_targets_tokens_property(mock_data): + """Test tokens property decodes ints and preserves strings.""" + logits, unembed_proj, tokenizer = mock_data + + targets_list = [ + 5, + ("arbitrary", 0.5, torch.randn(64)), + 10, + ] + + targets = AttributionTargets( + attribution_targets=targets_list, + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + tokens = targets.tokens + + assert isinstance(tokens, list) + assert len(tokens) == 3 + assert tokens[0] == "tok_5" # int decoded with tokenizer + assert tokens[1] == "arbitrary" # str kept as-is + assert tokens[2] == "tok_10" # int decoded with tokenizer + + +@pytest.mark.parametrize( + "test_method,expected_value", + [ + ("to_device", "cpu"), + ("repr", "AttributionTargets(n=5, keys=[1, 2, 3]...)"), + ("len", 5), + ], + ids=["to_device", "repr", "len"], +) +def test_attribution_targets_utility_methods(mock_data, test_method, expected_value): + """Test utility methods: to(), __repr__(), and __len__().""" + logits, unembed_proj, tokenizer = mock_data + + # Use same targets for all tests + targets_list = [1, 2, 3, 4, 5] + + targets = AttributionTargets( + attribution_targets=targets_list, + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + if test_method == "to_device": + # Test device transfer + targets_cpu = targets.to("cpu") + assert isinstance(targets_cpu, AttributionTargets) + assert targets_cpu.logit_targets == targets.logit_targets + assert targets_cpu.logit_probabilities.device.type == expected_value + assert targets_cpu.logit_vectors.device.type == expected_value + assert targets_cpu.tokenizer is tokenizer # Verify tokenizer preserved + elif 
test_method == "repr": + # Test string representation + repr_str = repr(targets) + assert "AttributionTargets" in repr_str + assert "n=5" in repr_str + # Check for "targets=" since keys are now LogitTarget instances + assert "targets=" in repr_str + elif test_method == "len": + # Test __len__ + assert len(targets) == expected_value + + +@pytest.mark.parametrize( + "targets_list,expected_indices,test_id", + [ + # All real vocab tokens + ([5, 10, 15], [5, 10, 15], "all_real"), + # Mixed real and virtual (arbitrary strings) + ([5, ("arb", 0.5, torch.randn(64)), 10], lambda vs: [5, vs + 1, 10], "mixed"), + # All virtual (arbitrary strings) + ( + [ + ("t1", 0.3, torch.randn(64)), + ("t2", 0.4, torch.randn(64)), + ("t3", 0.3, torch.randn(64)), + ], + lambda vs: [vs + 0, vs + 1, vs + 2], + "all_virtual", + ), + ], + ids=["all_real", "mixed", "all_virtual"], +) +def test_attribution_targets_vocab_indices(mock_data, targets_list, expected_indices, test_id): + """Test vocab_indices property with various combinations of real and virtual tokens.""" + logits, unembed_proj, tokenizer = mock_data + vocab_size = tokenizer.vocab_size # 100 + + targets = AttributionTargets( + attribution_targets=targets_list, + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + # Compute expected indices (may depend on vocab_size for virtual indices) + if callable(expected_indices): + expected = expected_indices(vocab_size) + else: + expected = expected_indices + + vocab_indices = targets.vocab_indices + assert vocab_indices == expected + assert all(isinstance(idx, int) for idx in vocab_indices) + + # Verify virtual index detection + if test_id == "all_real": + assert not targets.has_virtual_indices + # Should be able to get token_ids + token_ids = targets.token_ids + assert torch.equal(token_ids, torch.tensor(expected, dtype=torch.long)) + else: + assert targets.has_virtual_indices + # Should raise when trying to get token_ids + with pytest.raises(ValueError, match="virtual 
indices"): + _ = targets.token_ids + + +@pytest.mark.parametrize( + "targets_list,error_match", + [ + # Out of range token ID + ([110], "out of vocabulary range.*100"), + # Negative token ID + ([-5], "out of vocabulary range"), + # Tensor with out of range + (torch.tensor([5, 105, 10]), "Token indices must be in range"), + ], + ids=["token_id_out_of_range", "token_id_negative", "tensor_out_of_range"], +) +def test_attribution_targets_validation_errors(mock_data, targets_list, error_match): + """Test validation catches various invalid token ID errors.""" + logits, unembed_proj, tokenizer = mock_data + + with pytest.raises(ValueError, match=error_match): + AttributionTargets( + attribution_targets=targets_list, # type: ignore + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/test_attributions_gemma.py b/tests/test_attributions_gemma.py index c86efe28..5e742bdf 100644 --- a/tests/test_attributions_gemma.py +++ b/tests/test_attributions_gemma.py @@ -24,7 +24,7 @@ def verify_token_and_error_edges( s = graph.input_tokens adjacency_matrix = graph.adjacency_matrix.to(get_default_device()) active_features = graph.active_features.to(get_default_device()) - logit_tokens = graph.logit_tokens.to(get_default_device()) + logit_tokens = graph.logit_token_ids total_active_features = active_features.size(0) pos_start = 1 # ignore first token (BOS) @@ -119,7 +119,7 @@ def verify_feature_edges( s = graph.input_tokens adjacency_matrix = graph.adjacency_matrix.to(get_default_device()) active_features = graph.active_features.to(get_default_device()) - logit_tokens = graph.logit_tokens.to(get_default_device()) + logit_tokens = graph.logit_token_ids total_active_features = active_features.size(0) logits, activation_cache = model.get_activations(s, apply_activation_function=False) diff --git a/tests/test_graph.py b/tests/test_graph.py index ab35f93f..e24b6cbc 100644 --- 
a/tests/test_graph.py +++ b/tests/test_graph.py @@ -1,7 +1,9 @@ import numpy as np +import pytest import torch from transformer_lens import HookedTransformerConfig +from circuit_tracer.attribution.targets import LogitTarget from circuit_tracer.graph import Graph, compute_edge_influence, compute_node_influence from circuit_tracer.utils import get_default_device @@ -113,7 +115,7 @@ def test_small_graph(): active_features=torch.tensor([1, 2, 3, 4, 5]), adjacency_matrix=adjacency_matrix, cfg=cfg, - logit_tokens=torch.tensor([0]), + logit_targets=[LogitTarget(token_str="tok_0", vocab_idx=0)], logit_probabilities=torch.tensor([1.0]), selected_features=torch.tensor([1, 2, 3, 4, 5]), activation_values=torch.tensor([1, 2, 3, 4, 5]) * 2, @@ -131,3 +133,156 @@ def test_small_graph(): edge_influence_on_logits = compute_edge_influence(pruned_adjacency_matrix, logit_weights) assert torch.allclose(edge_influence_on_logits, post_pruning_edge_matrix) + + +def test_graph_with_tensor_logit_targets(): + """Test that Graph accepts legacy tensor format for logit_targets.""" + cfg = HookedTransformerConfig.from_dict( + { + "n_layers": 2, + "d_model": 8, + "n_ctx": 32, + "d_head": 4, + "n_heads": 2, + "d_mlp": 16, + "act_fn": "gelu", + "d_vocab": 50257, # GPT-2 vocab size + "model_name": "test-model", + "device": get_default_device(), + } + ) + + adjacency_matrix = torch.zeros([10, 10]) + adjacency_matrix[9, 5] = 1.0 + + # Test with tensor format - token_str will be empty + graph_tensor = Graph( + input_string="test", + input_tokens=torch.tensor([1, 2, 3]), + active_features=torch.tensor([[0, 0, 5]]), + adjacency_matrix=adjacency_matrix, + cfg=cfg, + logit_targets=torch.tensor([262, 290, 314]), # Tensor format + logit_probabilities=torch.tensor([0.5, 0.3, 0.2]), + selected_features=torch.tensor([0]), + activation_values=torch.tensor([1.5]), + ) + + # Verify conversion to LogitTarget list with empty token strings + assert len(graph_tensor.logit_targets) == 3 + assert 
graph_tensor.logit_targets[0].vocab_idx == 262 + assert graph_tensor.logit_targets[1].vocab_idx == 290 + assert graph_tensor.logit_targets[2].vocab_idx == 314 + # Token strings are empty when constructed from tensor + assert graph_tensor.logit_targets[0].token_str == "" + assert graph_tensor.logit_targets[1].token_str == "" + assert graph_tensor.logit_targets[2].token_str == "" + + # Verify properties work + assert graph_tensor.vocab_indices == [262, 290, 314] + assert not graph_tensor.has_virtual_indices + assert torch.equal(graph_tensor.logit_token_ids, torch.tensor([262, 290, 314])) + + # Test with LogitTarget list format (current) + graph_list = Graph( + input_string="test", + input_tokens=torch.tensor([1, 2, 3]), + active_features=torch.tensor([[0, 0, 5]]), + adjacency_matrix=adjacency_matrix, + cfg=cfg, + logit_targets=[ + LogitTarget(token_str=" the", vocab_idx=262), + LogitTarget(token_str=" a", vocab_idx=290), + LogitTarget(token_str=" and", vocab_idx=314), + ], + logit_probabilities=torch.tensor([0.5, 0.3, 0.2]), + selected_features=torch.tensor([0]), + activation_values=torch.tensor([1.5]), + ) + + # Verify both formats produce same vocab_indices + assert graph_tensor.vocab_indices == graph_list.vocab_indices + assert graph_tensor.vocab_size == graph_list.vocab_size + + +@pytest.mark.parametrize( + "logit_targets_input,expected_token_strs", + [ + pytest.param( + torch.tensor([262, 290, 314]), + ["", "", ""], + id="tensor_format", + ), + pytest.param( + [ + LogitTarget(token_str=" the", vocab_idx=262), + LogitTarget(token_str=" a", vocab_idx=290), + LogitTarget(token_str=" and", vocab_idx=314), + ], + [" the", " a", " and"], + id="logit_target_format", + ), + ], +) +def test_graph_serialization_with_logit_targets(logit_targets_input, expected_token_strs): + """Test that Graph serialization works with both tensor and LogitTarget formats.""" + import tempfile + import os + + cfg = HookedTransformerConfig.from_dict( + { + "n_layers": 2, + "d_model": 8, + 
"n_ctx": 32, + "d_head": 4, + "n_heads": 2, + "d_mlp": 16, + "act_fn": "gelu", + "d_vocab": 50257, + "model_name": "test-model", + "device": get_default_device(), + } + ) + + adjacency_matrix = torch.zeros([10, 10]) + adjacency_matrix[9, 5] = 1.0 + + # Create graph with parameterized format + original_graph = Graph( + input_string="test", + input_tokens=torch.tensor([1, 2, 3]), + active_features=torch.tensor([[0, 0, 5]]), + adjacency_matrix=adjacency_matrix, + cfg=cfg, + logit_targets=logit_targets_input, + logit_probabilities=torch.tensor([0.5, 0.3, 0.2]), + selected_features=torch.tensor([0]), + activation_values=torch.tensor([1.5]), + vocab_size=50257, + ) + + # Save and load + with tempfile.NamedTemporaryFile(delete=False, suffix=".pt") as tmp: + tmp_path = tmp.name + + try: + original_graph.to_pt(tmp_path) + loaded_graph = Graph.from_pt(tmp_path) + + # Verify loaded graph has correct data + assert loaded_graph.vocab_indices == [262, 290, 314] + assert loaded_graph.vocab_size == 50257 + assert not loaded_graph.has_virtual_indices + assert torch.equal(loaded_graph.logit_token_ids, torch.tensor([262, 290, 314])) + assert torch.equal(loaded_graph.logit_probabilities, torch.tensor([0.5, 0.3, 0.2])) + + # Verify LogitTarget objects were preserved with expected token strings + assert len(loaded_graph.logit_targets) == 3 + assert all(isinstance(lt, LogitTarget) for lt in loaded_graph.logit_targets) + assert loaded_graph.logit_targets[0].token_str == expected_token_strs[0] + assert loaded_graph.logit_targets[1].token_str == expected_token_strs[1] + assert loaded_graph.logit_targets[2].token_str == expected_token_strs[2] + + finally: + if os.path.exists(tmp_path): + os.unlink(tmp_path) From 2b52ba6e8692d6d024f6e0600089c92329794e75 Mon Sep 17 00:00:00 2001 From: Daniel Dale Date: Fri, 7 Nov 2025 10:01:13 -0800 Subject: [PATCH 03/18] slight clarification in a couple comments based on copilot review --- circuit_tracer/attribution/attribute.py | 4 ++-- 
circuit_tracer/utils/create_graph_files.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/circuit_tracer/attribution/attribute.py b/circuit_tracer/attribution/attribute.py index d0698253..60a112ef 100644 --- a/circuit_tracer/attribution/attribute.py +++ b/circuit_tracer/attribution/attribute.py @@ -82,8 +82,8 @@ def attribute( - torch.Tensor: Tensor of token indices - list[tuple[str, float, torch.Tensor] | int | str]: List where each element can be: - * int or str: Token ID/string (auto-computes probability & vector, - returns tensor of indices) + * int or str: Token ID/string (auto-resolves probability and + unembed vector) * tuple[str, float, torch.Tensor]: Fully specified logit spec with arbitrary string tokens (or functions thereof) that may not be in vocabulary diff --git a/circuit_tracer/utils/create_graph_files.py b/circuit_tracer/utils/create_graph_files.py index bdcff6db..34a06d7f 100644 --- a/circuit_tracer/utils/create_graph_files.py +++ b/circuit_tracer/utils/create_graph_files.py @@ -56,7 +56,7 @@ def create_nodes(graph: Graph, node_mask, tokenizer, cumulative_scores): # vocab_idx can be either a valid token_id (< vocab_size) or a virtual # index (>= vocab_size) for arbitrary strings/functions thereof. The virtual indices - # encode the position in the list as: vocab_size + position. + # encode the position in the logit_targets list as: vocab_size + position. token, vocab_idx = graph.logit_targets[pos] nodes[node_idx] = Node.logit_node( From 418b17fd2b40cbec1ded9891e918d3e577587bca Mon Sep 17 00:00:00 2001 From: Dan Dale Date: Tue, 16 Dec 2025 05:40:08 -0800 Subject: [PATCH 04/18] Allow `offload_modules` to handle single module and container offloading cases (#51) * fix: handle single module in offload_modules Fix TypeError when passing a single module (e.g., CrossLayerTranscoder) to offload_modules instead of a list. Now properly handles single modules, lists, and PyTorch container types (ModuleList, ModuleDict, Sequential). 
* minor aesthetic change, slight simplification of logic --- circuit_tracer/utils/disk_offload.py | 31 ++++- tests/utils/test_disk_offload.py | 201 +++++++++++++++++++++++++++ 2 files changed, 228 insertions(+), 4 deletions(-) create mode 100644 tests/utils/test_disk_offload.py diff --git a/circuit_tracer/utils/disk_offload.py b/circuit_tracer/utils/disk_offload.py index b605f6af..25ae78db 100644 --- a/circuit_tracer/utils/disk_offload.py +++ b/circuit_tracer/utils/disk_offload.py @@ -3,6 +3,7 @@ import tempfile from typing import Literal +from torch import nn from safetensors.torch import load_file, save_file _offload_files = set() @@ -13,7 +14,8 @@ @atexit.register def cleanup_offload_files(): for f in _offload_files: - os.remove(f) + if os.path.exists(f): + os.remove(f) def cleanup_all_offload_files(): @@ -35,7 +37,8 @@ def disk_offload_module(module): module.to(device="meta") def reload_handle(device=None): - module.load_state_dict(load_file(f.name, device=(device or str(org_device))), assign=True) + target_device = str(device or org_device) + module.load_state_dict(load_file(f.name, device=target_device), assign=True) os.remove(f.name) _offload_files.remove(f.name) @@ -52,6 +55,26 @@ def reload_handle(): return reload_handle -def offload_modules(modules, offload_type: Literal["cpu", "disk"]): +def offload_modules( + modules: list | nn.Module | nn.ModuleList | nn.ModuleDict | nn.Sequential, + offload_type: Literal["cpu", "disk"], +) -> list: + """Offload one or more modules to CPU or disk. 
+ + Args: + modules: A single module, list of modules, or PyTorch module container + (ModuleList, ModuleDict, Sequential) + offload_type: Type of offload - "cpu" or "disk" + + Returns: + List of reload handles, one per module + """ offload_fn = disk_offload_module if offload_type == "disk" else cpu_offload_module - return [offload_fn(module) for module in modules] + + if isinstance(modules, nn.ModuleDict): + mods = modules.values() + elif isinstance(modules, (list, nn.ModuleList, nn.Sequential)): + mods = modules + else: + mods = [modules] + return [offload_fn(module) for module in mods] diff --git a/tests/utils/test_disk_offload.py b/tests/utils/test_disk_offload.py new file mode 100644 index 00000000..a9c1889c --- /dev/null +++ b/tests/utils/test_disk_offload.py @@ -0,0 +1,201 @@ +"""Tests for disk_offload module functions.""" + +import pytest +import torch + +from circuit_tracer.transcoder.cross_layer_transcoder import CrossLayerTranscoder +from circuit_tracer.transcoder.single_layer_transcoder import SingleLayerTranscoder +from circuit_tracer.utils.disk_offload import ( + cleanup_all_offload_files, + cpu_offload_module, + disk_offload_module, + offload_modules, +) + + +@pytest.fixture +def clt_module(): + """Create a small CLT.""" + return CrossLayerTranscoder( + n_layers=2, + d_transcoder=16, + d_model=8, + lazy_decoder=False, + lazy_encoder=False, + device=torch.device("cpu"), + ) + + +@pytest.fixture +def plt_module(): + """Create a small PLT.""" + return SingleLayerTranscoder( + d_model=8, + d_transcoder=16, + activation_function=torch.nn.functional.relu, + layer_idx=0, + lazy_decoder=False, + lazy_encoder=False, + device=torch.device("cpu"), + ) + + +@pytest.mark.parametrize("module_fixture", ["clt_module", "plt_module"]) +@pytest.mark.parametrize("explicit_device", [True, False]) +def test_disk_offload_module(module_fixture, explicit_device, request): + """Test disk offload with CLT and PLT architectures.""" + module = 
request.getfixturevalue(module_fixture) + + # Store original state + orig_param = next(module.parameters()).data.clone() + orig_device = next(module.parameters()).device + + # Offload to disk + reload_handle = disk_offload_module(module) + + # Verify module is on meta device + assert next(module.parameters()).device.type == "meta" + + # Reload with or without explicit device + if explicit_device: + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + reload_handle(device=device) + # Should be on the explicitly requested device + assert next(module.parameters()).device.type == device.type + assert torch.allclose(next(module.parameters()).data, orig_param.to(device)) + else: + reload_handle() + # Should be restored to original device + assert next(module.parameters()).device.type == orig_device.type + assert torch.allclose(next(module.parameters()).data, orig_param) + + +@pytest.mark.parametrize("module_fixture", ["clt_module", "plt_module"]) +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_cpu_offload_module_cuda(module_fixture, request): + """Test CPU offload with CLT and PLT on CUDA.""" + module = request.getfixturevalue(module_fixture) + + # Move to CUDA + module.to("cuda") + orig_param = next(module.parameters()).data.clone() + + # Offload to CPU + reload_handle = cpu_offload_module(module) + assert next(module.parameters()).device.type == "cpu" + + # Reload to CUDA + reload_handle() + assert next(module.parameters()).device.type == "cuda" + assert torch.allclose(next(module.parameters()).data, orig_param.to("cuda")) + + +def test_cpu_offload_module_cpu(clt_module): + """Test CPU offload when already on CPU.""" + orig_device = next(clt_module.parameters()).device + + reload_handle = cpu_offload_module(clt_module) + assert next(clt_module.parameters()).device.type == "cpu" + + reload_handle() + assert next(clt_module.parameters()).device == orig_device + + +@pytest.mark.parametrize( + 
"modules_factory,expected_count", + [ + # Single module + ( + lambda: CrossLayerTranscoder( + n_layers=2, d_transcoder=16, d_model=8, lazy_decoder=False, lazy_encoder=False + ), + 1, + ), + # List of CLTs + ( + lambda: [ + CrossLayerTranscoder( + n_layers=2, d_transcoder=16, d_model=8, lazy_decoder=False, lazy_encoder=False + ), + CrossLayerTranscoder( + n_layers=2, d_transcoder=16, d_model=8, lazy_decoder=False, lazy_encoder=False + ), + ], + 2, + ), + # ModuleDict with CLTs + ( + lambda: torch.nn.ModuleDict( + { + "clt1": CrossLayerTranscoder( + n_layers=2, + d_transcoder=16, + d_model=8, + lazy_decoder=False, + lazy_encoder=False, + ), + "clt2": CrossLayerTranscoder( + n_layers=2, + d_transcoder=16, + d_model=8, + lazy_decoder=False, + lazy_encoder=False, + ), + } + ), + 2, + ), + ], + ids=["single_clt", "list_clt", "moduledict_clt"], +) +@pytest.mark.parametrize("offload_type", ["cpu", "disk"]) +def test_offload_modules(modules_factory, expected_count, offload_type): + """Test offload_modules with various container types using CLT architecture.""" + modules = modules_factory() + expected_device = "cpu" if offload_type == "cpu" else "meta" + + handles = offload_modules(modules, offload_type=offload_type) + + # Verify handles + assert isinstance(handles, list) + assert len(handles) == expected_count + for handle in handles: + assert callable(handle) + + # Verify modules are offloaded + if isinstance(modules, torch.nn.Module) and not isinstance( + modules, (torch.nn.ModuleList, torch.nn.ModuleDict, torch.nn.Sequential) + ): + assert next(modules.parameters()).device.type == expected_device + else: + module_iter = modules.values() if isinstance(modules, torch.nn.ModuleDict) else modules + for module in module_iter: + assert next(module.parameters()).device.type == expected_device + + # Cleanup disk offloads + if offload_type == "disk": + for handle in handles: + handle() + + +def test_cleanup_offload_files(clt_module): + """Test cleanup removes offload files.""" + 
# Create some offload files + modules = [clt_module] + offload_modules(modules, offload_type="disk") + + # Cleanup + n_removed = cleanup_all_offload_files() + + # Should have removed files + assert n_removed >= 1 + + +def test_cleanup_when_no_files(): + """Test cleanup when no offload files exist.""" + # First cleanup any existing files + cleanup_all_offload_files() + + # Second cleanup should find nothing + n_removed = cleanup_all_offload_files() + assert n_removed == 0 From 45d6eee1cc4f1ed834043d20762ac56753961598 Mon Sep 17 00:00:00 2001 From: Dan Dale Date: Tue, 16 Dec 2025 06:24:15 -0800 Subject: [PATCH 05/18] use a smaller non-default batch_size for test_gemma_2_2b to expand the range of gpus the test suite runs with (#50) --- tests/test_attributions_gemma.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_attributions_gemma.py b/tests/test_attributions_gemma.py index 5e742bdf..a3392dbb 100644 --- a/tests/test_attributions_gemma.py +++ b/tests/test_attributions_gemma.py @@ -377,7 +377,7 @@ def verify_large_gemma_model(s: torch.Tensor): def verify_gemma_2_2b(s: str): model = ReplacementModel.from_pretrained("google/gemma-2-2b", "gemma") - graph = attribute(s, model) + graph = attribute(s, model, batch_size=256) print("Changing logit softcap to 0, as the logits will otherwise be off.") with model.zero_softcap(): From 3d8264a9c061a3cb5b1ea696f0e7a596ba3f2c83 Mon Sep 17 00:00:00 2001 From: Daniel Dale Date: Tue, 13 Jan 2026 12:32:06 -0800 Subject: [PATCH 06/18] initial changes to adapt original `AttributionTargets` encapsulation PR for new multi-backend arch --- circuit_tracer/attribution/attribute.py | 276 +---- .../attribution/attribute_nnsight.py | 307 +++++ .../attribution/attribute_transformerlens.py | 285 +++++ circuit_tracer/attribution/context_nnsight.py | 215 ++++ ...{context.py => context_transformerlens.py} | 11 +- circuit_tracer/graph.py | 68 +- circuit_tracer/replacement_model/__init__.py | 9 + 
.../replacement_model/replacement_model.py | 164 +++ .../replacement_model_nnsight.py | 1063 +++++++++++++++++ .../replacement_model_transformerlens.py} | 221 ++-- circuit_tracer/utils/__init__.py | 5 +- circuit_tracer/utils/create_graph_files.py | 23 +- circuit_tracer/utils/salient_logits.py | 47 + circuit_tracer/utils/tl_nnsight_mapping.py | 283 +++++ 14 files changed, 2607 insertions(+), 370 deletions(-) create mode 100644 circuit_tracer/attribution/attribute_nnsight.py create mode 100644 circuit_tracer/attribution/attribute_transformerlens.py create mode 100644 circuit_tracer/attribution/context_nnsight.py rename circuit_tracer/attribution/{context.py => context_transformerlens.py} (96%) create mode 100644 circuit_tracer/replacement_model/__init__.py create mode 100644 circuit_tracer/replacement_model/replacement_model.py create mode 100644 circuit_tracer/replacement_model/replacement_model_nnsight.py rename circuit_tracer/{replacement_model.py => replacement_model/replacement_model_transformerlens.py} (82%) create mode 100644 circuit_tracer/utils/salient_logits.py create mode 100644 circuit_tracer/utils/tl_nnsight_mapping.py diff --git a/circuit_tracer/attribution/attribute.py b/circuit_tracer/attribution/attribute.py index 60a112ef..f59f0b30 100644 --- a/circuit_tracer/attribution/attribute.py +++ b/circuit_tracer/attribution/attribute.py @@ -1,68 +1,31 @@ """ -Build an **attribution graph** that captures the *direct*, *linear* effects -between features and next-token logits for a *prompt-specific* -**local replacement model**. +Unified attribution interface that routes to the correct backend implementation. -High-level algorithm (matches the 2025 ``Attribution Graphs`` paper): -https://transformer-circuits.pub/2025/attribution-graphs/methods.html - -1. **Local replacement model** - we configure gradients to flow only through - linear components of the network, effectively bypassing attention mechanisms, - MLP non-linearities, and layer normalization scales. 
-2. **Forward pass** - record residual-stream activations and mark every active - feature. -3. **Backward passes** - for each source node (feature or logit), inject a - *custom* gradient that selects its encoder/decoder direction. Because the - model is linear in the residual stream under our freezes, this contraction - equals the *direct effect* A_{s->t}. -4. **Assemble graph** - store edge weights in a dense matrix and package a - ``Graph`` object. Downstream utilities can *prune* the graph to the subset - needed for interpretation. +This module provides a unified entry point for computing attribution graphs, +automatically dispatching to either the TransformerLens or NNSight implementation +based on the backend type of the provided ReplacementModel. """ -import logging -import time -from typing import Literal +from collections.abc import Sequence +from typing import TYPE_CHECKING, Literal import torch -from tqdm import tqdm -from circuit_tracer.attribution.targets import AttributionTargets from circuit_tracer.graph import Graph -from circuit_tracer.replacement_model import ReplacementModel -from circuit_tracer.utils import get_default_device -from circuit_tracer.utils.disk_offload import offload_modules - - -def compute_partial_influences(edge_matrix, logit_p, row_to_node_index, max_iter=128, device=None): - """Compute partial influences using power iteration method.""" - device = device or get_default_device() - - normalized_matrix = torch.empty_like(edge_matrix, device=device).copy_(edge_matrix) - normalized_matrix = normalized_matrix.abs_() - normalized_matrix /= normalized_matrix.sum(dim=1, keepdim=True).clamp(min=1e-8) - - influences = torch.zeros(edge_matrix.shape[1], device=normalized_matrix.device) - prod = torch.zeros(edge_matrix.shape[1], device=normalized_matrix.device) - prod[-len(logit_p) :] = logit_p - - for _ in range(max_iter): - prod = prod[row_to_node_index] @ normalized_matrix - if not prod.any(): - break - influences += prod - else: - raise 
RuntimeError("Failed to converge") - return influences +if TYPE_CHECKING: + from circuit_tracer.replacement_model.replacement_model_nnsight import NNSightReplacementModel + from circuit_tracer.replacement_model.replacement_model_transformerlens import ( + TransformerLensReplacementModel, + ) def attribute( prompt: str | torch.Tensor | list[int], - model: ReplacementModel, + model: "NNSightReplacementModel | TransformerLensReplacementModel", *, attribution_targets: ( - list[tuple[str, float, torch.Tensor] | int | str] | torch.Tensor | None + Sequence[tuple[str, float, torch.Tensor] | int | str] | torch.Tensor | None ) = None, max_n_logits: int = 10, desired_logit_prob: float = 0.95, @@ -74,13 +37,16 @@ def attribute( ) -> Graph: """Compute an attribution graph for *prompt*. + This function automatically routes to the correct attribution implementation + based on the type of ReplacementModel provided. + Args: prompt: Text, token ids, or tensor - will be tokenized if str. - model: Frozen ``ReplacementModel`` + model: Frozen ``ReplacementModel`` (either nnsight or transformerlens backend) attribution_targets: Flexible attribution target specification in one of several formats: - None: Auto-select salient logits based on probability threshold - torch.Tensor: Tensor of token indices - - list[tuple[str, float, torch.Tensor] | int | str]: List where + - Sequence[tuple[str, float, torch.Tensor] | int | str]: Sequence where each element can be: * int or str: Token ID/string (auto-resolves probability and unembed vector) @@ -102,23 +68,12 @@ def attribute( Graph: Fully dense adjacency (unpruned). 
""" - logger = logging.getLogger("attribution") - logger.propagate = False - handler = None - if verbose and not logger.handlers: - handler = logging.StreamHandler() - formatter = logging.Formatter("%(message)s") - handler.setFormatter(formatter) - logger.addHandler(handler) - logger.setLevel(logging.INFO) - else: - logger.setLevel(logging.WARNING) + if model.backend == "nnsight": + from .attribute_nnsight import attribute as attribute_nnsight - offload_handles = [] - try: - return _run_attribution( - model=model, + return attribute_nnsight( prompt=prompt, + model=model, # type: ignore[arg-type] attribution_targets=attribution_targets, max_n_logits=max_n_logits, desired_logit_prob=desired_logit_prob, @@ -126,181 +81,20 @@ def attribute( max_feature_nodes=max_feature_nodes, offload=offload, verbose=verbose, - offload_handles=offload_handles, update_interval=update_interval, - logger=logger, - ) - finally: - for reload_handle in offload_handles: - reload_handle() - - if handler: - logger.removeHandler(handler) - - -def _run_attribution( - model, - prompt, - attribution_targets, - max_n_logits, - desired_logit_prob, - batch_size, - max_feature_nodes, - offload, - verbose, - offload_handles, - logger, - update_interval=4, -): - start_time = time.time() - # Phase 0: precompute - logger.info("Phase 0: Precomputing activations and vectors") - phase_start = time.time() - input_ids = model.ensure_tokenized(prompt) - - ctx = model.setup_attribution(input_ids) - activation_matrix = ctx.activation_matrix - - logger.info(f"Precomputation completed in {time.time() - phase_start:.2f}s") - logger.info(f"Found {ctx.activation_matrix._nnz()} active features") - - if offload: - offload_handles += offload_modules(model.transcoders, offload) - - # Phase 1: forward pass - logger.info("Phase 1: Running forward pass") - phase_start = time.time() - with ctx.install_hooks(model): - residual = model.forward(input_ids.expand(batch_size, -1), stop_at_layer=model.cfg.n_layers) - 
ctx._resid_activations[-1] = model.ln_final(residual) - logger.info(f"Forward pass completed in {time.time() - phase_start:.2f}s") - - if offload: - offload_handles += offload_modules([block.mlp for block in model.blocks], offload) - - # Phase 2: build input vector list - logger.info("Phase 2: Building input vectors") - phase_start = time.time() - feat_layers, feat_pos, _ = activation_matrix.indices() - n_layers, n_pos, _ = activation_matrix.shape - total_active_feats = activation_matrix._nnz() - - targets = AttributionTargets( - attribution_targets=attribution_targets, - logits=ctx.logits[0, -1], - unembed_proj=model.unembed.W_U, - tokenizer=model.tokenizer, - max_n_logits=max_n_logits, - desired_logit_prob=desired_logit_prob, - ) - - if attribution_targets is None: - logger.info( - f"Selected {len(targets)} logits with cumulative probability " - f"{targets.logit_probabilities.sum().item():.4f}" ) + else: + from .attribute_transformerlens import attribute as attribute_transformerlens - if offload: - offload_handles += offload_modules([model.unembed, model.embed], offload) - - logit_offset = len(feat_layers) + (n_layers + 1) * n_pos - n_logits = len(targets) - total_nodes = logit_offset + n_logits - - max_feature_nodes = min(max_feature_nodes or total_active_feats, total_active_feats) - logger.info(f"Will include {max_feature_nodes} of {total_active_feats} feature nodes") - - edge_matrix = torch.zeros(max_feature_nodes + n_logits, total_nodes) - # Maps row indices in edge_matrix to original feature/node indices - # First populated with logit node IDs, then feature IDs in attribution order - row_to_node_index = torch.zeros(max_feature_nodes + n_logits, dtype=torch.int32) - logger.info(f"Input vectors built in {time.time() - phase_start:.2f}s") - - # Phase 3: logit attribution - logger.info("Phase 3: Computing logit attributions") - phase_start = time.time() - for i in range(0, len(targets), batch_size): - batch = targets.logit_vectors[i : i + batch_size] - rows = 
ctx.compute_batch( - layers=torch.full((batch.shape[0],), n_layers), - positions=torch.full((batch.shape[0],), n_pos - 1), - inject_values=batch, - ) - edge_matrix[i : i + batch.shape[0], :logit_offset] = rows.cpu() - row_to_node_index[i : i + batch.shape[0]] = ( - torch.arange(i, i + batch.shape[0]) + logit_offset + return attribute_transformerlens( + prompt=prompt, + model=model, # type: ignore[arg-type] + attribution_targets=attribution_targets, + max_n_logits=max_n_logits, + desired_logit_prob=desired_logit_prob, + batch_size=batch_size, + max_feature_nodes=max_feature_nodes, + offload=offload, + verbose=verbose, + update_interval=update_interval, ) - logger.info(f"Logit attributions completed in {time.time() - phase_start:.2f}s") - - # Phase 4: feature attribution - logger.info("Phase 4: Computing feature attributions") - phase_start = time.time() - st = n_logits - visited = torch.zeros(total_active_feats, dtype=torch.bool) - n_visited = 0 - - pbar = tqdm(total=max_feature_nodes, desc="Feature influence computation", disable=not verbose) - - while n_visited < max_feature_nodes: - if max_feature_nodes == total_active_feats: - pending = torch.arange(total_active_feats) - else: - influences = compute_partial_influences( - edge_matrix[:st], targets.logit_probabilities, row_to_node_index[:st] - ) - feature_rank = torch.argsort(influences[:total_active_feats], descending=True).cpu() - queue_size = min(update_interval * batch_size, max_feature_nodes - n_visited) - pending = feature_rank[~visited[feature_rank]][:queue_size] - - queue = [pending[i : i + batch_size] for i in range(0, len(pending), batch_size)] - - for idx_batch in queue: - n_visited += len(idx_batch) - - rows = ctx.compute_batch( - layers=feat_layers[idx_batch], - positions=feat_pos[idx_batch], - inject_values=ctx.encoder_vecs[idx_batch], - retain_graph=n_visited < max_feature_nodes, - ) - - end = min(st + batch_size, st + rows.shape[0]) - edge_matrix[st:end, :logit_offset] = rows.cpu() - 
row_to_node_index[st:end] = idx_batch - visited[idx_batch] = True - st = end - pbar.update(len(idx_batch)) - - pbar.close() - logger.info(f"Feature attributions completed in {time.time() - phase_start:.2f}s") - - # Phase 5: packaging graph - selected_features = torch.where(visited)[0] - if max_feature_nodes < total_active_feats: - non_feature_nodes = torch.arange(total_active_feats, total_nodes) - col_read = torch.cat([selected_features, non_feature_nodes]) - edge_matrix = edge_matrix[:, col_read] - - # sort rows such that features are in order - edge_matrix = edge_matrix[row_to_node_index.argsort()] - final_node_count = edge_matrix.shape[1] - full_edge_matrix = torch.zeros(final_node_count, final_node_count) - full_edge_matrix[:max_feature_nodes] = edge_matrix[:max_feature_nodes] - full_edge_matrix[-n_logits:] = edge_matrix[max_feature_nodes:] - - graph = Graph( - input_string=model.tokenizer.decode(input_ids), - input_tokens=input_ids, - attribution_targets=targets, - active_features=activation_matrix.indices().T, - activation_values=activation_matrix.values(), - selected_features=selected_features, - adjacency_matrix=full_edge_matrix, - cfg=model.cfg, - scan=model.scan, - ) - - total_time = time.time() - start_time - logger.info(f"Attribution completed in {total_time:.2f}s") - - return graph diff --git a/circuit_tracer/attribution/attribute_nnsight.py b/circuit_tracer/attribution/attribute_nnsight.py new file mode 100644 index 00000000..049d4a3c --- /dev/null +++ b/circuit_tracer/attribution/attribute_nnsight.py @@ -0,0 +1,307 @@ +""" +Build an **attribution graph** that captures the *direct*, *linear* effects +between features and next-token logits for a *prompt-specific* +**local replacement model** using the NNSight backend. + +High-level algorithm (matches the 2025 ``Attribution Graphs`` paper): +https://transformer-circuits.pub/2025/attribution-graphs/methods.html + +1. 
**Local replacement model** - we configure gradients to flow only through + linear components of the network, effectively bypassing attention mechanisms, + MLP non-linearities, and layer normalization scales. +2. **Forward pass** - record residual-stream activations and mark every active + feature. +3. **Backward passes** - for each source node (feature or logit), inject a + *custom* gradient that selects its encoder/decoder direction. Because the + model is linear in the residual stream under our freezes, this contraction + equals the *direct effect* A_{s->t}. +4. **Assemble graph** - store edge weights in a dense matrix and package a + ``Graph`` object. Downstream utilities can *prune* the graph to the subset + needed for interpretation. +""" + +import logging +import time +from collections.abc import Sequence +from typing import Literal + +import torch +from tqdm import tqdm + +from circuit_tracer.attribution.targets import AttributionTargets +from circuit_tracer.graph import Graph, compute_partial_influences +from circuit_tracer.replacement_model.replacement_model_nnsight import NNSightReplacementModel +from circuit_tracer.utils.disk_offload import offload_modules + + +def attribute( + prompt: str | torch.Tensor | list[int], + model: NNSightReplacementModel, + *, + attribution_targets: ( + Sequence[tuple[str, float, torch.Tensor] | int | str] | torch.Tensor | None + ) = None, + max_n_logits: int = 10, + desired_logit_prob: float = 0.95, + batch_size: int = 512, + max_feature_nodes: int | None = None, + offload: Literal["cpu", "disk", None] = None, + verbose: bool = False, + update_interval: int = 4, +) -> Graph: + """Compute an attribution graph for *prompt* using NNSight backend. + + Args: + prompt: Text, token ids, or tensor - will be tokenized if str. 
+ model: Frozen ``NNSightReplacementModel`` + attribution_targets: Flexible attribution target specification in one of several formats: + - None: Auto-select salient logits based on probability threshold + - torch.Tensor: Tensor of token indices + - Sequence[tuple[str, float, torch.Tensor] | int | str]: Sequence where + each element can be: + * int or str: Token ID/string (auto-resolves probability and + unembed vector) + * tuple[str, float, torch.Tensor]: Fully specified logit spec with + arbitrary string tokens (or functions thereof) that may not be in + vocabulary + max_n_logits: Max number of logit nodes (used when attribution_targets is None). + desired_logit_prob: Keep logits until cumulative prob >= this value + (used when attribution_targets is None). + batch_size: How many source nodes to process per backward pass. + max_feature_nodes: Max number of feature nodes to include in the graph. + offload: Method for offloading model parameters to save memory. + Options are "cpu" (move to CPU), "disk" (save to disk), + or None (no offloading). + verbose: Whether to show progress information. + update_interval: Number of batches to process before updating the feature ranking. + + Returns: + Graph: Fully dense adjacency (unpruned). 
+    """
+
+    logger = logging.getLogger("attribution")
+    logger.propagate = False
+    handler = None
+    if verbose and not logger.handlers:
+        handler = logging.StreamHandler()
+        formatter = logging.Formatter("%(message)s")
+        handler.setFormatter(formatter)
+        logger.addHandler(handler)
+        logger.setLevel(logging.INFO)
+    else:
+        logger.setLevel(logging.WARNING)
+
+    offload_handles = []
+    try:
+        return _run_attribution(
+            model=model,
+            prompt=prompt,
+            attribution_targets=attribution_targets,
+            max_n_logits=max_n_logits,
+            desired_logit_prob=desired_logit_prob,
+            batch_size=batch_size,
+            max_feature_nodes=max_feature_nodes,
+            offload=offload,
+            verbose=verbose,
+            offload_handles=offload_handles,
+            update_interval=update_interval,
+            logger=logger,
+        )
+    finally:
+        for reload_handle in offload_handles:
+            reload_handle()
+
+        if handler:
+            logger.removeHandler(handler)
+
+
+def _run_attribution(
+    model: NNSightReplacementModel,
+    prompt,
+    attribution_targets,
+    max_n_logits: int,
+    desired_logit_prob: float,
+    batch_size: int,
+    max_feature_nodes: int | None,
+    offload: Literal["cpu", "disk", None],
+    verbose: bool,
+    offload_handles,
+    logger,
+    update_interval: int = 4,
+):
+    start_time = time.time()
+    # Phase 0: precompute
+    logger.info("Phase 0: Precomputing activations and vectors")
+    phase_start = time.time()
+    input_ids = model.ensure_tokenized(prompt)
+
+    ctx = model.setup_attribution(input_ids)
+    activation_matrix = ctx.activation_matrix
+
+    logger.info(f"Precomputation completed in {time.time() - phase_start:.2f}s")
+    logger.info(f"Found {ctx.activation_matrix._nnz()} active features")
+
+    if offload and not model.skip_transcoder:
+        offload_handles += offload_modules(model.transcoders, offload)
+
+    # Phase 1: forward pass
+    logger.info("Phase 1: Running forward pass")
+    phase_start = time.time()
+    with model.trace() as tracer:
+        with tracer.invoke(input_ids.expand(batch_size, -1)):
+            pass
+
+        detach_barrier = tracer.barrier(2)
+
+        
model.configure_gradient_flow(tracer) + model.configure_skip_connection(tracer, barrier=detach_barrier) + ctx.cache_residual(model, tracer, barrier=detach_barrier) + + logger.info(f"Forward pass completed in {time.time() - phase_start:.2f}s") + + if offload: + offload_handles += offload_modules( + [layer.mlp for layer in getattr(model.pre_logit_location, "layers")], offload + ) + if model.skip_transcoder: + offload_handles += offload_modules(model.transcoders, offload) + + # Phase 2: build input vector list + logger.info("Phase 2: Building input vectors") + phase2_start = time.time() + feat_layers, feat_pos, _ = activation_matrix.indices() + n_layers, n_pos, _ = activation_matrix.shape + total_active_feats = activation_matrix._nnz() + + # Create AttributionTargets using NNSight's unembed_weight accessor + targets = AttributionTargets( + attribution_targets=attribution_targets, + logits=ctx.logits[0, -1], + unembed_proj=model.unembed_weight, # NNSight uses unembed_weight + tokenizer=model.tokenizer, + max_n_logits=max_n_logits, + desired_logit_prob=desired_logit_prob, + ) + + if attribution_targets is None: + logger.info( + f"Selected {len(targets)} logits with cumulative probability " + f"{targets.logit_probabilities.sum().item():.4f}" + ) + + if offload: + offload_handles += offload_modules([model.embed_location], offload) + tied_embeds = ( + model.embed_weight.untyped_storage().data_ptr() # type:ignore + == model.unembed_weight.untyped_storage().data_ptr() # type:ignore + ) + if not tied_embeds: + offload_handles += offload_modules([model.lm_head], offload) + + logit_offset = len(feat_layers) + (n_layers + 1) * n_pos + n_logits = len(targets) + total_nodes = logit_offset + n_logits + + actual_max_feature_nodes = min(max_feature_nodes or total_active_feats, total_active_feats) + logger.info(f"Will include {actual_max_feature_nodes} of {total_active_feats} feature nodes") + + edge_matrix = torch.zeros(actual_max_feature_nodes + n_logits, total_nodes) + # Maps row 
indices in edge_matrix to original feature/node indices
+    # First populated with logit node IDs, then feature IDs in attribution order
+    row_to_node_index = torch.zeros(actual_max_feature_nodes + n_logits, dtype=torch.int32)
+    logger.info(f"Input vectors built in {time.time() - phase2_start:.2f}s")
+
+    # Phase 3: logit attribution
+    logger.info("Phase 3: Computing logit attributions")
+    phase3_start = time.time()
+    i = -1
+    for i in range(0, len(targets), batch_size):
+        batch = targets.logit_vectors[i : i + batch_size]
+        rows = ctx.compute_batch(
+            layers=torch.full((batch.shape[0],), n_layers),
+            positions=torch.full((batch.shape[0],), n_pos - 1),
+            inject_values=batch,
+        )
+        edge_matrix[i : i + batch.shape[0], :logit_offset] = rows.cpu()
+        row_to_node_index[i : i + batch.shape[0]] = (
+            torch.arange(i, i + batch.shape[0]) + logit_offset
+        )
+
+    logger.info(f"{n_logits} logit attribution(s) completed in {time.time() - phase3_start:.2f}s")
+
+    # Phase 4: feature attribution
+    logger.info("Phase 4: Computing feature attributions")
+    phase4_start = time.time()
+    st = n_logits
+    visited = torch.zeros(total_active_feats, dtype=torch.bool)
+    n_visited = 0
+
+    pbar = tqdm(
+        total=actual_max_feature_nodes,
+        desc="Feature influence computation",
+        disable=not verbose,
+    )
+
+    while n_visited < actual_max_feature_nodes:
+        if actual_max_feature_nodes == total_active_feats:
+            pending = torch.arange(total_active_feats)
+        else:
+            influences = compute_partial_influences(
+                edge_matrix[:st], targets.logit_probabilities, row_to_node_index[:st]
+            )
+            feature_rank = torch.argsort(influences[:total_active_feats], descending=True).cpu()
+            queue_size = min(update_interval * batch_size, actual_max_feature_nodes - n_visited)
+            pending = feature_rank[~visited[feature_rank]][:queue_size]
+
+        queue = [pending[i : i + batch_size] for i in range(0, len(pending), batch_size)]
+
+        for idx_batch in queue:
+            n_visited += len(idx_batch)
+
+            rows = ctx.compute_batch(
+                layers=feat_layers[idx_batch],
+ positions=feat_pos[idx_batch], + inject_values=ctx.encoder_vecs[idx_batch], + retain_graph=n_visited < actual_max_feature_nodes, + ) + + end = min(st + batch_size, st + rows.shape[0]) + edge_matrix[st:end, :logit_offset] = rows.cpu() + row_to_node_index[st:end] = idx_batch + visited[idx_batch] = True + st = end + pbar.update(len(idx_batch)) + + pbar.close() + logger.info(f"Feature attributions completed in {time.time() - phase4_start:.2f}s") + + # Phase 5: packaging graph + selected_features = torch.where(visited)[0] + if actual_max_feature_nodes < total_active_feats: + non_feature_nodes = torch.arange(total_active_feats, total_nodes) + col_read = torch.cat([selected_features, non_feature_nodes]) + edge_matrix = edge_matrix[:, col_read] + + # sort rows such that features are in order + edge_matrix = edge_matrix[row_to_node_index.argsort()] + final_node_count = edge_matrix.shape[1] + full_edge_matrix = torch.zeros(final_node_count, final_node_count) + full_edge_matrix[:actual_max_feature_nodes] = edge_matrix[:actual_max_feature_nodes] + full_edge_matrix[-n_logits:] = edge_matrix[actual_max_feature_nodes:] + + graph = Graph( + input_string=model.tokenizer.decode(input_ids), + input_tokens=input_ids, + attribution_targets=targets, + active_features=activation_matrix.indices().T, + activation_values=activation_matrix.values(), + selected_features=selected_features, + adjacency_matrix=full_edge_matrix.detach(), + cfg=model.config, + scan=model.scan, + ) + + total_time = time.time() - start_time + logger.info(f"Attribution completed in {total_time:.2f}s") + + return graph diff --git a/circuit_tracer/attribution/attribute_transformerlens.py b/circuit_tracer/attribution/attribute_transformerlens.py new file mode 100644 index 00000000..efd57d39 --- /dev/null +++ b/circuit_tracer/attribution/attribute_transformerlens.py @@ -0,0 +1,285 @@ +""" +Build an **attribution graph** that captures the *direct*, *linear* effects +between features and next-token logits for a 
*prompt-specific* +**local replacement model** using the TransformerLens backend. + +High-level algorithm (matches the 2025 ``Attribution Graphs`` paper): +https://transformer-circuits.pub/2025/attribution-graphs/methods.html + +1. **Local replacement model** - we configure gradients to flow only through + linear components of the network, effectively bypassing attention mechanisms, + MLP non-linearities, and layer normalization scales. +2. **Forward pass** - record residual-stream activations and mark every active + feature. +3. **Backward passes** - for each source node (feature or logit), inject a + *custom* gradient that selects its encoder/decoder direction. Because the + model is linear in the residual stream under our freezes, this contraction + equals the *direct effect* A_{s->t}. +4. **Assemble graph** - store edge weights in a dense matrix and package a + ``Graph`` object. Downstream utilities can *prune* the graph to the subset + needed for interpretation. +""" + +import logging +import time +from collections.abc import Sequence +from typing import Literal + +import torch +from tqdm import tqdm + +from circuit_tracer.attribution.targets import AttributionTargets +from circuit_tracer.graph import Graph, compute_partial_influences +from circuit_tracer.replacement_model.replacement_model_transformerlens import ( + TransformerLensReplacementModel, +) +from circuit_tracer.utils.disk_offload import offload_modules + + +def attribute( + prompt: str | torch.Tensor | list[int], + model: TransformerLensReplacementModel, + *, + attribution_targets: ( + Sequence[tuple[str, float, torch.Tensor] | int | str] | torch.Tensor | None + ) = None, + max_n_logits: int = 10, + desired_logit_prob: float = 0.95, + batch_size: int = 512, + max_feature_nodes: int | None = None, + offload: Literal["cpu", "disk", None] = None, + verbose: bool = False, + update_interval: int = 4, +) -> Graph: + """Compute an attribution graph for *prompt* using TransformerLens backend. 
+ + Args: + prompt: Text, token ids, or tensor - will be tokenized if str. + model: Frozen ``TransformerLensReplacementModel`` + attribution_targets: Flexible attribution target specification in one of several formats: + - None: Auto-select salient logits based on probability threshold + - torch.Tensor: Tensor of token indices + - Sequence[tuple[str, float, torch.Tensor] | int | str]: Sequence where + each element can be: + * int or str: Token ID/string (auto-resolves probability and + unembed vector) + * tuple[str, float, torch.Tensor]: Fully specified logit spec with + arbitrary string tokens (or functions thereof) that may not be in + vocabulary + max_n_logits: Max number of logit nodes (used when attribution_targets is None). + desired_logit_prob: Keep logits until cumulative prob >= this value + (used when attribution_targets is None). + batch_size: How many source nodes to process per backward pass. + max_feature_nodes: Max number of feature nodes to include in the graph. + offload: Method for offloading model parameters to save memory. + Options are "cpu" (move to CPU), "disk" (save to disk), + or None (no offloading). + verbose: Whether to show progress information. + update_interval: Number of batches to process before updating the feature ranking. + + Returns: + Graph: Fully dense adjacency (unpruned). 
+    """
+
+    logger = logging.getLogger("attribution")
+    logger.propagate = False
+    handler = None
+    if verbose and not logger.handlers:
+        handler = logging.StreamHandler()
+        formatter = logging.Formatter("%(message)s")
+        handler.setFormatter(formatter)
+        logger.addHandler(handler)
+        logger.setLevel(logging.INFO)
+    else:
+        logger.setLevel(logging.WARNING)
+
+    offload_handles = []
+    try:
+        return _run_attribution(
+            model=model,
+            prompt=prompt,
+            attribution_targets=attribution_targets,
+            max_n_logits=max_n_logits,
+            desired_logit_prob=desired_logit_prob,
+            batch_size=batch_size,
+            max_feature_nodes=max_feature_nodes,
+            offload=offload,
+            verbose=verbose,
+            offload_handles=offload_handles,
+            update_interval=update_interval,
+            logger=logger,
+        )
+    finally:
+        for reload_handle in offload_handles:
+            reload_handle()
+
+        if handler:
+            logger.removeHandler(handler)
+
+
+def _run_attribution(
+    model,
+    prompt,
+    attribution_targets,
+    max_n_logits,
+    desired_logit_prob,
+    batch_size,
+    max_feature_nodes,
+    offload,
+    verbose,
+    offload_handles,
+    logger,
+    update_interval=4,
+):
+    start_time = time.time()
+    # Phase 0: precompute
+    logger.info("Phase 0: Precomputing activations and vectors")
+    phase_start = time.time()
+    input_ids = model.ensure_tokenized(prompt)
+
+    ctx = model.setup_attribution(input_ids)
+    activation_matrix = ctx.activation_matrix
+
+    logger.info(f"Precomputation completed in {time.time() - phase_start:.2f}s")
+    logger.info(f"Found {ctx.activation_matrix._nnz()} active features")
+
+    if offload:
+        offload_handles += offload_modules(model.transcoders, offload)
+
+    # Phase 1: forward pass
+    logger.info("Phase 1: Running forward pass")
+    phase_start = time.time()
+    with ctx.install_hooks(model):
+        residual = model.forward(input_ids.expand(batch_size, -1), stop_at_layer=model.cfg.n_layers)
+        ctx._resid_activations[-1] = model.ln_final(residual)
+    logger.info(f"Forward pass completed in {time.time() - phase_start:.2f}s")
+
+    if offload:
+        offload_handles += 
offload_modules([block.mlp for block in model.blocks], offload) + + # Phase 2: build input vector list + logger.info("Phase 2: Building input vectors") + phase_start = time.time() + feat_layers, feat_pos, _ = activation_matrix.indices() + n_layers, n_pos, _ = activation_matrix.shape + total_active_feats = activation_matrix._nnz() + + targets = AttributionTargets( + attribution_targets=attribution_targets, + logits=ctx.logits[0, -1], + unembed_proj=model.unembed.W_U, + tokenizer=model.tokenizer, + max_n_logits=max_n_logits, + desired_logit_prob=desired_logit_prob, + ) + + if attribution_targets is None: + logger.info( + f"Selected {len(targets)} logits with cumulative probability " + f"{targets.logit_probabilities.sum().item():.4f}" + ) + + if offload: + offload_handles += offload_modules([model.unembed, model.embed], offload) + + logit_offset = len(feat_layers) + (n_layers + 1) * n_pos + n_logits = len(targets) + total_nodes = logit_offset + n_logits + + max_feature_nodes = min(max_feature_nodes or total_active_feats, total_active_feats) + logger.info(f"Will include {max_feature_nodes} of {total_active_feats} feature nodes") + + edge_matrix = torch.zeros(max_feature_nodes + n_logits, total_nodes) + # Maps row indices in edge_matrix to original feature/node indices + # First populated with logit node IDs, then feature IDs in attribution order + row_to_node_index = torch.zeros(max_feature_nodes + n_logits, dtype=torch.int32) + logger.info(f"Input vectors built in {time.time() - phase_start:.2f}s") + + # Phase 3: logit attribution + logger.info("Phase 3: Computing logit attributions") + phase_start = time.time() + for i in range(0, len(targets), batch_size): + batch = targets.logit_vectors[i : i + batch_size] + rows = ctx.compute_batch( + layers=torch.full((batch.shape[0],), n_layers), + positions=torch.full((batch.shape[0],), n_pos - 1), + inject_values=batch, + ) + edge_matrix[i : i + batch.shape[0], :logit_offset] = rows.cpu() + row_to_node_index[i : i + 
batch.shape[0]] = ( + torch.arange(i, i + batch.shape[0]) + logit_offset + ) + logger.info(f"Logit attributions completed in {time.time() - phase_start:.2f}s") + + # Phase 4: feature attribution + logger.info("Phase 4: Computing feature attributions") + phase_start = time.time() + st = n_logits + visited = torch.zeros(total_active_feats, dtype=torch.bool) + n_visited = 0 + + pbar = tqdm(total=max_feature_nodes, desc="Feature influence computation", disable=not verbose) + + while n_visited < max_feature_nodes: + if max_feature_nodes == total_active_feats: + pending = torch.arange(total_active_feats) + else: + influences = compute_partial_influences( + edge_matrix[:st], targets.logit_probabilities, row_to_node_index[:st] + ) + feature_rank = torch.argsort(influences[:total_active_feats], descending=True).cpu() + queue_size = min(update_interval * batch_size, max_feature_nodes - n_visited) + pending = feature_rank[~visited[feature_rank]][:queue_size] + + queue = [pending[i : i + batch_size] for i in range(0, len(pending), batch_size)] + + for idx_batch in queue: + n_visited += len(idx_batch) + + rows = ctx.compute_batch( + layers=feat_layers[idx_batch], + positions=feat_pos[idx_batch], + inject_values=ctx.encoder_vecs[idx_batch], + retain_graph=n_visited < max_feature_nodes, + ) + + end = min(st + batch_size, st + rows.shape[0]) + edge_matrix[st:end, :logit_offset] = rows.cpu() + row_to_node_index[st:end] = idx_batch + visited[idx_batch] = True + st = end + pbar.update(len(idx_batch)) + + pbar.close() + logger.info(f"Feature attributions completed in {time.time() - phase_start:.2f}s") + + # Phase 5: packaging graph + selected_features = torch.where(visited)[0] + if max_feature_nodes < total_active_feats: + non_feature_nodes = torch.arange(total_active_feats, total_nodes) + col_read = torch.cat([selected_features, non_feature_nodes]) + edge_matrix = edge_matrix[:, col_read] + + # sort rows such that features are in order + edge_matrix = 
edge_matrix[row_to_node_index.argsort()] + final_node_count = edge_matrix.shape[1] + full_edge_matrix = torch.zeros(final_node_count, final_node_count) + full_edge_matrix[:max_feature_nodes] = edge_matrix[:max_feature_nodes] + full_edge_matrix[-n_logits:] = edge_matrix[max_feature_nodes:] + + graph = Graph( + input_string=model.tokenizer.decode(input_ids), + input_tokens=input_ids, + attribution_targets=targets, + active_features=activation_matrix.indices().T, + activation_values=activation_matrix.values(), + selected_features=selected_features, + adjacency_matrix=full_edge_matrix, + cfg=model.cfg, + scan=model.scan, + ) + + total_time = time.time() - start_time + logger.info(f"Attribution completed in {total_time:.2f}s") + + return graph diff --git a/circuit_tracer/attribution/context_nnsight.py b/circuit_tracer/attribution/context_nnsight.py new file mode 100644 index 00000000..63f5e7d3 --- /dev/null +++ b/circuit_tracer/attribution/context_nnsight.py @@ -0,0 +1,215 @@ +""" +Attribution context for managing hooks during attribution computation. +""" + +import weakref +from typing import TYPE_CHECKING + +import numpy as np +import torch +from einops import einsum + + +if TYPE_CHECKING: + from circuit_tracer.replacement_model.replacement_model_nnsight import ( + NNSightReplacementModel, + ) + + +class AttributionContext: + """Manage hooks for computing attribution rows. + + This helper caches residual-stream activations **(forward pass)** and then + registers backward hooks that populate a write-only buffer with + *direct-effect rows* **(backward pass)**. + + The buffer layout concatenates rows for **feature nodes**, **error nodes**, + **token-embedding nodes** + + Args: + activation_matrix (torch.sparse.Tensor): + Sparse `(n_layers, n_pos, n_features)` tensor indicating **which** + features fired at each layer/position. + error_vectors (torch.Tensor): + `(n_layers, n_pos, d_model)` - *residual* the CLT / PLT failed to + reconstruct ("error nodes"). 
+ token_vectors (torch.Tensor): + `(n_pos, d_model)` - embeddings of the prompt tokens. + decoder_vectors (torch.Tensor): + `(total_active_features, d_model)` - decoder rows **only for active + features**, already multiplied by feature activations so they + represent a_s * W^dec. + """ + + def __init__( + self, + activation_matrix: torch.sparse.Tensor, # type: ignore + error_vectors: torch.Tensor, + token_vectors: torch.Tensor, + decoder_vecs: torch.Tensor, + encoder_vecs: torch.Tensor, + encoder_to_decoder_map: torch.Tensor, + decoder_locations: torch.Tensor, + logits: torch.Tensor, + ) -> None: + n_layers, n_pos, _ = activation_matrix.shape + + # Forward-pass cache + self._resid_activations: list[torch.Tensor] = [] + self._feature_output_activations: list[torch.Tensor] = [] + self._batch_buffer: torch.Tensor | None = None + self.n_layers: int = n_layers + + self.logits = logits + self.activation_matrix = activation_matrix + self.error_vectors = error_vectors + self.token_vectors = token_vectors + self.decoder_vecs = decoder_vecs + self.encoder_vecs = encoder_vecs + + self.encoder_to_decoder_map = encoder_to_decoder_map + self.decoder_locations = decoder_locations + + total_active_feats = activation_matrix._nnz() + self._row_size: int = total_active_feats + (n_layers + 1) * n_pos # + logits later + + def cache_residual(self, model: "NNSightReplacementModel", tracer, barrier=None): + """Cache the model's residual for use in the attribution context.""" + with tracer.invoke(): + for feature_input_loc in model.feature_input_locs: + self._resid_activations.append(feature_input_loc.output) # type: ignore + + self._resid_activations.append(model.pre_logit_location.output.last_hidden_state) # type: ignore + + with tracer.invoke(): + self._feature_output_activations.append(model.embed_location.output) # type: ignore + for feature_output_loc_ in model.feature_output_locs: + if barrier: + barrier() + + self._feature_output_activations.append(feature_output_loc_.output) # 
type: ignore + + def compute_score( + self, + grads: torch.Tensor, + output_vecs: torch.Tensor, + write_index: slice, + read_index: slice | np.ndarray = np.s_[:], + ) -> None: + """ + Factory that contracts *gradients* with an **output vector set**. + The hook computes A_{s->t} and accumulates the result into an in-place buffer row. + """ + + proxy = weakref.proxy(self) + proxy._batch_buffer[write_index] += einsum( + grads[read_index], + # grads.to(output_vecs.dtype)[read_index], + output_vecs, + "batch position d_model, position d_model -> position batch", + ) + + def compute_feature_attributions(self, layer, grads): + nnz_layers, nnz_positions = self.decoder_locations + + # Feature nodes - use decoder_locations to find decoders that write to this layer + layer_mask = nnz_layers == layer + if layer_mask.any(): + self.compute_score( + grads, + self.decoder_vecs[layer_mask], + write_index=self.encoder_to_decoder_map[layer_mask], # type: ignore + read_index=np.s_[:, nnz_positions[layer_mask]], # type: ignore + ) + + def compute_error_attributions(self, layer, grads): + _, n_pos, _ = self.activation_matrix.shape + + # Error nodes + def error_offset(layer: int) -> int: # starting row for this layer + return self.activation_matrix._nnz() + layer * n_pos + + self.compute_score( + grads, + self.error_vectors[layer], + write_index=np.s_[error_offset(layer) : error_offset(layer + 1)], + ) + + def compute_token_attributions(self, grads): + n_layers, n_pos, _ = self.activation_matrix.shape + + # Token-embedding nodes + def error_offset(layer: int) -> int: # starting row for this layer + return self.activation_matrix._nnz() + layer * n_pos + + tok_start = error_offset(n_layers) + self.compute_score( + grads, + self.token_vectors, + write_index=np.s_[tok_start : tok_start + n_pos], + ) + + def compute_batch( + self, + layers: torch.Tensor, + positions: torch.Tensor, + inject_values: torch.Tensor, + retain_graph: bool = True, + ) -> torch.Tensor: + """Return attribution rows for 
a batch of (layer, pos) nodes. + + The routine overrides gradients at **exact** residual-stream locations + triggers one backward pass, and copies the rows from the internal buffer. + + Args: + layers: 1-D tensor of layer indices *l* for the source nodes. + positions: 1-D tensor of token positions *c* for the source nodes. + inject_values: `(batch, d_model)` tensor with outer product + a_s * W^(enc/dec) to inject as custom gradient. + + Returns: + torch.Tensor: ``(batch, row_size)`` matrix - one row per node. + """ + + batch_size = self._resid_activations[0].shape[0] + self._batch_buffer = torch.zeros( + self._row_size, + batch_size, + dtype=inject_values.dtype, + device=inject_values.device, + ) + + # Custom gradient injection (per-layer registration) + batch_idx = torch.arange(len(layers), device=layers.device) + + def _inject(grad_point, *, batch_indices, pos_indices, values): + grads_out = grad_point.grad.clone() + grads_out.index_put_((batch_indices, pos_indices), values.to(grads_out.dtype)) + grad_point.grad = grads_out + + layers_in_batch = sorted(layers.unique().tolist(), reverse=True) + + last_layer = max(layers_in_batch) + with self._resid_activations[last_layer].backward( + gradient=torch.zeros_like(self._resid_activations[last_layer]), + retain_graph=retain_graph, + ): + for layer in reversed(range(last_layer + 1)): + if layer != last_layer: + grad = self._feature_output_activations[layer + 1].grad.clone() # type:ignore + self.compute_feature_attributions(layer, grad) + self.compute_error_attributions(layer, grad) + + mask = layers == layer + if mask.any(): + _inject( + grad_point=self._resid_activations[layer], + batch_indices=batch_idx[mask], + pos_indices=positions[mask], + values=inject_values[mask], + ) + + self.compute_token_attributions(self._feature_output_activations[0].grad) + + buf, self._batch_buffer = self._batch_buffer, None + return buf.T[: len(layers)] diff --git a/circuit_tracer/attribution/context.py 
b/circuit_tracer/attribution/context_transformerlens.py similarity index 96% rename from circuit_tracer/attribution/context.py rename to circuit_tracer/attribution/context_transformerlens.py index 1c0ef3b4..bb9b33c1 100644 --- a/circuit_tracer/attribution/context.py +++ b/circuit_tracer/attribution/context_transformerlens.py @@ -5,8 +5,7 @@ import contextlib import weakref from functools import partial -from typing import TYPE_CHECKING -from collections.abc import Callable +from typing import TYPE_CHECKING, Callable import numpy as np import torch @@ -14,7 +13,9 @@ from transformer_lens.hook_points import HookPoint if TYPE_CHECKING: - from circuit_tracer.replacement_model import ReplacementModel + from circuit_tracer.replacement_model.replacement_model_transformerlens import ( + TransformerLensReplacementModel, + ) class AttributionContext: @@ -44,7 +45,7 @@ class AttributionContext: def __init__( self, - activation_matrix: torch.Tensor, + activation_matrix: torch.sparse.Tensor, # type: ignore error_vectors: torch.Tensor, token_vectors: torch.Tensor, decoder_vecs: torch.Tensor, @@ -156,7 +157,7 @@ def error_offset(layer: int) -> int: # starting row for this layer return feature_hooks + error_hooks + token_hook @contextlib.contextmanager - def install_hooks(self, model: "ReplacementModel"): + def install_hooks(self, model: "TransformerLensReplacementModel"): """Context manager instruments the hooks for the forward and backward passes.""" with model.hooks( fwd_hooks=self._caching_hooks(model.feature_input_hook), # type: ignore diff --git a/circuit_tracer/graph.py b/circuit_tracer/graph.py index d4fd7bcf..326e1cdd 100644 --- a/circuit_tracer/graph.py +++ b/circuit_tracer/graph.py @@ -1,12 +1,21 @@ """Graph data structures for attribution results.""" -from typing import NamedTuple +from __future__ import annotations + +from typing import TYPE_CHECKING, NamedTuple import warnings import torch -from transformer_lens import HookedTransformerConfig -from 
circuit_tracer.attribution.targets import AttributionTargets, LogitTarget +from circuit_tracer.utils.tl_nnsight_mapping import ( + convert_nnsight_config_to_transformerlens, + UnifiedConfig, +) +from circuit_tracer.utils import get_default_device +from circuit_tracer.attribution.targets import LogitTarget + +if TYPE_CHECKING: + from circuit_tracer.attribution.targets import AttributionTargets class Graph: @@ -19,7 +28,7 @@ class Graph: activation_values: torch.Tensor logit_probabilities: torch.Tensor vocab_size: int - cfg: HookedTransformerConfig + cfg: UnifiedConfig scan: str | list[str] | None n_pos: int @@ -29,7 +38,7 @@ def __init__( input_tokens: torch.Tensor, active_features: torch.Tensor, adjacency_matrix: torch.Tensor, - cfg: HookedTransformerConfig, + cfg, selected_features: torch.Tensor, activation_values: torch.Tensor, scan: str | list[str] | None = None, @@ -101,7 +110,8 @@ def __init__( self.input_string = input_string self.adjacency_matrix = adjacency_matrix - self.cfg = cfg + # Convert cfg to UnifiedConfig (handles both HookedTransformerConfig and NNSight configs) + self.cfg = convert_nnsight_config_to_transformerlens(cfg) self.n_pos = len(input_tokens) self.active_features = active_features self.input_tokens = input_tokens @@ -390,3 +400,49 @@ def compute_graph_scores(graph: Graph) -> tuple[float, float]: completeness_score = (non_error_fractions * output_influence).sum() / output_influence.sum() return replacement_score.item(), completeness_score.item() + + +def compute_partial_influences( + edge_matrix: torch.Tensor, + logit_p: torch.Tensor, + row_to_node_index: torch.Tensor, + max_iter: int = 128, + device=None, +): + """Compute partial influences using power iteration method. + + This function calculates the influence of each node on the output logits + based on the edge weights in the graph. + + Args: + edge_matrix: The edge weight matrix. + logit_p: The logit probabilities. + row_to_node_index: Mapping from row indices to node indices. 
+ max_iter: Maximum number of iterations for convergence. + device: Device to perform computation on. + + Returns: + torch.Tensor: Influence values for each node. + + Raises: + RuntimeError: If computation fails to converge within max_iter. + """ + device = device or get_default_device() + + normalized_matrix = torch.empty_like(edge_matrix, device=device).copy_(edge_matrix) + normalized_matrix = normalized_matrix.abs_() + normalized_matrix /= normalized_matrix.sum(dim=1, keepdim=True).clamp(min=1e-8) + + influences = torch.zeros(edge_matrix.shape[1], device=normalized_matrix.device) + prod = torch.zeros(edge_matrix.shape[1], device=normalized_matrix.device) + prod[-len(logit_p) :] = logit_p + + for _ in range(max_iter): + prod = prod[row_to_node_index] @ normalized_matrix + if not prod.any(): + break + influences += prod + else: + raise RuntimeError("Failed to converge") + + return influences diff --git a/circuit_tracer/replacement_model/__init__.py b/circuit_tracer/replacement_model/__init__.py new file mode 100644 index 00000000..f99af378 --- /dev/null +++ b/circuit_tracer/replacement_model/__init__.py @@ -0,0 +1,9 @@ +""" +ReplacementModel implementations for different backends. +""" + +from .replacement_model import ReplacementModel + +__all__ = [ + "ReplacementModel", +] diff --git a/circuit_tracer/replacement_model/replacement_model.py b/circuit_tracer/replacement_model/replacement_model.py new file mode 100644 index 00000000..ad1a81fa --- /dev/null +++ b/circuit_tracer/replacement_model/replacement_model.py @@ -0,0 +1,164 @@ +""" +Unified ReplacementModel interface that supports both nnsight and transformerlens backends. 
+""" + +from typing import Literal +import torch + +from circuit_tracer.transcoder import TranscoderSet +from circuit_tracer.transcoder.cross_layer_transcoder import CrossLayerTranscoder +from circuit_tracer.utils import get_default_device +from circuit_tracer.utils.hf_utils import load_transcoder_from_hub + +Backend = Literal["nnsight", "transformerlens"] + + +class ReplacementModel: + """ + Unified ReplacementModel interface that supports both nnsight and transformerlens backends. + + This class acts as a factory that creates the appropriate backend-specific ReplacementModel + based on the backend parameter. + """ + + @classmethod + def from_pretrained( + cls, + model_name: str, + transcoder_set: str, + backend: Backend = "transformerlens", + device: torch.device | None = None, + dtype: torch.dtype = torch.float32, + lazy_encoder: bool = False, + lazy_decoder: bool = True, + **kwargs, + ): + """Create a ReplacementModel from model name and transcoder config. + + Args: + model_name (str): The name of the pretrained transformer model + transcoder_set (str): Either a predefined transcoder set name, or a config file + backend (Backend): Which backend to use - "nnsight" or "transformerlens" + device (Optional[torch.device]): Device to load the model on + dtype (Optional[torch.dtype]): Data type for the model + lazy_encoder (bool): Whether to lazily load encoder weights (default: False) + lazy_decoder (bool): Whether to lazily load decoder weights (default: True) + **kwargs: Additional arguments passed to the backend-specific implementation + + Returns: + ReplacementModel: The loaded ReplacementModel using the specified backend + """ + if device is None: + device = get_default_device() + + transcoders, _ = load_transcoder_from_hub( # type:ignore + transcoder_set, + device=device, + dtype=dtype, + lazy_encoder=lazy_encoder, + lazy_decoder=lazy_decoder, + ) + + return cls.from_pretrained_and_transcoders( + model_name=model_name, + transcoders=transcoders, + 
backend=backend, + device=device, + dtype=dtype, + **kwargs, + ) + + @classmethod + def from_pretrained_and_transcoders( + cls, + model_name: str, + transcoders: TranscoderSet | CrossLayerTranscoder, + backend: Backend = "transformerlens", + device: torch.device | None = None, + dtype: torch.dtype = torch.float32, + **kwargs, + ): + """Create a ReplacementModel from model name and transcoder objects. + + Args: + model_name (str): The name of the pretrained transformer model + transcoders (Union[TranscoderSet, CrossLayerTranscoder]): The transcoder set or + cross-layer transcoder + backend (Backend): Which backend to use - "nnsight" or "transformerlens" + device (Optional[torch.device]): Device to load the model on + dtype (Optional[torch.dtype]): Data type for the model + **kwargs: Additional arguments passed to the backend-specific implementation + + Returns: + ReplacementModel: The loaded ReplacementModel using the specified backend + """ + if device is None: + device = get_default_device() + + if backend == "nnsight": + # Import backend-specific implementations + from .replacement_model_nnsight import NNSightReplacementModel + + return NNSightReplacementModel.from_pretrained_and_transcoders( + model_name=model_name, + transcoders=transcoders, + device=device, + dtype=dtype, + **kwargs, + ) + elif backend == "transformerlens": + from .replacement_model_transformerlens import ( + TransformerLensReplacementModel, + ) + + return TransformerLensReplacementModel.from_pretrained_and_transcoders( + model_name=model_name, + transcoders=transcoders, + device=device, + dtype=dtype, + **kwargs, + ) + else: + raise ValueError(f"Unknown backend: {backend}. Must be 'nnsight' or 'transformerlens'") + + @classmethod + def from_config( + cls, + config, + transcoders: TranscoderSet | CrossLayerTranscoder, + backend: Backend = "transformerlens", + **kwargs, + ): + """Create a ReplacementModel from a config and transcoder objects. 
+ + Args: + config: Model configuration (AutoConfig for nnsight, HookedTransformerConfig + for transformerlens) + transcoders (Union[TranscoderSet, CrossLayerTranscoder]): The transcoder set or + cross-layer transcoder + backend (Backend): Which backend to use - "nnsight" or "transformerlens" + **kwargs: Additional arguments passed to the backend-specific implementation + + Returns: + ReplacementModel: The loaded ReplacementModel using the specified backend + """ + if backend == "nnsight": + from .replacement_model_nnsight import NNSightReplacementModel + + return NNSightReplacementModel.from_config( + config=config, + transcoders=transcoders, + **kwargs, + ) + elif backend == "transformerlens": + from .replacement_model_transformerlens import ( + TransformerLensReplacementModel, + ) + + return TransformerLensReplacementModel.from_config( + config=config, + transcoders=transcoders, + **kwargs, + ) + else: + raise ValueError(f"Unknown backend: {backend}. Must be 'nnsight' or 'transformerlens'") diff --git a/circuit_tracer/replacement_model/replacement_model_nnsight.py b/circuit_tracer/replacement_model/replacement_model_nnsight.py new file mode 100644 index 00000000..4510bd51 --- /dev/null +++ b/circuit_tracer/replacement_model/replacement_model_nnsight.py @@ -0,0 +1,1063 @@ +import warnings +from collections import defaultdict +from collections.abc import Sequence +from contextlib import contextmanager +from functools import partial +from typing import Callable, Iterator, Literal + +import torch +from torch import nn +from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer +from nnsight.intervention.tracing.tracer import Barrier +from nnsight import LanguageModel, Envoy, save, CONFIG as NNSIGHT_CONFIG + +from circuit_tracer.attribution.context_nnsight import AttributionContext +from circuit_tracer.transcoder import TranscoderSet +from circuit_tracer.transcoder.cross_layer_transcoder import CrossLayerTranscoder +from circuit_tracer.utils import 
get_default_device +from circuit_tracer.utils.hf_utils import load_transcoder_from_hub +from circuit_tracer.utils.tl_nnsight_mapping import ( + get_mapping, + convert_nnsight_config_to_transformerlens, +) + +NNSIGHT_CONFIG.APP.PYMOUNT = False +NNSIGHT_CONFIG.APP.CROSS_INVOKER = False +NNSIGHT_CONFIG.APP.TRACE_CACHING = True + +# Type definition for an intervention tuple (layer, position, feature_idx, value) +Intervention = tuple[ + int | torch.Tensor, + int | slice | torch.Tensor, + int | torch.Tensor, + int | float | torch.Tensor, +] + + +class EnvoyWrapper: + def __init__(self, envoy, input_output: Literal["input", "output"]): + self.envoy = envoy + self.input_output = input_output + + @property + def output(self): + return getattr(self.envoy, self.input_output) + + @output.setter + def output(self, value): + setattr(self.envoy, self.input_output, value) + + +class NNSightReplacementModel(LanguageModel): + d_transcoder: int + transcoders: TranscoderSet | CrossLayerTranscoder + feature_input_locs: list[nn.Module] # type: ignore + feature_output_locs: list[nn.Module] # type: ignore + attention_locs: list[nn.Module] # type: ignore + layernorm_scale_locs: list[nn.Module] # type: ignore + pre_logit_location: nn.Module # type: ignore + embed_loc: nn.Module + unembed_loc: nn.Module + skip_transcoder: bool + scan: str | list[str] | None + backend: Literal["nnsight"] + + @classmethod + def from_config( + cls, + config: AutoConfig, + transcoders: TranscoderSet | CrossLayerTranscoder, # Accept both + **kwargs, + ) -> "NNSightReplacementModel": + """Create a NNSightReplacementModel from a given AutoConfig and TranscoderSet + + Args: + config (AutoConfig): the config of the HuggingFace transformer + transcoders (TranscoderSet): The transcoder set with configuration + + Returns: + NNSightReplacementModel: The loaded NNSightReplacementModel + """ + config._attn_implementation = "eager" # type: ignore + hf_model = AutoModelForCausalLM.from_config(config) + hf_tokenizer = 
AutoTokenizer.from_pretrained(config._name_or_path) # type: ignore + + model = cls(hf_model, tokenizer=hf_tokenizer, dispatch=True, **kwargs) + model.config = config # type: ignore + model._configure_replacement_model(transcoders) + return model + + @classmethod + def from_pretrained_and_transcoders( + cls, + model_name: str, + transcoders: TranscoderSet | CrossLayerTranscoder, + device: torch.device | str = torch.device("cuda"), + dtype: torch.dtype = torch.float32, + **kwargs, + ) -> "NNSightReplacementModel": + """Create a NNSightReplacementModel from the name of HookedTransformer and TranscoderSet + + Args: + model_name (str): the name of the pretrained HookedTransformer + transcoders (TranscoderSet): The transcoder set with configuration + + Returns: + NNSightReplacementModel: The loaded NNSightReplacementModel + """ + # The goal is to build a ReplacementModel instance *using* the parent + # LanguageModel.__init__. Since we are in a `@classmethod`, we don't yet have + # an object (`self`) to pass to `super().__init__`. We create an _uninitialised_ + # instance with `__new__`, then run the parent initialiser on it. + + # 1. Allocate the instance without initialising it. + model = cls.__new__(cls) + # 2. Call the parent (LanguageModel) initializer on this instance. + + # Convert ``torch.device`` to a HF-compatible device map + if isinstance(device, torch.device): + if device.type == "cuda": + dev_entry = device.index if device.index is not None else 0 + else: + dev_entry = device.type # e.g. "cpu" + else: + # string inputs such as "cuda:1" or "cpu". 
+ dev_str = str(device) + if dev_str.startswith("cuda"): + # "cuda" or "cuda:1" → extract index or default to 0 + parts = dev_str.split(":") + dev_entry = int(parts[1]) if len(parts) > 1 else 0 + else: + dev_entry = dev_str # "cpu" or other accelerator names + + device_map = {"": dev_entry} + + config = AutoConfig.from_pretrained(model_name) + if hasattr(config, "quantization_config"): + config.quantization_config["dequantize"] = True + + super(cls, model).__init__( + model_name, + config=config, + device_map=device_map, + dispatch=True, + dtype=dtype, + attn_implementation="eager", + ) + + model._configure_replacement_model(transcoders) + return model + + @classmethod + def from_pretrained( + cls, + model_name: str, + transcoder_set: str, + device: torch.device | None = None, + dtype: torch.dtype = torch.float32, + **kwargs, + ) -> "NNSightReplacementModel": + """Create a NNSightReplacementModel from model name and transcoder config + + Args: + model_name (str): the name of the pretrained HookedTransformer + transcoder_set (str): Either a predefined transcoder set name, or a config file + + Returns: + NNSightReplacementModel: The loaded NNSightReplacementModel + """ + if device is None: + device = get_default_device() + + transcoders, _ = load_transcoder_from_hub(transcoder_set, device=device, dtype=dtype) # type: ignore + + return cls.from_pretrained_and_transcoders( + model_name, + transcoders, + device=device, + dtype=dtype, + **kwargs, + ) + + @staticmethod + def _resolve_attr(root: object, attr_path: str): + """Resolves a dotted attribute path that can additionally contain Python-style + list indices, e.g. "model.layers[3].mlp". + + Args: + root (object): The object from which to start attribute resolution. + attr_path (str): Dotted path, optionally containing one level of + ``[idx]`` list/ModuleList access. + + Returns: + object: The resolved attribute. + """ + current = root + # Split on dots – each token may still contain an index expression. 
+ for token in attr_path.split("."): + if not token: + continue # Guard against accidental empty tokens + if "[" in token and token.endswith("]"): + # e.g. "layers[3]" + attr_name, idx_str = token.split("[", 1) + idx = int(idx_str[:-1]) # strip trailing ] + current = getattr(current, attr_name)[idx] + else: + current = getattr(current, token) + return current + + def _configure_replacement_model( + self, + transcoder_set: TranscoderSet | CrossLayerTranscoder, + ): + self.backend = "nnsight" + self.eval() + self.cfg = convert_nnsight_config_to_transformerlens(self.config) + + transcoder_set.to(self.device, self.dtype) + self.transcoders = transcoder_set + self.skip_transcoder = transcoder_set.skip_connection + + # ------------------------------------------------------------------ + # Instead of eagerly resolving hook locations here (which can fail + # outside of a `self.trace` context when multiple `.source`s exist), + # we cache the *patterns* needed to resolve them and provide dynamic + # property accessors which resolve the hooks on-demand inside the + # appropriate trace context. + # ------------------------------------------------------------------ + nnsight_config = get_mapping(self.config.architectures[0]) # type: ignore + + self._feature_input_pattern, self._feature_input_io = nnsight_config.feature_hook_mapping[ + transcoder_set.feature_input_hook + ] + self._feature_output_pattern, _ = nnsight_config.feature_hook_mapping[ + transcoder_set.feature_output_hook + ] + + self._attention_pattern = nnsight_config.attention_location_pattern + # Ensure we consistently store LayerNorm scale patterns as a list. 
+ self._layernorm_scale_patterns = nnsight_config.layernorm_scale_location_patterns + self._pre_logit_location = nnsight_config.pre_logit_location + self._embed_location = nnsight_config.embed_location + + # these are real weights, not envoys + self.embed_weight = self._resolve_attr(self, nnsight_config.embed_weight) + self.unembed_weight = self._resolve_attr(self, nnsight_config.unembed_weight) + self.scan = transcoder_set.scan + + # Make sure the replacement model is entirely frozen by default. + for param in self.parameters(): + param.requires_grad = False + + def configure_gradient_flow(self, tracer): + with tracer.invoke(): + self.embed_location.output.requires_grad = True # type: ignore + + with tracer.invoke(): + for freeze_loc in self.attention_locs: + freeze_loc.output = freeze_loc.output.detach() # type: ignore + + for layernorm_scale_locs_list in self.layernorm_scale_locs: + with tracer.invoke(): + for freeze_loc in layernorm_scale_locs_list: + freeze_loc.output = freeze_loc.output.detach() # type: ignore + + def configure_skip_connection(self, tracer, barrier=None): + transcoders = ( + self.transcoders._module if isinstance(self.transcoders, Envoy) else self.transcoders + ) + + with tracer.invoke(): + for layer, (feature_input_loc, feature_output_loc) in enumerate( + zip(self.feature_input_locs, self.feature_output_locs) + ): + if transcoders.skip_connection: # type: ignore + skip = transcoders.compute_skip(layer, feature_input_loc.output) # type: ignore + else: + skip = 0 * feature_input_loc.output.sum() # type: ignore + feature_output_loc.output = skip + (feature_output_loc.output - skip).detach() # type: ignore + if barrier: + barrier() + + def get_activation_fn( + self, + sparse: bool = False, + apply_activation_function: bool = True, + append: bool = False, + ) -> tuple[ + list[torch.Tensor], + Callable[ + [Barrier | None, set[int], Iterator[int] | None], tuple[torch.Tensor, torch.Tensor] + ], + ]: + activation_matrix = ( + [[] for _ in 
range(self.cfg.n_layers)] if append else [None] * self.cfg.n_layers
+        )
+
+        def fetch_activations(
+            barrier: Barrier | None = None,
+            barrier_layers: set[int] | None = None,
+            activation_layers: Iterator[int] | None = None,
+        ):
+            # special case to zero out the "user\n" chat prefix for gemma-3 (-it) transcoders
+            gemma_3_it = "gemma-3" in self.cfg.model_name and self.cfg.model_name.endswith("-it")
+            overlap = 0
+            if gemma_3_it:
+                input_ids = self.input.squeeze(0)
+                ignore_prefix = torch.tensor(
+                    [2, 105, 2364, 107], dtype=input_ids.dtype, device=input_ids.device
+                )
+                min_len = min(len(input_ids), len(ignore_prefix))
+                if min_len == 0:
+                    overlap = 0
+                else:
+                    # Compare the overlapping portion
+                    matches = input_ids[:min_len] == ignore_prefix[:min_len]
+
+                    # Find the first False (mismatch)
+                    if matches.all():
+                        overlap = min_len
+                    else:
+                        overlap = matches.to(torch.int).argmin().item()
+
+            layers = range(self.cfg.n_layers) if activation_layers is None else activation_layers
+            for layer in layers:
+                feature_input_loc = self.get_feature_input_loc(layer)
+                transcoder_acts = (
+                    self.transcoders._module.encode_layer(  # type: ignore
+                        feature_input_loc.output,
+                        layer,
+                        apply_activation_function=apply_activation_function,
+                    )
+                    .detach()
+                    .squeeze(0)
+                )
+
+                if not (append and len(activation_matrix[layer]) > 0):  # type:ignore
+                    transcoder_acts[0] = 0
+                    if gemma_3_it:
+                        transcoder_acts[:overlap] = 0
+
+                if sparse:
+                    transcoder_acts = transcoder_acts.to_sparse()
+
+                if append:
+                    activation_matrix[layer].append(transcoder_acts)  # type: ignore
+                else:
+                    activation_matrix[layer] = transcoder_acts  # type: ignore
+
+                if barrier is not None and barrier_layers is not None and layer in barrier_layers:
+                    barrier()
+
+            logits = save(self.output.logits)
+
+            # If `activation_layers` is not None we only need activations for certain
+            # layers during this forward pass, so avoid creating and saving the full cache.
+ + if activation_layers is not None: + activation_cache = None + else: + if append: + activation_cache = torch.stack( + [torch.cat(acts, dim=0) for acts in activation_matrix] + ) + else: + activation_cache = torch.stack(activation_matrix) # type: ignore + + if sparse: + activation_cache = activation_cache.coalesce() + + return logits, activation_cache + + return activation_matrix, fetch_activations # type: ignore + + def get_activations( + self, + inputs: str | torch.Tensor, + sparse: bool = False, + apply_activation_function: bool = True, + ) -> tuple[torch.Tensor, torch.Tensor]: + """Get the transcoder activations for a given prompt + + Args: + inputs (str | torch.Tensor): The inputs you want to get activations over + sparse (bool, optional): Whether to return a sparse tensor of activations. + Useful if d_transcoder is large. Defaults to False. + + Returns: + tuple[torch.Tensor, torch.Tensor]: the model logits on the inputs and the + associated activation cache + """ + _, fetch_activations = self.get_activation_fn( + sparse=sparse, apply_activation_function=apply_activation_function + ) + with torch.inference_mode(), self.trace(inputs): + logits, activation_cache = fetch_activations() # type:ignore + logits = save(logits) # type: ignore + activation_cache = save(activation_cache) # type: ignore + + return logits, activation_cache + + @contextmanager + def zero_softcap(self): + if hasattr(self.config, "final_logit_softcapping"): + current_softcap = self.config.final_logit_softcapping # type: ignore + try: + self.config.final_logit_softcapping = None # type: ignore + yield + finally: + self.config.final_logit_softcapping = current_softcap # type: ignore + elif hasattr(self.config, "text_config") and hasattr( + self.config.text_config, "final_logit_softcapping" + ): + current_softcap = self.config.text_config.final_logit_softcapping # type: ignore + try: + self.config.text_config.final_logit_softcapping = None # type: ignore + yield + finally: + 
self.config.text_config.final_logit_softcapping = current_softcap # type: ignore + else: + yield + + def ensure_tokenized(self, prompt: str | torch.Tensor | list[int]) -> torch.Tensor: + """Convert prompt to 1-D tensor of token ids with proper special token handling. + + This method ensures that a special token (BOS/PAD) is prepended to the input sequence. + The first token position in transformer models typically exhibits unusually high norm + and an excessive number of active features due to how models process the beginning of + sequences. By prepending a special token, we ensure that actual content tokens have + more consistent and interpretable feature activations, avoiding the artifacts present + at position 0. This prepended token is later ignored during attribution analysis. + + Args: + prompt: String, tensor, or list of token ids representing a single sequence + + Returns: + 1-D tensor of token ids with BOS/PAD token at the beginning + + Raises: + TypeError: If prompt is not str, tensor, or list + ValueError: If tensor has wrong shape (must be 1-D or 2-D with batch size 1) + """ + + if isinstance(prompt, str): + tokens = self.tokenizer( + prompt, return_tensors="pt", add_special_tokens=False + ).input_ids.squeeze(0) + elif isinstance(prompt, torch.Tensor): + tokens = prompt.squeeze() + elif isinstance(prompt, list): + tokens = torch.tensor(prompt, dtype=torch.long).squeeze() + else: + raise TypeError(f"Unsupported prompt type: {type(prompt)}") + + if tokens.ndim > 1: + raise ValueError(f"Tensor must be 1-D, got shape {tokens.shape}") + + # Check if a special token is already present at the beginning + if tokens[0] in self.tokenizer.all_special_ids: + return tokens.to(self.device) + + # Prepend a special token to avoid artifacts at position 0 + candidate_bos_token_ids = [ + self.tokenizer.bos_token_id, + self.tokenizer.pad_token_id, + self.tokenizer.eos_token_id, + ] + candidate_bos_token_ids += self.tokenizer.all_special_ids + + dummy_bos_token_id = 
next(filter(None, candidate_bos_token_ids), None)
+        if dummy_bos_token_id is None:
+            warnings.warn(
+                "No suitable special token found for BOS token replacement. "
+                "The first token will be ignored.",
+            )
+        else:
+            tokens = torch.cat([torch.tensor([dummy_bos_token_id], device=tokens.device), tokens])
+
+        return tokens.to(self.device)
+
+    @torch.no_grad()
+    def setup_attribution(self, inputs: str | torch.Tensor):
+        """Precomputes the transcoder activations and error vectors, saving them and the
+        token embeddings.
+
+        Args:
+            inputs (str): the inputs to attribute - hard coded to be a single string (no
+                batching) for now
+        """
+
+        if isinstance(inputs, str):
+            tokens = self.ensure_tokenized(inputs)
+        else:
+            tokens = inputs.squeeze()
+
+        assert isinstance(tokens, torch.Tensor), "Tokens must be a tensor"
+        assert tokens.ndim == 1, "Tokens must be a 1D tensor"
+
+        mlp_in_cache = [None] * self.cfg.n_layers
+        mlp_out_cache = [None] * self.cfg.n_layers
+
+        transcoders = self.transcoders
+
+        with self.trace(tokens):
+            mlp_in_cache, mlp_out_cache = [], []
+            for feature_input_loc, feature_output_loc in zip(
+                self.feature_input_locs, self.feature_output_locs
+            ):
+                mlp_in_cache.append(feature_input_loc.output)
+
+                # we expect a dummy dimension 0, but GPT-OSS doesn't have one, so we add it.
+ y = feature_output_loc.output + if y.ndim == 2: + y = y.unsqueeze(0) # type: ignore + mlp_out_cache.append(y) + + mlp_in_cache = save(torch.cat(mlp_in_cache, dim=0)) # type: ignore + mlp_out_cache = save(torch.cat(mlp_out_cache, dim=0)) # type: ignore + logits = save(self.output.logits) + + # special case to zero out user\n for gemmascope 2 (-it) transcoders + gemma_3_it = "gemma-3" in self.cfg.model_name and self.cfg.model_name.endswith("-it") + zero_positions = slice(0, 1) + if gemma_3_it: + ignore_prefix = torch.tensor( + [2, 105, 2364, 107], dtype=tokens.dtype, device=tokens.device + ) + min_len = min(len(tokens), len(ignore_prefix)) + if min_len == 0: + zero_positions = slice(0, 0) + else: + # Compare the overlapping portion + matches = tokens[:min_len] == ignore_prefix[:min_len] + + # Find the first False (mismatch) + if matches.all(): + zero_positions = slice(0, min_len) + else: + zero_positions = slice(0, matches.to(torch.int).argmin().item()) + + attribution_data = transcoders.compute_attribution_components(mlp_in_cache, zero_positions) # type: ignore + + # Compute error vectors + error_vectors = mlp_out_cache - attribution_data["reconstruction"] + + error_vectors[:, 0] = 0 + token_vectors = self.embed_weight[ # type: ignore + tokens + ].detach() # (n_pos, d_model) # type: ignore + + return AttributionContext( + activation_matrix=attribution_data["activation_matrix"], + logits=logits, + error_vectors=error_vectors, + token_vectors=token_vectors, + decoder_vecs=attribution_data["decoder_vecs"], + encoder_vecs=attribution_data["encoder_vecs"], + encoder_to_decoder_map=attribution_data["encoder_to_decoder_map"], + decoder_locations=attribution_data["decoder_locations"], + ) + + def setup_intervention_with_freeze( + self, inputs: str | torch.Tensor, constrained_layers: range | None = None + ) -> tuple[torch.Tensor, list[Callable]]: + """Sets up an intervention with either frozen attention + LayerNorm(default) or frozen + attention, LayerNorm, and MLPs, for 
constrained layers + + Args: + inputs (str | torch.Tensor): The inputs to intervene on + constrained_layers (range | None): Whether to apply interventions only to a + certain range. Mostly applicable to CLTs. If the given range includes + all model layers, we also freeze LayerNorm denominators to compute + direct effects. None means no constraints (iterative patching). + + Returns: + tuple[torch.Tensor, list[Callable]]: The freeze hooks needed to run the + desired intervention. + """ + + def get_locs_to_freeze(): + # This must be in a function invoked only within a trace context. Otherwise + # the `.source` attribute cannot be read twice. + locs_to_freeze = {"attention": self.attention_locs} + if constrained_layers: + if set(range(self.cfg.n_layers)).issubset(set(constrained_layers)): # type: ignore + for i, layernorm_freeze_loc in enumerate(self.layernorm_scale_locs): + locs_to_freeze[f"layernorm-{i}"] = layernorm_freeze_loc + if self.skip_transcoder: + locs_to_freeze["feature_input"] = self.feature_input_locs + locs_to_freeze["feature_output"] = self.feature_output_locs + return locs_to_freeze + + activation_matrix, activation_fn = self.get_activation_fn() + cache = {} + + # Somehow `self` can be replaced with an `EnvoyWrapper`, which causes issues. + # Use local references to avoid that problem. 
+ transcoders = self.transcoders + skip_transcoder = self.skip_transcoder + + # get transcoder activations and values to freeze to + with self.trace() as tracer: + with tracer.invoke(inputs): + activation_fn() # type:ignore + dict_to_freeze = save(get_locs_to_freeze()) # type: ignore + for freeze_loc_name, loc_type_to_freeze in get_locs_to_freeze().items(): + with tracer.invoke(): + for layer, loc_to_freeze in enumerate(loc_type_to_freeze): + freeze_loc_output = loc_to_freeze.output + if freeze_loc_name != "feature_input": + freeze_loc_output = freeze_loc_output.detach() # type:ignore + cache[freeze_loc_name, layer] = save(freeze_loc_output) # type: ignore + + skip_diffs = {} + + def freeze_fn(freeze_loc_name, loc_type_to_freeze, direct_effects_barrier=None): + for layer, loc_to_freeze in enumerate(loc_type_to_freeze): + if freeze_loc_name == "feature_input": + # The MLP hook out freeze hook sets the value of the MLP to the value it + # had when run on the inputs normally. We subtract out the skip that + # corresponds to such a run, and add in the skip with direct effects. 
+ frozen_skip = transcoders.compute_skip( # type: ignore + layer, cache["feature_input", layer] + ) + normal_skip = transcoders.compute_skip(layer, loc_to_freeze.output) # type: ignore + + skip_diffs[layer] = normal_skip - frozen_skip + + else: + if freeze_loc_name == "feature_output": + if layer not in constrained_layers: # type: ignore + continue + + original_outputs = loc_to_freeze.output + cached_values = cache[freeze_loc_name, layer] + + if isinstance(original_outputs, tuple): + assert isinstance(cached_values, tuple) + assert len(original_outputs) == len(cached_values) + for orig, cached in zip(original_outputs, cached_values): + assert orig.shape == cached.shape, ( + f"Activations shape {orig.shape} does not match cached values " + f"shape {cached.shape} at hook {loc_to_freeze.name}" + ) + else: + assert original_outputs.shape == cached_values.shape, ( + f"Activations shape {original_outputs.shape} != {cached_values.shape} " + f"at hook {loc_to_freeze.name}" + ) + + if freeze_loc_name == "feature_output" and skip_transcoder: + loc_to_freeze.output = cached_values + skip_diffs[layer] + else: + loc_to_freeze.output = cached_values + + if ( + freeze_loc_name == "feature_output" + and direct_effects_barrier + and (constrained_layers is None or layer in constrained_layers) + ): + direct_effects_barrier() + + return torch.stack(activation_matrix), [ + partial( + freeze_fn, + freeze_loc_name=freeze_loc_name, + loc_type_to_freeze=loc_type_to_freeze, + ) + for freeze_loc_name, loc_type_to_freeze in dict_to_freeze.items() + ] + + @torch.no_grad + def _perform_feature_intervention( + self, + inputs, + interventions: Sequence[Intervention], + activation_matrix: torch.Tensor, + original_activations: torch.Tensor | None, + activation_barrier, + direct_effects_barrier, + constrained_layers: range | None = None, + using_past_kv_cache_idx: int | None = None, + apply_activation_function: bool = True, + ): + interventions_by_layer = defaultdict(list) + for layer, pos, 
feature_idx, value in interventions: + layer = layer.item() if isinstance(layer, torch.Tensor) else layer + interventions_by_layer[layer].append((pos, feature_idx, value)) + + if using_past_kv_cache_idx is not None and using_past_kv_cache_idx > 0: + # We're generating one token at a time + n_pos = 1 + elif original_activations is not None: + n_pos = original_activations.size(1) + else: + n_pos = len(self.tokenizer(inputs).input_ids) + + layer_deltas = torch.zeros( + [self.cfg.n_layers, n_pos, self.cfg.d_model], + dtype=self.dtype, + device=self.device, + ) + for layer in range(self.cfg.n_layers): + if interventions_by_layer[layer]: + if constrained_layers: + # base deltas on original activations; don't let effects propagate + transcoder_activations = original_activations[layer].clone() # type: ignore + else: + activation_barrier() + # recompute deltas based on current activations + transcoder_activations = ( + activation_matrix[layer][-1] + if using_past_kv_cache_idx is not None + else activation_matrix[layer] + ) + if transcoder_activations.is_sparse: + transcoder_activations = transcoder_activations.to_dense() + + if not apply_activation_function: + transcoder_activations = self.transcoders.apply_activation_function( + layer, transcoder_activations.unsqueeze(0) + ).squeeze(0) + + activation_deltas = torch.zeros_like(transcoder_activations) + for pos, feature_idx, value in interventions_by_layer[layer]: + activation_deltas[pos, feature_idx] = ( + value - transcoder_activations[pos, feature_idx] + ) + + poss, feature_idxs = activation_deltas.nonzero(as_tuple=True) + new_values = activation_deltas[poss, feature_idxs] + + decoder_vectors = self.transcoders._module._get_decoder_vectors( # type: ignore + layer, feature_idxs + ) + + # Handle both 2D [n_feature_idxs, d_model] and 3D + # [n_feature_idxs, n_remaining_layers, d_model] cases + if decoder_vectors.ndim == 2: + # Single-layer transcoder case: [n_feature_idxs, d_model] + decoder_vectors = decoder_vectors * 
new_values.unsqueeze(1) + layer_deltas[layer].index_add_(0, poss, decoder_vectors) + else: + # Cross-layer transcoder case: [n_feature_idxs, n_remaining_layers, d_model] + decoder_vectors = decoder_vectors * new_values.unsqueeze(-1).unsqueeze(-1) + + # Transpose to [n_remaining_layers, n_feature_idxs, d_model] + decoder_vectors = decoder_vectors.transpose(0, 1) + + # Distribute decoder vectors across layers + n_remaining_layers = decoder_vectors.shape[0] + layer_deltas[-n_remaining_layers:].index_add_(1, poss, decoder_vectors) + + if constrained_layers is None or layer in constrained_layers: + if direct_effects_barrier: + direct_effects_barrier() + transcoder_output = self.get_feature_output_loc(layer).output # type: ignore + transcoder_output[:] = transcoder_output + layer_deltas[layer] # type: ignore + layer_deltas[layer] *= 0 + + return save(self.output.logits) + + @torch.no_grad + def feature_intervention( + self, + inputs: str | torch.Tensor, + interventions: Sequence[Intervention], + constrained_layers: range | None = None, + freeze_attention: bool = True, + apply_activation_function: bool = True, + sparse: bool = False, + return_activations: bool = True, + ) -> tuple[torch.Tensor, torch.Tensor | None]: + """Given the input, and a dictionary of features to intervene on, performs the + intervention, allowing all effects to propagate (optionally allowing its effects to + propagate through transcoders) + + Args: + input (_type_): the input prompt to intervene on + interventions (Sequence[Intervention]): A list of interventions to perform. + Each entry should be a tuple (layer, position, feature_idx, value) + constrained_layers (range | None): Whether to apply interventions only to a + certain layer range. Mostly applicable to CLTs. If the given range includes + all model layers, we also freeze LayerNorm denominators to compute direct + effects. None means no constraints (iterative patching). 
+ apply_activation_function (bool): whether to apply the activation function when + recording the activations to be returned. This is useful to set to False for + testing purposes, as attribution predicts the change in pre-activation + feature values. + sparse (bool): whether to sparsify the activations in the returned cache. Setting + this to True will take up less memory, at the expense of slower interventions. + return_activations (bool): Whether to compute and return feature activations. If False, + activation computation is skipped for layers not being intervened on (when + constrained_layers is not set), saving time. Activations are not returned. + Defaults to True. + """ + activation_matrix, activation_fn = self.get_activation_fn( + apply_activation_function=apply_activation_function, sparse=sparse + ) + + if (freeze_attention or constrained_layers) and interventions: + original_activations, freeze_fns = self.setup_intervention_with_freeze( + inputs, constrained_layers=constrained_layers + ) + else: + original_activations, freeze_fns = None, [] + + intervention_layers = set() + for layer, _, _, _ in interventions: + if isinstance(layer, torch.Tensor): + layer = layer.item() + intervention_layers.add(layer) + + activation_layers = None if return_activations else sorted(list(intervention_layers)) # type:ignore + + with self.trace() as tracer: + activation_barrier = None if constrained_layers else tracer.barrier(2) + direct_effects_barrier = tracer.barrier(2) if constrained_layers else None + + with tracer.invoke(inputs): + _, activation_cache = activation_fn( + barrier=activation_barrier, # type:ignore + barrier_layers=intervention_layers, + activation_layers=activation_layers, + ) + activation_cache = save(activation_cache) # type:ignore + + for freeze_fn in freeze_fns: + with tracer.invoke(): + freeze_fn(direct_effects_barrier=direct_effects_barrier) + + with tracer.invoke(): + cached_logits = self._perform_feature_intervention( + inputs, + interventions, + 
activation_matrix, # type: ignore + original_activations, + activation_barrier, + direct_effects_barrier, + constrained_layers, + using_past_kv_cache_idx=None, + apply_activation_function=apply_activation_function, + ) + + return cached_logits, activation_cache if return_activations else None + + def _convert_open_ended_interventions( + self, + interventions: Sequence[Intervention], + ) -> Sequence[Intervention]: + """Convert open-ended interventions into position-0 equivalents. + + An intervention is *open-ended* if its position component is a ``slice`` whose + ``stop`` attribute is ``None`` (e.g. ``slice(1, None)``). Such interventions will + also apply to tokens generated in an open-ended generation loop. In such cases, + when use_past_kv_cache=True, the model only runs the most recent token + (and there is thus only 1 position). + """ + converted = [] + for layer, pos, feature_idx, value in interventions: + if isinstance(pos, slice) and pos.stop is None: + converted.append((layer, 0, feature_idx, value)) + return converted + + @torch.no_grad + def feature_intervention_generate( + self, + inputs: str | torch.Tensor, + interventions: Sequence[Intervention], + constrained_layers: range | None = None, + freeze_attention: bool = True, + apply_activation_function: bool = True, + sparse: bool = False, + return_activations: bool = True, + **kwargs, + ) -> tuple[str, torch.Tensor, torch.Tensor | None]: + """Given the input and a dictionary of features to intervene on, this + performs the intervention and generates a continuation. It returns the + logits and activations at each generation position. This function accepts + additional kwargs valid for HookedTransformer.generate(). Note that + `freeze_attention` applies only to the first token generated. + + If `kv_cache` is True (default), generation is faster because the model + caches KV pairs and only processes the new token per step. If False, + the model performs a full forward pass across all tokens. 
Due to numerical + precision, logits/activations from `feature_intervention_generate(...)` + may differ from `feature_intervention(...)` unless `kv_cache` is False. + + Args: + input (_type_): the input prompt to intervene on + interventions (list[tuple[int, Union[int, slice, torch.Tensor]], int, + int | torch.Tensor]): A list of interventions to perform, formatted as + a list of (layer, position, feature_idx, value) + constrained_layers: (range | None = None): Whether to freeze MLPs and + transcoders, attention patterns, and LayerNorm denominators for a layer + range. This applies only to the first token generated. + freeze_attention (bool): Whether to freeze all attention patterns (applies to + the first token generated). + apply_activation_function (bool): whether to apply the activation function when + recording the activations to be returned. This is useful to set to False for + testing purposes, as attribution predicts the change in pre-activation + feature values. + sparse (bool): whether to sparsify the activations in the returned cache. Setting + this to True will take up less memory, at the expense of slower interventions. + return_activations (bool): Whether to compute and return feature activations. If False, + activation computation is skipped for layers not being intervened on (when + constrained_layers is not set), saving time. Returns None for activations. + Defaults to True. + """ + + # remove verbose kwarg, which is valid for TL models but not NNsight ones. 
+ kwargs.pop("verbose", None) + + tokenizer = self.tokenizer + converted_interventions = self._convert_open_ended_interventions(interventions) + + activation_matrix, activation_fn = self.get_activation_fn( + apply_activation_function=apply_activation_function, + append=True, + sparse=sparse, + ) + + if (freeze_attention or constrained_layers) and interventions: + original_activations, freeze_fns = self.setup_intervention_with_freeze( + inputs, constrained_layers=constrained_layers + ) + else: + original_activations, freeze_fns = None, [] + + intervention_layers = set() + for layer, _, _, _ in interventions: + if isinstance(layer, torch.Tensor): + layer = layer.item() + intervention_layers.add(layer) + + converted_intervention_layers = set() + for layer, _, _, _ in converted_interventions: + if isinstance(layer, torch.Tensor): + layer = layer.item() + converted_intervention_layers.add(layer) + + activation_cache = [None] + + with self.generate(**kwargs) as tracer: + activation_barrier = tracer.barrier(2) + direct_effects_barrier = tracer.barrier(2) if constrained_layers else None + + with tracer.invoke(inputs): + with tracer.iter[:] as act_idx: + current_intervention_layers = ( + intervention_layers if act_idx == 0 else converted_intervention_layers + ) + activation_layers = ( + None + if return_activations + else list(sorted(list(current_intervention_layers))) + ) # type:ignore + current_act_barrier = ( + None if constrained_layers and act_idx == 0 else activation_barrier + ) + + _, iter_activation_cache = activation_fn( + barrier=current_act_barrier, # type:ignore + barrier_layers=current_intervention_layers, + activation_layers=activation_layers, + ) + activation_cache[0] = save(iter_activation_cache) + + for freeze_fn in freeze_fns: + with tracer.invoke(): + with tracer.iter[:1]: + freeze_fn(direct_effects_barrier=direct_effects_barrier) + + all_logits = save(list()) # type: ignore + with tracer.invoke(): + with tracer.iter[:] as idx: + logits = 
self._perform_feature_intervention( + inputs=inputs, + interventions=(interventions if idx == 0 else converted_interventions), + activation_matrix=activation_matrix, # type: ignore + original_activations=original_activations, + activation_barrier=activation_barrier, + direct_effects_barrier=(direct_effects_barrier if idx == 0 else None), + constrained_layers=constrained_layers if idx == 0 else None, + using_past_kv_cache_idx=idx, # type: ignore + apply_activation_function=apply_activation_function, + ) + all_logits.append(logits.squeeze(0)) + + with tracer.invoke(): + out = save(self.generator.output) + return ( + tokenizer.decode(out.squeeze(0)), + torch.cat(all_logits, dim=0), + (activation_cache[0] if return_activations else None), + ) + + # ------------------------------------------------------------------ + # Dynamic hook location properties + # ------------------------------------------------------------------ + + def get_feature_input_loc(self, layer: int): + """ + Returns a feature input location wrapped in an EnvoyWrapper. + + Some feature inputs expose `.input` while others expose `.output`. An + EnvoyWrapper normalizes this so that `.output` always returns the + relevant value. 
+ """ + return EnvoyWrapper( + self._resolve_attr(self, self._feature_input_pattern.format(layer=layer)), + self._feature_input_io, # type: ignore + ) + + @property + def feature_input_locs(self) -> Iterator[nn.Module]: + """Dynamically resolve the MLP input hook locations for every layer.""" + for layer in range(self.cfg.n_layers): # type: ignore + yield self.get_feature_input_loc(layer) # type: ignore + + def get_feature_output_loc(self, layer: int): + return self._resolve_attr(self, self._feature_output_pattern.format(layer=layer)) + + @property + def feature_output_locs(self) -> Iterator[nn.Module]: + """Dynamically resolve the MLP output hook locations for every layer.""" + for layer in range(self.cfg.n_layers): # type: ignore + yield self.get_feature_output_loc(layer) # type: ignore + + @property + def attention_locs(self) -> Iterator[nn.Module]: + """Dynamically resolve the attention pattern hook locations for every layer.""" + for layer in range(self.cfg.n_layers): # type: ignore + yield self._resolve_attr(self, self._attention_pattern.format(layer=layer)) # type: ignore + + @property + def layernorm_scale_locs(self) -> list[Iterator[nn.Module]]: + """Dynamically resolve the LayerNorm scale hook locations (can be per-layer or shared).""" + locs = [] + for pattern in self._layernorm_scale_patterns: + if "{layer}" in pattern: + + def layer_iterator(p=pattern): + for layer in range(self.cfg.n_layers): # type: ignore + yield self._resolve_attr(self, p.format(layer=layer)) + + locs.append(layer_iterator()) + else: + + def single_iterator(p=pattern): + yield self._resolve_attr(self, p) + + locs.append(single_iterator()) + return locs + + @property + def pre_logit_location(self) -> nn.Module: + """Dynamically resolve the pre-logit hook location.""" + return self._resolve_attr(self, self._pre_logit_location) # type: ignore + + @property + def embed_location(self) -> nn.Module: + """Dynamically resolve the embed hook location.""" + return self._resolve_attr(self, 
self._embed_location) # type: ignore diff --git a/circuit_tracer/replacement_model.py b/circuit_tracer/replacement_model/replacement_model_transformerlens.py similarity index 82% rename from circuit_tracer/replacement_model.py rename to circuit_tracer/replacement_model/replacement_model_transformerlens.py index d804e77f..84f2d33b 100644 --- a/circuit_tracer/replacement_model.py +++ b/circuit_tracer/replacement_model/replacement_model_transformerlens.py @@ -1,17 +1,17 @@ import warnings from collections import defaultdict +from collections.abc import Sequence from contextlib import contextmanager from functools import partial -from collections.abc import Callable, Sequence +from typing import Callable, Literal import torch import torch.nn.functional as F from torch import nn from transformer_lens import HookedTransformer, HookedTransformerConfig from transformer_lens.hook_points import HookPoint -from transformers.tokenization_utils_base import PreTrainedTokenizerBase -from circuit_tracer.attribution.context import AttributionContext +from circuit_tracer.attribution.context_transformerlens import AttributionContext from circuit_tracer.transcoder import TranscoderSet from circuit_tracer.transcoder.cross_layer_transcoder import CrossLayerTranscoder from circuit_tracer.utils import get_default_device @@ -19,7 +19,10 @@ # Type definition for an intervention tuple (layer, position, feature_idx, value) Intervention = tuple[ - int | torch.Tensor, int | slice | torch.Tensor, int | torch.Tensor, float | torch.Tensor + int | torch.Tensor, + int | slice | torch.Tensor, + int | torch.Tensor, + int | float | torch.Tensor, ] @@ -61,13 +64,13 @@ def forward(self, x): return self.hook_post(x) -class ReplacementModel(HookedTransformer): +class TransformerLensReplacementModel(HookedTransformer): transcoders: TranscoderSet | CrossLayerTranscoder # Support both types feature_input_hook: str feature_output_hook: str skip_transcoder: bool scan: str | list[str] | None - tokenizer: 
PreTrainedTokenizerBase + backend: Literal["transformerlens"] @classmethod def from_config( @@ -75,15 +78,16 @@ def from_config( config: HookedTransformerConfig, transcoders: TranscoderSet | CrossLayerTranscoder, # Accept both **kwargs, - ) -> "ReplacementModel": - """Create a ReplacementModel from a given HookedTransformerConfig and TranscoderSet + ) -> "TransformerLensReplacementModel": + """Create a TransformerLensReplacementModel from a HookedTransformerConfig + and a TranscoderSet. Args: config (HookedTransformerConfig): the config of the HookedTransformer transcoders (TranscoderSet): The transcoder set with configuration Returns: - ReplacementModel: The loaded ReplacementModel + TransformerLensReplacementModel: The loaded TransformerLensReplacementModel """ model = cls(config, **kwargs) model._configure_replacement_model(transcoders) @@ -95,15 +99,16 @@ def from_pretrained_and_transcoders( model_name: str, transcoders: TranscoderSet | CrossLayerTranscoder, # Accept both **kwargs, - ) -> "ReplacementModel": - """Create a ReplacementModel from the name of HookedTransformer and TranscoderSet + ) -> "TransformerLensReplacementModel": + """Create a TransformerLensReplacementModel from a HookedTransformer name + and a TranscoderSet. 
Args: model_name (str): the name of the pretrained HookedTransformer transcoders (TranscoderSet): The transcoder set with configuration Returns: - ReplacementModel: The loaded ReplacementModel + TransformerLensReplacementModel: The loaded TransformerLensReplacementModel """ model = super().from_pretrained( model_name, @@ -122,12 +127,10 @@ def from_pretrained( model_name: str, transcoder_set: str, device: torch.device | None = None, - dtype: torch.dtype = torch.float32, - lazy_encoder: bool = False, - lazy_decoder: bool = True, + dtype: torch.dtype | None = torch.float32, **kwargs, - ) -> "ReplacementModel": - """Create a ReplacementModel from model name and transcoder config + ) -> "TransformerLensReplacementModel": + """Create a TransformerLensReplacementModel from model name and transcoder config Args: model_name (str): the name of the pretrained HookedTransformer @@ -143,18 +146,15 @@ def from_pretrained( **kwargs: Additional keyword arguments passed to HookedTransformer.from_pretrained Returns: - ReplacementModel: The loaded ReplacementModel + TransformerLensReplacementModel: The loaded TransformerLensReplacementModel """ if device is None: device = get_default_device() - transcoders, _ = load_transcoder_from_hub( - transcoder_set, - device=device, - dtype=dtype, - lazy_encoder=lazy_encoder, - lazy_decoder=lazy_decoder, - ) + ( + transcoders, + _, + ) = load_transcoder_from_hub(transcoder_set, device=device, dtype=dtype) # type: ignore return cls.from_pretrained_and_transcoders( model_name, @@ -165,6 +165,7 @@ def from_pretrained( ) def _configure_replacement_model(self, transcoder_set: TranscoderSet | CrossLayerTranscoder): + self.backend = "transformerlens" transcoder_set.to(self.cfg.device, self.cfg.dtype) self.transcoders = transcoder_set @@ -184,12 +185,8 @@ def _configure_replacement_model(self, transcoder_set: TranscoderSet | CrossLaye self.setup() def _configure_gradient_flow(self): - if isinstance(self.transcoders, TranscoderSet): - for layer, 
transcoder in enumerate(self.transcoders): - self._configure_skip_connection(self.blocks[layer], transcoder) - else: - for layer in range(self.cfg.n_layers): - self._configure_skip_connection(self.blocks[layer], self.transcoders) + for layer in range(self.cfg.n_layers): + self._configure_skip_connection(self.blocks[layer], self.transcoders, layer) def stop_gradient(acts, hook): return acts.detach() @@ -207,13 +204,15 @@ def stop_gradient(acts, hook): for param in self.parameters(): param.requires_grad = False - def enable_gradient(tensor, hook): - tensor.requires_grad = True - return tensor + def enable_gradient(acts, hook): + acts.requires_grad = True + return acts - self.hook_embed.add_hook(enable_gradient, is_permanent=True) + self.hook_embed.add_hook(enable_gradient, is_permanent=True) # type: ignore - def _configure_skip_connection(self, block, transcoder): + def _configure_skip_connection( + self, block, transcoders: TranscoderSet | CrossLayerTranscoder, layer: int + ): cached = {} def cache_activations(acts, hook): @@ -224,8 +223,8 @@ def add_skip_connection(acts: torch.Tensor, hook: HookPoint, grad_hook: HookPoin # of this function. If we put the backwards hook here at hook, the grads will be 0 # because we detached acts. 
skip_input_activation = cached.pop("acts") - if hasattr(transcoder, "W_skip") and transcoder.W_skip is not None: - skip = transcoder.compute_skip(skip_input_activation) + if transcoders.skip_connection: + skip = transcoders.compute_skip(layer, skip_input_activation) else: skip = skip_input_activation * 0 return grad_hook(skip + (acts - skip).detach()) @@ -289,6 +288,10 @@ def cache_activations(acts, hook, layer): .detach() .squeeze(0) ) + + if not append: + transcoder_acts[0] = 0 + if sparse: transcoder_acts = transcoder_acts.to_sparse() @@ -366,7 +369,9 @@ def ensure_tokenized(self, prompt: str | torch.Tensor | list[int]) -> torch.Tens """ if isinstance(prompt, str): - tokens = self.tokenizer(prompt, return_tensors="pt").input_ids.squeeze(0) + tokens = self.tokenizer( + prompt, return_tensors="pt", add_special_tokens=False + ).input_ids.squeeze(0) # type: ignore elif isinstance(prompt, torch.Tensor): tokens = prompt.squeeze() elif isinstance(prompt, list): @@ -378,22 +383,22 @@ def ensure_tokenized(self, prompt: str | torch.Tensor | list[int]) -> torch.Tens raise ValueError(f"Tensor must be 1-D, got shape {tokens.shape}") # Check if a special token is already present at the beginning - if tokens[0] in self.tokenizer.all_special_ids: + if tokens[0] in self.tokenizer.all_special_ids: # type: ignore return tokens.to(self.cfg.device) # Prepend a special token to avoid artifacts at position 0 candidate_bos_token_ids = [ - self.tokenizer.bos_token_id, - self.tokenizer.pad_token_id, - self.tokenizer.eos_token_id, + self.tokenizer.bos_token_id, # type: ignore + self.tokenizer.pad_token_id, # type: ignore + self.tokenizer.eos_token_id, # type: ignore ] - candidate_bos_token_ids += self.tokenizer.all_special_ids + candidate_bos_token_ids += self.tokenizer.all_special_ids # type: ignore dummy_bos_token_id = next(filter(None, candidate_bos_token_ids)) if dummy_bos_token_id is None: warnings.warn( "No suitable special token found for BOS token replacement. 
" - "The first token will be ignored." + "The first token will be ignored.", ) else: tokens = torch.cat([torch.tensor([dummy_bos_token_id], device=tokens.device), tokens]) @@ -450,17 +455,18 @@ def setup_attribution(self, inputs: str | torch.Tensor): ) def setup_intervention_with_freeze( - self, inputs: str | torch.Tensor, constrained_layers: range | None = None + self, + inputs: str | torch.Tensor, + constrained_layers: range | None = None, ) -> tuple[torch.Tensor, list[tuple[str, Callable]]]: """Sets up an intervention with either frozen attention + LayerNorm(default) or frozen attention, LayerNorm, and MLPs, for constrained layers Args: - inputs (Union[str, torch.Tensor]): The inputs to intervene on - constrained_layers (range | None): whether to apply interventions only to a certain - range. Mostly applicable to CLTs. If the given range includes all model layers, - we also freeze layernorm denominators, computing direct effects. None means no - constraints (iterative patching) + inputs (str | torch.Tensor): The inputs to intervene on + constrained_layers: (tuple[int,int] | range | None = None): Whether to freeze + attention, LayerNorm, and MLPs within a specified layer range. Defaults + to None. Returns: list[tuple[str, Callable]]: The freeze hooks needed to run the desired intervention. @@ -518,22 +524,26 @@ def diff_hook(activations, hook, layer: int): # The MLP hook out freeze hook sets the value of the MLP to the value it # had when run on the inputs normally. We subtract out the skip that # corresponds to such a run, and add in the skip with direct effects. 
- assert not isinstance(self.transcoders, CrossLayerTranscoder), "Skip CLTs forbidden" - frozen_skip = self.transcoders[layer].compute_skip(freeze_cache[hook.name]) - normal_skip = self.transcoders[layer].compute_skip(activations) + frozen_skip = self.transcoders.compute_skip(layer, freeze_cache[hook.name]) + normal_skip = self.transcoders.compute_skip(layer, activations) skip_diffs[layer] = normal_skip - frozen_skip def add_diff_hook(activations, hook, layer: int): - # open-ended generation case return activations + skip_diffs[layer] fwd_hooks += [ - (f"blocks.{layer}.{self.feature_input_hook}", partial(diff_hook, layer=layer)) + ( + f"blocks.{layer}.{self.feature_input_hook}", + partial(diff_hook, layer=layer), + ) for layer in constrained_layers ] fwd_hooks += [ - (f"blocks.{layer}.{self.feature_output_hook}", partial(add_diff_hook, layer=layer)) + ( + f"blocks.{layer}.{self.feature_output_hook}", + partial(add_diff_hook, layer=layer), + ) for layer in constrained_layers ] return torch.stack(original_activations), fwd_hooks @@ -555,13 +565,12 @@ def _get_feature_intervention_hooks( Args: input (_type_): the input prompt to intervene on - intervention_dict (Sequence[Intervention]): A list of interventions to perform, - formatted as a list of (layer, position, feature_idx, value) - constrained_layers (range | None): whether to apply interventions only to a certain - range, freezing all MLPs within the layer range before doing so. This is mostly - applicable to CLTs. If the given range includes all model layers, we also freeze - layernorm denominators, computing direct effects.nNone means no constraints - (iterative patching) + intervention_dict (list[Intervention]): A list of interventions to perform, formatted as + a list of (layer, position, feature_idx, value) + constrained_layers (range | tuple | None): Whether to apply interventions only + to a certain layer range. Mostly applicable to CLTs. 
If the given range + includes all model layers, we also freeze LayerNorm denominators to compute + direct effects. None means no constraints (iterative patching). apply_activation_function (bool): whether to apply the activation function when recording the activations to be returned. This is useful to set to False for testing purposes, as attribution predicts the change in pre-activation @@ -595,7 +604,7 @@ def _get_feature_intervention_hooks( if isinstance(inputs, torch.Tensor): n_pos = inputs.size(0) else: - n_pos = len(self.tokenizer(inputs).input_ids) + n_pos = len(self.tokenizer(inputs).input_ids) # type: ignore layer_deltas = torch.zeros( [self.cfg.n_layers, n_pos, self.cfg.d_model], @@ -672,15 +681,22 @@ def intervention_hook(activations, hook, layer: int): delta_hooks = [ ( f"blocks.{layer}.{self.feature_output_hook}", - partial(calculate_delta_hook, layer=layer, layer_interventions=layer_interventions), + partial( + calculate_delta_hook, + layer=layer, + layer_interventions=layer_interventions, + ), ) for layer, layer_interventions in interventions_by_layer.items() ] intervention_range = constrained_layers if constrained_layers else range(self.cfg.n_layers) intervention_hooks = [ - (f"blocks.{layer}.{self.feature_output_hook}", partial(intervention_hook, layer=layer)) - for layer in range(self.cfg.n_layers) + ( + f"blocks.{layer}.{self.feature_output_hook}", + partial(intervention_hook, layer=layer), + ) + for layer in intervention_range ] all_hooks = freeze_hooks + activation_hooks + delta_hooks + intervention_hooks @@ -714,22 +730,20 @@ def feature_intervention( sparse: bool = False, return_activations: bool = True, ) -> tuple[torch.Tensor, torch.Tensor | None]: - """Given the input, and a dictionary of features to intervene on, performs the - intervention, and returns the logits and feature activations. If freeze_attention or - constrained_layers is True, attention patterns will be frozen, along with MLPs and - LayerNorms. 
If constrained_layers is set, the effects of intervention will not propagate - through the constrained layers, and CLTs will write only to those layers. Otherwise, the - effects of the intervention will propagate through transcoders / LayerNorms + """Given the input and a dictionary of features to intervene on, this performs + the intervention and returns logits and feature activations. If `freeze_attention` + or `constrained_layers` is True, attention patterns, MLPs and LayerNorms may be + frozen. When `constrained_layers` is set, effects do not propagate through those + layers (useful for CLTs). Otherwise, effects propagate through transcoders and + LayerNorms. Args: input (_type_): the input prompt to intervene on - interventions (list[tuple[int, Union[int, slice, torch.Tensor]], int, - Union[int, torch.Tensor]]): A list of interventions to perform, formatted as + interventions (list[tuple[int, int, slice | torch.Tensor], int, + int | torch.Tensor]): A list of interventions to perform, formatted as a list of (layer, position, feature_idx, value) - constrained_layers (range | None): whether to apply interventions only to a certain - range. Mostly applicable to CLTs. If the given range includes all model layers, - we also freeze layernorm denominators, computing direct effects. None means no - constraints (iterative patching) + constrained_layers (range | tuple | None): Whether to apply interventions only + to a specific range. Mostly applicable to CLTs. freeze_attention (bool): whether to freeze all attention patterns an layernorms apply_activation_function (bool): whether to apply the activation function when recording the activations to be returned. This is useful to set to False for @@ -739,7 +753,7 @@ def feature_intervention( this to True will take up less memory, at the expense of slower interventions. return_activations (bool): Whether to compute and return feature activations. 
If False, activation computation is skipped for layers not being intervened on (when - constrained_layers is not set), saving time. Returns None for activations. + constrained_layers is not set), saving time. Activations are not returned. Defaults to True. """ @@ -756,17 +770,14 @@ def feature_intervention( with self.hooks(hooks): # type: ignore logits = self(inputs) - if return_activations: - activation_cache = torch.stack(activation_cache) - else: - activation_cache = None + activation_cache = torch.stack(activation_cache) if return_activations else None return logits, activation_cache def _convert_open_ended_interventions( self, interventions: Sequence[Intervention], - ) -> Sequence[Intervention]: + ) -> list[Intervention]: """Convert open-ended interventions into position-0 equivalents. An intervention is *open-ended* if its position component is a ``slice`` whose @@ -799,27 +810,22 @@ def feature_intervention_generate( This function accepts all kwargs valid for HookedTransformer.generate(). Note that freeze_attention applies only to the first token generated. - This function accepts all kwargs valid for HookedTransformer.generate(). Note that - direct_effects and freeze_attention apply only to the first token generated. - - Note that if kv_cache is True (default), generation will be faster, as the model - will cache the KVs, and only process the one new token per step; if it is False, - the model will generate by doing a full forward pass across all tokens. Note that - due to numerical precision issues, you are only guaranteed that the logits / - activations of model.feature_intervention_generate(s, ...) are equivalent to - model.feature_intervention(s, ...) if kv_cache is False. + Note that if `kv_cache` is True (default), generation will be faster because + the model caches KV pairs and only processes the new token per step. If + `kv_cache` is False, the model does a full forward pass across all tokens. 
+ Due to numerical precision, logits/activations from + `feature_intervention_generate(...)` may differ from `feature_intervention(...)` + unless `kv_cache` is False. Args: input (_type_): the input prompt to intervene on - interventions (list[tuple[int, Union[int, slice, torch.Tensor]], int, - Union[int, torch.Tensor]]): A list of interventions to perform, formatted as + interventions (list[tuple[int, int, slice | torch.Tensor], int, + int | torch.Tensor]): A list of interventions to perform, formatted as a list of (layer, position, feature_idx, value) - constrained_layers: (range | None = None): whether to freeze all MLPs/transcoders / - attn patterns / layernorm denominators. This will only apply to the very first - token generated. If all layers are constrained, also freezes layernorm, computing - direct effects. - freeze_attention (bool): whether to freeze all attention patterns. Applies only to - the first token generated + constrained_layers: (tuple[int,int] | range | None = None): Whether to freeze + MLPs/transcoders, attention patterns, and LayerNorm denominators for a + layer range. This only applies to the first token generated. + freeze_attention (bool): whether to freeze all attention patterns. apply_activation_function (bool): whether to apply the activation function when recording the activations to be returned. 
This is useful to set to False for testing purposes, as attribution predicts the change in pre-activation @@ -880,11 +886,8 @@ def clear_and_add_hooks(tensor, hook): generation: str = self.generate(inputs, **kwargs) # type:ignore self.reset_hooks() - logits = torch.cat((logit_cache[0], *open_ended_logits), dim=1) # type:ignore - open_ended_activations = torch.stack( - [torch.cat(acts, dim=0) for acts in open_ended_activations], # type:ignore - dim=0, - ) + logits = torch.cat((logit_cache[0][:, -1:], *open_ended_logits), dim=1) # type:ignore + if return_activations: activation_cache = torch.stack(activation_cache) if open_ended_activations and any(acts for acts in open_ended_activations): @@ -901,7 +904,7 @@ def clear_and_add_hooks(tensor, hook): else: activations = None - return generation, logits, activations + return generation, logits.squeeze(0), activations def __del__(self): # Prevent memory leaks diff --git a/circuit_tracer/utils/__init__.py b/circuit_tracer/utils/__init__.py index 08b61482..e23c6fd5 100644 --- a/circuit_tracer/utils/__init__.py +++ b/circuit_tracer/utils/__init__.py @@ -1,6 +1,5 @@ import torch - -from circuit_tracer.utils.create_graph_files import create_graph_files +from circuit_tracer.utils.create_graph_files import create_graph_files as create_graph_files def get_default_device() -> torch.device: @@ -8,4 +7,4 @@ def get_default_device() -> torch.device: return torch.device("cuda" if torch.cuda.is_available() else "cpu") -__all__ = ["create_graph_files"] +__all__ = ["create_graph_files", "get_default_device"] diff --git a/circuit_tracer/utils/create_graph_files.py b/circuit_tracer/utils/create_graph_files.py index 34a06d7f..568dd17a 100644 --- a/circuit_tracer/utils/create_graph_files.py +++ b/circuit_tracer/utils/create_graph_files.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import logging import os import time @@ -8,13 +10,19 @@ from circuit_tracer.frontend.graph_models import Metadata, Model, Node, QParams from 
circuit_tracer.frontend.utils import add_graph_metadata -from circuit_tracer.graph import Graph, prune_graph +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from circuit_tracer.graph import Graph + logger = logging.getLogger(__name__) -def load_graph_data(file_path) -> Graph: +def load_graph_data(file_path) -> "Graph": """Load graph data from a PyTorch file.""" + from circuit_tracer.graph import Graph + start_time = time.time() graph = Graph.from_pt(file_path) time_ms = (time.time() - start_time) * 1000 @@ -22,7 +30,7 @@ def load_graph_data(file_path) -> Graph: return graph -def create_nodes(graph: Graph, node_mask, tokenizer, cumulative_scores): +def create_nodes(graph: "Graph", node_mask, tokenizer, cumulative_scores): """Create all nodes for the graph.""" start_time = time.time() @@ -74,7 +82,7 @@ def create_nodes(graph: Graph, node_mask, tokenizer, cumulative_scores): return nodes -def create_used_nodes_and_edges(graph: Graph, nodes, edge_mask): +def create_used_nodes_and_edges(graph: "Graph", nodes, edge_mask): """Filter to only used nodes and create edges.""" start_time = time.time() edges = edge_mask.numpy() @@ -108,7 +116,7 @@ def create_used_nodes_and_edges(graph: Graph, nodes, edge_mask): return used_nodes, used_edges -def build_model(graph: Graph, used_nodes, used_edges, slug, scan, node_threshold, tokenizer): +def build_model(graph: "Graph", used_nodes, used_edges, slug, scan, node_threshold, tokenizer): """Build the full model object.""" start_time = time.time() @@ -151,13 +159,16 @@ def build_model(graph: Graph, used_nodes, used_edges, slug, scan, node_threshold def create_graph_files( - graph_or_path: Graph | str, + graph_or_path: "Graph" | str, slug: str, output_path, scan=None, node_threshold=0.8, edge_threshold=0.98, ): + # Import Graph/prune_graph locally to avoid circular import at module import time + from circuit_tracer.graph import Graph, prune_graph + total_start_time = time.time() if isinstance(graph_or_path, Graph): diff --git 
a/circuit_tracer/utils/salient_logits.py b/circuit_tracer/utils/salient_logits.py new file mode 100644 index 00000000..5e4d397b --- /dev/null +++ b/circuit_tracer/utils/salient_logits.py @@ -0,0 +1,47 @@ +import torch + + +@torch.no_grad() +def compute_salient_logits( + logits: torch.Tensor, + unembed_proj: torch.Tensor, + *, + max_n_logits: int = 10, + desired_logit_prob: float = 0.95, +) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Pick the smallest logit set whose cumulative prob >= *desired_logit_prob*. + + Args: + logits: ``(d_vocab,)`` vector (single position). + unembed_proj: ``(d_model, d_vocab)`` unembedding matrix. + max_n_logits: Hard cap *k*. + desired_logit_prob: Cumulative probability threshold *p*. + + Returns: + tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + * logit_indices - ``(k,)`` vocabulary ids. + * logit_probs - ``(k,)`` softmax probabilities. + * demeaned_vecs - ``(k, d_model)`` unembedding columns, demeaned. + """ + + probs = torch.softmax(logits, dim=-1) + top_p, top_idx = torch.topk(probs, max_n_logits) + cutoff = int(torch.searchsorted(torch.cumsum(top_p, 0), desired_logit_prob)) + 1 + top_p, top_idx = top_p[:cutoff], top_idx[:cutoff] + + # unembed_proj can be presented as (d_model, d_vocab) or its transpose (d_vocab, d_model). + # We determine which axis corresponds to the vocabulary by matching against the logits length. + + if unembed_proj.shape[0] == logits.shape[0]: + # Shape is (d_vocab, d_model) – first axis is vocabulary. + cols = unembed_proj[top_idx] # (k, d_model) + demean = unembed_proj.mean(dim=0, keepdim=True) # (1, d_model) + demeaned_vecs = cols - demean # (k, d_model) + + else: + # Shape is (d_model, d_vocab) – second axis is vocabulary. 
+ cols = unembed_proj[:, top_idx] # (d_model, k) + demean = unembed_proj.mean(dim=-1, keepdim=True) # (d_model, 1) + demeaned_vecs = (cols - demean).T # (k, d_model) + + return top_idx, top_p, demeaned_vecs diff --git a/circuit_tracer/utils/tl_nnsight_mapping.py b/circuit_tracer/utils/tl_nnsight_mapping.py new file mode 100644 index 00000000..06a4215e --- /dev/null +++ b/circuit_tracer/utils/tl_nnsight_mapping.py @@ -0,0 +1,283 @@ +from dataclasses import dataclass +from typing import Any, Literal + + +@dataclass +class TransformerLens_NNSight_Mapping: + """Mapping specifying important locations in NNSight models, as well as mapping + from TL Hook Points to NNSight locations""" + + model_architecture: str # HuggingFace model architecture + attention_location_pattern: str # Location of the attention patterns + layernorm_scale_location_patterns: list[str] # Location of the Layernorm denominators + # Location immediately before logits (location from which we will attribute for logit tokens) + pre_logit_location: str + # Location of the embedding Module (location to which we will attribute for embeddings) + embed_location: str + embed_weight: str # Location of the embedding weight matrix + unembed_weight: str # Location of the unembedding weight matrix + # Mapping from (TransformerLens Hook) to a tuple representing an NNSight Envoy location, and + # whether we want its input or output + feature_hook_mapping: dict[str, tuple[str, Literal["input", "output"]]] + + +# Create an instance with the original configuration values +gemma_2_mapping = TransformerLens_NNSight_Mapping( + model_architecture="Gemma2ForCausalLM", + attention_location_pattern="model.layers[{layer}].self_attn.source.attention_interface_0.source.nn_functional_dropout_0", + layernorm_scale_location_patterns=[ + "model.layers[{layer}].input_layernorm.source.self__norm_0.source.torch_rsqrt_0", + "model.layers[{layer}].post_attention_layernorm.source.self__norm_0.source.torch_rsqrt_0", + 
"model.layers[{layer}].pre_feedforward_layernorm.source.self__norm_0.source.torch_rsqrt_0", + "model.layers[{layer}].post_feedforward_layernorm.source.self__norm_0.source.torch_rsqrt_0", + "model.norm.source.self__norm_0.source.torch_rsqrt_0", + ], + pre_logit_location="model", + embed_location="model.embed_tokens", + embed_weight="model.embed_tokens.weight", + unembed_weight="lm_head.weight", + feature_hook_mapping={ + "ln2.hook_normalized": ( + "model.layers[{layer}].pre_feedforward_layernorm.source.self__norm_0", + "output", + ), + "hook_resid_mid": ("model.layers[{layer}].pre_feedforward_layernorm", "input"), + "hook_mlp_out": ("model.layers[{layer}].post_feedforward_layernorm", "output"), + }, +) + +# Create an instance with the original configuration values +gemma_3_mapping = TransformerLens_NNSight_Mapping( + model_architecture="Gemma3ForCausalLM", + attention_location_pattern="model.layers[{layer}].self_attn.source.attention_interface_0.source.nn_functional_dropout_0", + layernorm_scale_location_patterns=[ + "model.layers[{layer}].input_layernorm.source.self__norm_0.source.torch_rsqrt_0", + "model.layers[{layer}].self_attn.q_norm.source.self__norm_0.source.torch_rsqrt_0", + "model.layers[{layer}].self_attn.k_norm.source.self__norm_0.source.torch_rsqrt_0", + "model.layers[{layer}].post_attention_layernorm.source.self__norm_0.source.torch_rsqrt_0", + "model.layers[{layer}].pre_feedforward_layernorm.source.self__norm_0.source.torch_rsqrt_0", + "model.layers[{layer}].post_feedforward_layernorm.source.self__norm_0.source.torch_rsqrt_0", + "model.norm.source.self__norm_0.source.torch_rsqrt_0", + ], + pre_logit_location="model", + embed_location="model.embed_tokens", + embed_weight="model.embed_tokens.weight", + unembed_weight="lm_head.weight", + feature_hook_mapping={ + "ln2.hook_normalized": ( + "model.layers[{layer}].pre_feedforward_layernorm.source.self__norm_0", + "output", + ), + "hook_resid_mid": ("model.layers[{layer}].pre_feedforward_layernorm", "input"), 
+ "mlp.hook_in": ("model.layers[{layer}].pre_feedforward_layernorm", "output"), + "hook_mlp_out": ("model.layers[{layer}].post_feedforward_layernorm", "output"), + }, +) + +gemma_3_conditional_mapping = TransformerLens_NNSight_Mapping( + model_architecture="Gemma3ForConditionalGeneration", + attention_location_pattern="language_model.layers[{layer}].self_attn.source.attention_interface_0.source.nn_functional_dropout_0", + layernorm_scale_location_patterns=[ + "language_model.layers[{layer}].input_layernorm.source.self__norm_0.source.torch_rsqrt_0", + "language_model.layers[{layer}].self_attn.q_norm.source.self__norm_0.source.torch_rsqrt_0", + "language_model.layers[{layer}].self_attn.k_norm.source.self__norm_0.source.torch_rsqrt_0", + "language_model.layers[{layer}].post_attention_layernorm.source.self__norm_0.source.torch_rsqrt_0", + "language_model.layers[{layer}].pre_feedforward_layernorm.source.self__norm_0.source.torch_rsqrt_0", + "language_model.layers[{layer}].post_feedforward_layernorm.source.self__norm_0.source.torch_rsqrt_0", + "language_model.norm.source.self__norm_0.source.torch_rsqrt_0", + ], + pre_logit_location="language_model", + embed_location="language_model.embed_tokens", + embed_weight="language_model.embed_tokens.weight", + unembed_weight="lm_head.weight", + feature_hook_mapping={ + "ln2.hook_normalized": ( + "language_model.layers[{layer}].pre_feedforward_layernorm.source.self__norm_0", + "output", + ), + "hook_resid_mid": ("language_model.layers[{layer}].pre_feedforward_layernorm", "input"), + "mlp.hook_in": ("language_model.layers[{layer}].pre_feedforward_layernorm", "output"), + "hook_mlp_out": ("language_model.layers[{layer}].post_feedforward_layernorm", "output"), + }, +) + +# Create an instance with the original configuration values +llama_3_mapping = TransformerLens_NNSight_Mapping( + model_architecture="LlamaForCausalLM", + 
attention_location_pattern="model.layers[{layer}].self_attn.source.attention_interface_0.source.nn_functional_dropout_0", + layernorm_scale_location_patterns=[ + "model.layers[{layer}].input_layernorm.source.mean_0", + "model.layers[{layer}].post_attention_layernorm.source.mean_0", + "model.norm.source.mean_0", + ], + pre_logit_location="model", + embed_location="model.embed_tokens", + embed_weight="model.embed_tokens.weight", + unembed_weight="lm_head.weight", + feature_hook_mapping={ + "hook_resid_mid": ("model.layers[{layer}].post_attention_layernorm", "input"), + "hook_mlp_out": ("model.layers[{layer}].mlp", "output"), + "mlp.hook_in": ("model.layers[{layer}].post_attention_layernorm", "output"), + "mlp.hook_out": ("model.layers[{layer}].mlp", "output"), + }, +) + +# Create an instance with the original configuration values +qwen_3_mapping = TransformerLens_NNSight_Mapping( + model_architecture="Qwen3ForCausalLM", + attention_location_pattern="model.layers[{layer}].self_attn.source.attention_interface_0.source.nn_functional_dropout_0", + layernorm_scale_location_patterns=[ + "model.layers[{layer}].input_layernorm.source.mean_0", + "model.layers[{layer}].post_attention_layernorm.source.mean_0", + "model.norm.source.mean_0", + ], + pre_logit_location="model", + embed_location="model.embed_tokens", + embed_weight="model.embed_tokens.weight", + unembed_weight="lm_head.weight", + feature_hook_mapping={ + "mlp.hook_in": ("model.layers[{layer}].post_attention_layernorm", "output"), + "mlp.hook_out": ("model.layers[{layer}].mlp", "output"), + }, +) + + +gpt_oss_mapping = TransformerLens_NNSight_Mapping( + model_architecture="GptOssForCausalLM", + attention_location_pattern="model.layers[{layer}].self_attn.source.attention_interface_0.source.nn_functional_dropout_0", + layernorm_scale_location_patterns=[ + "model.layers[{layer}].input_layernorm.source.mean_0", + "model.layers[{layer}].post_attention_layernorm.source.mean_0", + "model.norm.source.mean_0", + ], + 
pre_logit_location="model", + embed_location="model.embed_tokens", + embed_weight="model.embed_tokens.weight", + unembed_weight="lm_head.weight", + feature_hook_mapping={ + "hook_resid_mid": ("model.layers[{layer}].post_attention_layernorm", "input"), + "mlp.hook_in": ("model.layers[{layer}].post_attention_layernorm", "output"), + "mlp.hook_out": ("model.layers[{layer}].mlp.source.self_experts_0", "output"), + "hook_mlp_out": ("model.layers[{layer}].mlp.source.self_experts_0", "output"), + }, +) + + +def get_mapping(model_architecture: str) -> TransformerLens_NNSight_Mapping: + """Get the TransformerLens-NNSight mapping for a given model architecture. + + Args: + model_architecture: The model architecture name (e.g., 'Gemma2ForCausalLM', + 'Llama2ForCausalLM') + + Returns: + TransformerLens_NNSight_Mapping: The mapping configuration for the specified architecture + + Raises: + ValueError: If the model architecture is not supported + """ + mappings = { + mapping.model_architecture: mapping + for mapping in [ + gemma_2_mapping, + gemma_3_mapping, + gemma_3_conditional_mapping, + llama_3_mapping, + qwen_3_mapping, + gpt_oss_mapping, + ] + } + + if model_architecture not in mappings: + supported_architectures = list(mappings.keys()) + raise ValueError( + f"Unsupported model architecture: {model_architecture}. 
" + f"Supported architectures: {supported_architectures}" + ) + + return mappings[model_architecture] + + +@dataclass +class UnifiedConfig: + """A unified config class that supports both TransformerLens and NNsight field names.""" + + n_layers: int + d_model: int + d_head: int + n_heads: int + d_mlp: int + d_vocab: int + + tokenizer_name: str + model_name: str + original_architecture: str + + n_key_value_heads: int | None = None + dtype: Any | None = None + + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary, excluding None values.""" + return {k: v for k, v in self.__dict__.items() if v is not None} + + @classmethod + def from_dict(cls, config_dict: dict[str, Any]) -> "UnifiedConfig": + """Create from dictionary.""" + return cls( + n_layers=config_dict["n_layers"], + d_model=config_dict["d_model"], + d_head=config_dict["d_head"], + n_heads=config_dict["n_heads"], + d_mlp=config_dict["d_mlp"], + d_vocab=config_dict["d_vocab"], + tokenizer_name=config_dict["tokenizer_name"], + model_name=config_dict["model_name"], + original_architecture=config_dict["original_architecture"], + n_key_value_heads=config_dict.get("n_key_value_heads"), + dtype=config_dict.get("dtype"), + ) + + +def convert_nnsight_config_to_transformerlens(config): + """Convert NNsight config to TransformerLens config format. 
+ + Args: + config: NNsight config object or UnifiedConfig (pass-through) or HookedTransformerConfig + + Returns: + UnifiedConfig: A unified configuration object + """ + # If already a UnifiedConfig, return as-is + if isinstance(config, UnifiedConfig): + return config + + field_mappings = { + # Basic model dimensions + "num_hidden_layers": "n_layers", + "hidden_size": "d_model", + "head_dim": "d_head", + "num_attention_heads": "n_heads", + "intermediate_size": "d_mlp", + "vocab_size": "d_vocab", + # Attention parameters + "num_key_value_heads": "n_key_value_heads", + # Model metadata + "torch_dtype": "dtype", + } + config_dict = config.to_dict() + + if "original_architecture" not in config_dict: + config_dict["original_architecture"] = config.architectures[0] + if "tokenizer_name" not in config_dict: + config_dict["tokenizer_name"] = config.name_or_path + if "model_name" not in config_dict: + config_dict["model_name"] = config.name_or_path + + if "text_config" in config_dict: + config_dict |= config_dict["text_config"] + + for nnsight_field, transformerlens_field in field_mappings.items(): + if transformerlens_field not in config_dict and nnsight_field in config_dict: + config_dict[transformerlens_field] = config_dict[nnsight_field] + + return UnifiedConfig.from_dict(config_dict) From f90a7a4247485dc9a7bf18b9add76993d56c100b Mon Sep 17 00:00:00 2001 From: Daniel Dale Date: Thu, 15 Jan 2026 11:14:41 -0800 Subject: [PATCH 07/18] Add pytest markers for long-running and high memory tests; adjust batch sizes to allow running more tests with minimum CI hardware profile --- pyproject.toml | 2 ++ tests/test_attributions_gemma.py | 2 +- tests/test_attributions_gemma3_nnsight.py | 9 ++++++--- tests/test_attributions_gemma_nnsight.py | 2 +- tests/test_attributions_llama_nnsight.py | 1 + tests/test_freeze_points.py | 3 ++- tests/test_offload.py | 2 ++ tests/test_transformerlens_nnsight_same_gemma.py | 3 +++ tests/test_transformerlens_nnsight_same_gemma_clts.py | 3 +++ 
tests/test_transformerlens_nnsight_same_llama.py | 3 +++ tests/test_transformerlens_nnsight_same_llama_clts.py | 3 +++ tests/test_tutorial_notebook_backends.py | 3 +++ tests/utils/test_caching.py | 3 +++ 13 files changed, 33 insertions(+), 6 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d25126f4..c9696837 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,6 +57,8 @@ exclude = ["**/node_modules", "**/__pycache__", "**/.*", "demos"] [tool.pytest.ini_options] markers = [ "requires_disk: marks tests requiring storage space", + "long_running: marks tests that take a very long time to complete", + "high_mem: marks tests requiring significant GPU memory (>24GB VRAM or multiple large models)", ] [dependency-groups] diff --git a/tests/test_attributions_gemma.py b/tests/test_attributions_gemma.py index c142252f..d7981b9a 100644 --- a/tests/test_attributions_gemma.py +++ b/tests/test_attributions_gemma.py @@ -410,7 +410,7 @@ def test_gemma_2_2b_clt(): s = "The National Digital Analytics Group (ND" model = ReplacementModel.from_pretrained("google/gemma-2-2b", "mntss/clt-gemma-2-2b-426k") assert isinstance(model, TransformerLensReplacementModel) - graph = attribute(s, model) + graph = attribute(s, model, batch_size=256) print("Changing logit softcap to 0, as the logits will otherwise be off.") with model.zero_softcap(): diff --git a/tests/test_attributions_gemma3_nnsight.py b/tests/test_attributions_gemma3_nnsight.py index 0e25694d..fa1d89ef 100644 --- a/tests/test_attributions_gemma3_nnsight.py +++ b/tests/test_attributions_gemma3_nnsight.py @@ -265,7 +265,7 @@ def verify_feature_edges( model: NNSightReplacementModel, graph: Graph, n_samples: int = 100, - act_atol=5e-4, + act_atol=1e-3, # dummy transcoder gemma3 tests need slightly higher tolerance act_rtol=1e-5, logit_atol=1e-5, logit_rtol=1e-3, @@ -484,7 +484,7 @@ def test_gemma3_with_dummy_transcoders(): s = "The National Digital Analytics Group (ND" model = load_gemma3_with_dummy_transcoders() 
model.to(torch.float32) # type:ignore - graph = attribute(s, model) + graph = attribute(s, model, batch_size=256) assert isinstance(model, NNSightReplacementModel) @@ -498,7 +498,7 @@ def test_gemma3_with_dummy_clt(): s = "The National Digital Analytics Group (ND" model = load_gemma3_with_dummy_clt() model.to(torch.float32) # type:ignore - graph = attribute(s, model) + graph = attribute(s, model, batch_size=256) assert isinstance(model, NNSightReplacementModel) @@ -525,6 +525,7 @@ def test_gemma_3_1b(): verify_feature_edges(model, graph) +@pytest.mark.high_mem @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_gemma_3_1b_clt(): s = "The National Digital Analytics Group (ND" @@ -543,6 +544,8 @@ def test_gemma_3_1b_clt(): verify_feature_edges(model, graph) +@pytest.mark.high_mem +@pytest.mark.long_running @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_gemma_3_4b(): s = "The National Digital Analytics Group (ND" diff --git a/tests/test_attributions_gemma_nnsight.py b/tests/test_attributions_gemma_nnsight.py index 386e669c..7474b14e 100644 --- a/tests/test_attributions_gemma_nnsight.py +++ b/tests/test_attributions_gemma_nnsight.py @@ -341,7 +341,7 @@ def test_gemma_2_2b(): model = ReplacementModel.from_pretrained("google/gemma-2-2b", "gemma", backend="nnsight") assert isinstance(model, NNSightReplacementModel) - graph = attribute(s, model) + graph = attribute(s, model, batch_size=256) print("Changing logit softcap to 0, as the logits will otherwise be off.") with model.zero_softcap(): diff --git a/tests/test_attributions_llama_nnsight.py b/tests/test_attributions_llama_nnsight.py index 52e2de39..12385fed 100644 --- a/tests/test_attributions_llama_nnsight.py +++ b/tests/test_attributions_llama_nnsight.py @@ -149,6 +149,7 @@ def test_large_llama_model(): tokenizer_class.all_special_ids = original_all_special_ids # type:ignore +@pytest.mark.high_mem @pytest.mark.skipif(not 
torch.cuda.is_available(), reason="CUDA not available") def test_llama_3_2_1b(): s = "The National Digital Analytics Group (ND" diff --git a/tests/test_freeze_points.py b/tests/test_freeze_points.py index 373c9650..cfb518df 100644 --- a/tests/test_freeze_points.py +++ b/tests/test_freeze_points.py @@ -31,7 +31,8 @@ def cleanup_cuda(): # ("google/gemma-3-1b-pt", "mwhanna/gemma-scope-2-1b-pt/clt/width_262k_l0_medium_affine"), # This requires lazy loading ( "google/gemma-3-4b-pt", - "mwhanna/gemma-scope-2-4b-pt/transcoder_all/width_262k_l0_small_affine", + # we use width_16k here instead of 262k to avoid a large download that is not used elsewhere in the test suite + "mwhanna/gemma-scope-2-4b-pt/transcoder_all/width_16k_l0_small_affine", ), ] diff --git a/tests/test_offload.py b/tests/test_offload.py index 58220a47..4acd61ea 100644 --- a/tests/test_offload.py +++ b/tests/test_offload.py @@ -48,6 +48,7 @@ def test_offload_tl(): assert param.device.type == original_device.type +@pytest.mark.high_mem @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_offload_nnsight(): s = "The National Digital Analytics Group (ND" @@ -75,6 +76,7 @@ def test_offload_nnsight(): assert param.device.type == original_device.type +@pytest.mark.high_mem @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_offload_nnsight_gemma_3(): s = "The National Digital Analytics Group (ND" diff --git a/tests/test_transformerlens_nnsight_same_gemma.py b/tests/test_transformerlens_nnsight_same_gemma.py index 3094e8c5..f6cfc5e2 100644 --- a/tests/test_transformerlens_nnsight_same_gemma.py +++ b/tests/test_transformerlens_nnsight_same_gemma.py @@ -9,6 +9,9 @@ attribute as attribute_transformerlens, ) +# Mark all tests in this module as requiring large GPU memory +pytestmark = pytest.mark.high_mem + @pytest.fixture(autouse=True) def cleanup_cuda(): diff --git a/tests/test_transformerlens_nnsight_same_gemma_clts.py 
b/tests/test_transformerlens_nnsight_same_gemma_clts.py index 40b8820a..194e4c39 100644 --- a/tests/test_transformerlens_nnsight_same_gemma_clts.py +++ b/tests/test_transformerlens_nnsight_same_gemma_clts.py @@ -9,6 +9,9 @@ attribute as attribute_transformerlens, ) +# Mark all tests in this module as requiring high GPU memory +pytestmark = pytest.mark.high_mem + @pytest.fixture(autouse=True) def cleanup_cuda(): diff --git a/tests/test_transformerlens_nnsight_same_llama.py b/tests/test_transformerlens_nnsight_same_llama.py index f3fd2133..03006950 100644 --- a/tests/test_transformerlens_nnsight_same_llama.py +++ b/tests/test_transformerlens_nnsight_same_llama.py @@ -9,6 +9,9 @@ attribute as attribute_transformerlens, ) +# Mark all tests in this module as requiring high GPU memory +pytestmark = pytest.mark.high_mem + @pytest.fixture(autouse=True) def cleanup_cuda(): diff --git a/tests/test_transformerlens_nnsight_same_llama_clts.py b/tests/test_transformerlens_nnsight_same_llama_clts.py index 13ceef42..4668f188 100644 --- a/tests/test_transformerlens_nnsight_same_llama_clts.py +++ b/tests/test_transformerlens_nnsight_same_llama_clts.py @@ -6,6 +6,9 @@ from circuit_tracer.replacement_model import ReplacementModel from circuit_tracer.attribution.attribute import attribute +# Mark all tests in this module as requiring high GPU memory +pytestmark = pytest.mark.high_mem + @pytest.fixture(autouse=True) def cleanup_cuda(): diff --git a/tests/test_tutorial_notebook_backends.py b/tests/test_tutorial_notebook_backends.py index bfc6d3f8..e5a52205 100644 --- a/tests/test_tutorial_notebook_backends.py +++ b/tests/test_tutorial_notebook_backends.py @@ -9,6 +9,9 @@ attribute as attribute_transformerlens, ) +# Mark all tests in this module as requiring high GPU memory +pytestmark = pytest.mark.high_mem + @pytest.fixture(autouse=True) def cleanup_cuda(): diff --git a/tests/utils/test_caching.py b/tests/utils/test_caching.py index e8b321ec..97aaafca 100644 --- 
a/tests/utils/test_caching.py +++ b/tests/utils/test_caching.py @@ -33,6 +33,7 @@ def cleanup_cache(): @pytest.mark.requires_disk +@pytest.mark.long_running def test_caching_enables_lazy_loading(): # 1. Load from hub without cache - lazy loading should not work because # gemma-scope transcoders use npz format which doesn't support lazy loading @@ -81,6 +82,7 @@ def test_caching_enables_lazy_loading(): @pytest.mark.requires_disk +@pytest.mark.long_running def test_custom_cache_directory(): try: # Ensure test cache dir doesn't exist initially @@ -119,6 +121,7 @@ def test_custom_cache_directory(): @pytest.mark.requires_disk +@pytest.mark.long_running def test_cache_directory_from_env_var(): env_cache_dir = Path.home() / ".cache" / "circuit-tracer-env-test" old_env = os.environ.get("CIRCUIT_TRACER_CACHE_DIR") From af101cf22373a4cbf654b980622ca0f1692aa5cc Mon Sep 17 00:00:00 2001 From: Daniel Dale Date: Thu, 15 Jan 2026 11:16:59 -0800 Subject: [PATCH 08/18] Update unembedding matrix handling to auto-detect backend-variant orientation in AttributionTargets --- circuit_tracer/attribution/targets.py | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/circuit_tracer/attribution/targets.py b/circuit_tracer/attribution/targets.py index b46c6d05..03b573b9 100644 --- a/circuit_tracer/attribution/targets.py +++ b/circuit_tracer/attribution/targets.py @@ -295,7 +295,7 @@ def _from_list( Args: targets: List of mixed target specifications logits: ``(d_vocab,)`` logit vector - unembed_proj: ``(d_model, d_vocab)`` unembedding matrix + unembed_proj: ``(d_model, d_vocab)`` or ``(d_vocab, d_model)`` unembedding matrix tokenizer: Tokenizer for string→int conversion Returns: @@ -314,7 +314,8 @@ def _compute_logit_vecs( Args: indices: ``(k,)`` vocabulary indices to compute vectors for logits: ``(d_vocab,)`` logit vector for single position - unembed_proj: ``(d_model, d_vocab)`` unembedding matrix + unembed_proj: ``(d_model, d_vocab)`` or ``(d_vocab, 
d_model)`` unembedding matrix + (orientation auto-detected by matching vocab dimension to logits) Returns: Tuple of: @@ -324,9 +325,21 @@ def _compute_logit_vecs( """ probs = torch.softmax(logits, dim=-1) selected_probs = probs[indices] - cols = unembed_proj[:, indices] - demeaned = cols - unembed_proj.mean(dim=-1, keepdim=True) - return indices, selected_probs, demeaned.T + + # Auto-detect matrix orientation by matching against vocabulary size + d_vocab = logits.shape[0] + if unembed_proj.shape[0] == d_vocab: + # Shape is (d_vocab, d_model) – first axis is vocabulary (e.g., NNSight) + cols = unembed_proj[indices] # (k, d_model) + demean = unembed_proj.mean(dim=0, keepdim=True) # (1, d_model) + demeaned_vecs = cols - demean # (k, d_model) + else: + # Shape is (d_model, d_vocab) – second axis is vocabulary (e.g., TransformerLens) + cols = unembed_proj[:, indices] # (d_model, k) + demean = unembed_proj.mean(dim=-1, keepdim=True) # (d_model, 1) + demeaned_vecs = (cols - demean).T # (k, d_model) + + return indices, selected_probs, demeaned_vecs @staticmethod def _process_target_list( @@ -346,7 +359,7 @@ def _process_target_list( Args: targets: List of attribution targets in any combination of the above formats logits: ``(d_vocab,)`` vector for computing probabilities - unembed_proj: ``(d_model, d_vocab)`` unembedding matrix for computing vectors + unembed_proj: ``(d_model, d_vocab)`` or ``(d_vocab, d_model)`` unembedding matrix tokenizer: Tokenizer to use for string token conversion and to get vocab_size Returns: From 2f8eb2b5121d153c761beb1568aa733cbc0b8bc9 Mon Sep 17 00:00:00 2001 From: Daniel Dale Date: Fri, 16 Jan 2026 17:59:28 -0800 Subject: [PATCH 09/18] minor type fix, clarify current vram gating mark --- circuit_tracer/attribution/attribute_nnsight.py | 4 ++-- .../replacement_model/replacement_model_nnsight.py | 10 +++++++--- circuit_tracer/utils/create_graph_files.py | 2 +- pyproject.toml | 2 +- tests/test_attributions_gemma3_nnsight.py | 5 +++-- 
tests/test_attributions_llama_nnsight.py | 2 +- tests/test_offload.py | 4 ++-- tests/test_transformerlens_nnsight_same_gemma.py | 2 +- tests/test_transformerlens_nnsight_same_gemma_clts.py | 2 +- tests/test_transformerlens_nnsight_same_llama.py | 2 +- tests/test_transformerlens_nnsight_same_llama_clts.py | 2 +- tests/test_tutorial_notebook_backends.py | 2 +- 12 files changed, 22 insertions(+), 17 deletions(-) diff --git a/circuit_tracer/attribution/attribute_nnsight.py b/circuit_tracer/attribution/attribute_nnsight.py index 049d4a3c..670f3133 100644 --- a/circuit_tracer/attribution/attribute_nnsight.py +++ b/circuit_tracer/attribution/attribute_nnsight.py @@ -23,7 +23,7 @@ import logging import time from collections.abc import Sequence -from typing import Literal +from typing import Literal, cast import torch from tqdm import tqdm @@ -177,7 +177,7 @@ def _run_attribution( targets = AttributionTargets( attribution_targets=attribution_targets, logits=ctx.logits[0, -1], - unembed_proj=model.unembed_weight, # NNSight uses unembed_weight + unembed_proj=cast(torch.Tensor, model.unembed_weight), # NNSight uses unembed_weight tokenizer=model.tokenizer, max_n_logits=max_n_logits, desired_logit_prob=desired_logit_prob, diff --git a/circuit_tracer/replacement_model/replacement_model_nnsight.py b/circuit_tracer/replacement_model/replacement_model_nnsight.py index 85a4f6ca..a5851633 100644 --- a/circuit_tracer/replacement_model/replacement_model_nnsight.py +++ b/circuit_tracer/replacement_model/replacement_model_nnsight.py @@ -3,7 +3,7 @@ from collections.abc import Sequence from contextlib import contextmanager from functools import partial -from typing import Callable, Iterator, Literal +from typing import Callable, Iterator, Literal, cast import torch from torch import nn @@ -241,8 +241,12 @@ def _configure_replacement_model( self._embed_location = nnsight_config.embed_location # these are real weights, not envoys - self.embed_weight = self._resolve_attr(self, 
nnsight_config.embed_weight) - self.unembed_weight = self._resolve_attr(self, nnsight_config.unembed_weight) + self.embed_weight = cast( + torch.Tensor, self._resolve_attr(self, nnsight_config.embed_weight) + ) + self.unembed_weight = cast( + torch.Tensor, self._resolve_attr(self, nnsight_config.unembed_weight) + ) self.scan = transcoder_set.scan # Make sure the replacement model is entirely frozen by default. diff --git a/circuit_tracer/utils/create_graph_files.py b/circuit_tracer/utils/create_graph_files.py index 568dd17a..ebcd3442 100644 --- a/circuit_tracer/utils/create_graph_files.py +++ b/circuit_tracer/utils/create_graph_files.py @@ -159,7 +159,7 @@ def build_model(graph: "Graph", used_nodes, used_edges, slug, scan, node_thresho def create_graph_files( - graph_or_path: "Graph" | str, + graph_or_path: "Graph | str", slug: str, output_path, scan=None, diff --git a/pyproject.toml b/pyproject.toml index c9696837..82fe338e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,7 +58,7 @@ exclude = ["**/node_modules", "**/__pycache__", "**/.*", "demos"] markers = [ "requires_disk: marks tests requiring storage space", "long_running: marks tests that take a very long time to complete", - "high_mem: marks tests requiring significant GPU memory (>24GB VRAM or multiple large models)", + "large_gpu: marks tests requiring significant GPU memory (>24GB VRAM or multiple large models)", ] [dependency-groups] diff --git a/tests/test_attributions_gemma3_nnsight.py b/tests/test_attributions_gemma3_nnsight.py index ebf43059..452a7548 100644 --- a/tests/test_attributions_gemma3_nnsight.py +++ b/tests/test_attributions_gemma3_nnsight.py @@ -525,7 +525,7 @@ def test_gemma_3_1b(): verify_feature_edges(model, graph) -@pytest.mark.high_mem +@pytest.mark.large_gpu @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_gemma_3_1b_it(): s = "user\nThe National Digital Analytics Group (ND" @@ -544,6 +544,7 @@ def test_gemma_3_1b_it(): 
verify_feature_edges(model, graph) +@pytest.mark.large_gpu @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_gemma_3_1b_clt(): s = "The National Digital Analytics Group (ND" @@ -562,7 +563,7 @@ def test_gemma_3_1b_clt(): verify_feature_edges(model, graph) -@pytest.mark.high_mem +@pytest.mark.large_gpu @pytest.mark.long_running @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_gemma_3_4b(): diff --git a/tests/test_attributions_llama_nnsight.py b/tests/test_attributions_llama_nnsight.py index 12385fed..e685e3fd 100644 --- a/tests/test_attributions_llama_nnsight.py +++ b/tests/test_attributions_llama_nnsight.py @@ -149,7 +149,7 @@ def test_large_llama_model(): tokenizer_class.all_special_ids = original_all_special_ids # type:ignore -@pytest.mark.high_mem +@pytest.mark.large_gpu @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_llama_3_2_1b(): s = "The National Digital Analytics Group (ND" diff --git a/tests/test_offload.py b/tests/test_offload.py index 4acd61ea..a2e2f5d3 100644 --- a/tests/test_offload.py +++ b/tests/test_offload.py @@ -48,7 +48,7 @@ def test_offload_tl(): assert param.device.type == original_device.type -@pytest.mark.high_mem +@pytest.mark.large_gpu @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_offload_nnsight(): s = "The National Digital Analytics Group (ND" @@ -76,7 +76,7 @@ def test_offload_nnsight(): assert param.device.type == original_device.type -@pytest.mark.high_mem +@pytest.mark.large_gpu @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_offload_nnsight_gemma_3(): s = "The National Digital Analytics Group (ND" diff --git a/tests/test_transformerlens_nnsight_same_gemma.py b/tests/test_transformerlens_nnsight_same_gemma.py index f6cfc5e2..99705e4d 100644 --- a/tests/test_transformerlens_nnsight_same_gemma.py +++ 
b/tests/test_transformerlens_nnsight_same_gemma.py @@ -10,7 +10,7 @@ ) # Mark all tests in this module as requiring large GPU memory -pytestmark = pytest.mark.high_mem +pytestmark = pytest.mark.large_gpu @pytest.fixture(autouse=True) diff --git a/tests/test_transformerlens_nnsight_same_gemma_clts.py b/tests/test_transformerlens_nnsight_same_gemma_clts.py index 194e4c39..b6e36c91 100644 --- a/tests/test_transformerlens_nnsight_same_gemma_clts.py +++ b/tests/test_transformerlens_nnsight_same_gemma_clts.py @@ -10,7 +10,7 @@ ) # Mark all tests in this module as requiring high GPU memory -pytestmark = pytest.mark.high_mem +pytestmark = pytest.mark.large_gpu @pytest.fixture(autouse=True) diff --git a/tests/test_transformerlens_nnsight_same_llama.py b/tests/test_transformerlens_nnsight_same_llama.py index 03006950..b2308336 100644 --- a/tests/test_transformerlens_nnsight_same_llama.py +++ b/tests/test_transformerlens_nnsight_same_llama.py @@ -10,7 +10,7 @@ ) # Mark all tests in this module as requiring high GPU memory -pytestmark = pytest.mark.high_mem +pytestmark = pytest.mark.large_gpu @pytest.fixture(autouse=True) diff --git a/tests/test_transformerlens_nnsight_same_llama_clts.py b/tests/test_transformerlens_nnsight_same_llama_clts.py index 4668f188..e4133c08 100644 --- a/tests/test_transformerlens_nnsight_same_llama_clts.py +++ b/tests/test_transformerlens_nnsight_same_llama_clts.py @@ -7,7 +7,7 @@ from circuit_tracer.attribution.attribute import attribute # Mark all tests in this module as requiring high GPU memory -pytestmark = pytest.mark.high_mem +pytestmark = pytest.mark.large_gpu @pytest.fixture(autouse=True) diff --git a/tests/test_tutorial_notebook_backends.py b/tests/test_tutorial_notebook_backends.py index e5a52205..c1b0653e 100644 --- a/tests/test_tutorial_notebook_backends.py +++ b/tests/test_tutorial_notebook_backends.py @@ -10,7 +10,7 @@ ) # Mark all tests in this module as requiring high GPU memory -pytestmark = pytest.mark.high_mem +pytestmark = 
pytest.mark.large_gpu @pytest.fixture(autouse=True) From 6b76d087b73e8614e9aae78cffd7b843fb9f0663 Mon Sep 17 00:00:00 2001 From: Daniel Dale Date: Tue, 10 Feb 2026 14:17:27 -0800 Subject: [PATCH 10/18] adds integration tests, refactors proposed interface incorporating PR review feedback, includes comment and mark changes that will be separated into a separate PR --- circuit_tracer/attribution/attribute.py | 18 +- .../attribution/attribute_nnsight.py | 29 +- .../attribution/attribute_transformerlens.py | 29 +- circuit_tracer/attribution/targets.py | 337 +++---- circuit_tracer/graph.py | 64 +- circuit_tracer/utils/tl_nnsight_mapping.py | 2 +- tests/test_attribution_targets.py | 892 ++++++++++++++---- tests/test_graph.py | 79 +- 8 files changed, 1009 insertions(+), 441 deletions(-) diff --git a/circuit_tracer/attribution/attribute.py b/circuit_tracer/attribution/attribute.py index f59f0b30..c4fc16f9 100644 --- a/circuit_tracer/attribution/attribute.py +++ b/circuit_tracer/attribution/attribute.py @@ -14,6 +14,7 @@ from circuit_tracer.graph import Graph if TYPE_CHECKING: + from circuit_tracer.attribution.targets import TargetSpec from circuit_tracer.replacement_model.replacement_model_nnsight import NNSightReplacementModel from circuit_tracer.replacement_model.replacement_model_transformerlens import ( TransformerLensReplacementModel, @@ -24,9 +25,7 @@ def attribute( prompt: str | torch.Tensor | list[int], model: "NNSightReplacementModel | TransformerLensReplacementModel", *, - attribution_targets: ( - Sequence[tuple[str, float, torch.Tensor] | int | str] | torch.Tensor | None - ) = None, + attribution_targets: "Sequence[str] | Sequence[TargetSpec] | torch.Tensor | None" = None, max_n_logits: int = 10, desired_logit_prob: float = 0.95, batch_size: int = 512, @@ -43,16 +42,13 @@ def attribute( Args: prompt: Text, token ids, or tensor - will be tokenized if str. 
model: Frozen ``ReplacementModel`` (either nnsight or transformerlens backend) - attribution_targets: Flexible attribution target specification in one of several formats: + attribution_targets: Target specification in one of four formats: - None: Auto-select salient logits based on probability threshold - torch.Tensor: Tensor of token indices - - Sequence[tuple[str, float, torch.Tensor] | int | str]: Sequence where - each element can be: - * int or str: Token ID/string (auto-resolves probability and - unembed vector) - * tuple[str, float, torch.Tensor]: Fully specified logit spec with - arbitrary string tokens (or functions thereof) that may not be in - vocabulary + - Sequence[str]: Token strings (tokenized, auto-computes probability + and unembed vector) + - Sequence[TargetSpec]: Fully specified custom targets (CustomTarget or + tuple[str, float, torch.Tensor]) with arbitrary unembed directions max_n_logits: Max number of logit nodes (used when attribution_targets is None). desired_logit_prob: Keep logits until cumulative prob >= this value (used when attribution_targets is None). 
diff --git a/circuit_tracer/attribution/attribute_nnsight.py b/circuit_tracer/attribution/attribute_nnsight.py index 670f3133..e192e363 100644 --- a/circuit_tracer/attribution/attribute_nnsight.py +++ b/circuit_tracer/attribution/attribute_nnsight.py @@ -28,7 +28,11 @@ import torch from tqdm import tqdm -from circuit_tracer.attribution.targets import AttributionTargets +from circuit_tracer.attribution.targets import ( + AttributionTargets, + TargetSpec, + log_attribution_target_info, +) from circuit_tracer.graph import Graph, compute_partial_influences from circuit_tracer.replacement_model.replacement_model_nnsight import NNSightReplacementModel from circuit_tracer.utils.disk_offload import offload_modules @@ -38,9 +42,7 @@ def attribute( prompt: str | torch.Tensor | list[int], model: NNSightReplacementModel, *, - attribution_targets: ( - Sequence[tuple[str, float, torch.Tensor] | int | str] | torch.Tensor | None - ) = None, + attribution_targets: Sequence[str] | Sequence[TargetSpec] | torch.Tensor | None = None, max_n_logits: int = 10, desired_logit_prob: float = 0.95, batch_size: int = 512, @@ -54,16 +56,13 @@ def attribute( Args: prompt: Text, token ids, or tensor - will be tokenized if str. 
model: Frozen ``NNSightReplacementModel`` - attribution_targets: Flexible attribution target specification in one of several formats: + attribution_targets: Target specification in one of four formats: - None: Auto-select salient logits based on probability threshold - torch.Tensor: Tensor of token indices - - Sequence[tuple[str, float, torch.Tensor] | int | str]: Sequence where - each element can be: - * int or str: Token ID/string (auto-resolves probability and - unembed vector) - * tuple[str, float, torch.Tensor]: Fully specified logit spec with - arbitrary string tokens (or functions thereof) that may not be in - vocabulary + - Sequence[str]: Token strings (tokenized, auto-computes probability + and unembed vector) + - Sequence[TargetSpec]: Fully specified custom targets (CustomTarget or tuple) + with arbitrary unembed directions max_n_logits: Max number of logit nodes (used when attribution_targets is None). desired_logit_prob: Keep logits until cumulative prob >= this value (used when attribution_targets is None). 
@@ -183,11 +182,7 @@ def _run_attribution( desired_logit_prob=desired_logit_prob, ) - if attribution_targets is None: - logger.info( - f"Selected {len(targets)} logits with cumulative probability " - f"{targets.logit_probabilities.sum().item():.4f}" - ) + log_attribution_target_info(targets, attribution_targets, logger) if offload: offload_handles += offload_modules([model.embed_location], offload) diff --git a/circuit_tracer/attribution/attribute_transformerlens.py b/circuit_tracer/attribution/attribute_transformerlens.py index efd57d39..449f3088 100644 --- a/circuit_tracer/attribution/attribute_transformerlens.py +++ b/circuit_tracer/attribution/attribute_transformerlens.py @@ -28,7 +28,11 @@ import torch from tqdm import tqdm -from circuit_tracer.attribution.targets import AttributionTargets +from circuit_tracer.attribution.targets import ( + AttributionTargets, + TargetSpec, + log_attribution_target_info, +) from circuit_tracer.graph import Graph, compute_partial_influences from circuit_tracer.replacement_model.replacement_model_transformerlens import ( TransformerLensReplacementModel, @@ -40,9 +44,7 @@ def attribute( prompt: str | torch.Tensor | list[int], model: TransformerLensReplacementModel, *, - attribution_targets: ( - Sequence[tuple[str, float, torch.Tensor] | int | str] | torch.Tensor | None - ) = None, + attribution_targets: Sequence[str] | Sequence[TargetSpec] | torch.Tensor | None = None, max_n_logits: int = 10, desired_logit_prob: float = 0.95, batch_size: int = 512, @@ -56,16 +58,13 @@ def attribute( Args: prompt: Text, token ids, or tensor - will be tokenized if str. 
model: Frozen ``TransformerLensReplacementModel`` - attribution_targets: Flexible attribution target specification in one of several formats: + attribution_targets: Target specification in one of four formats: - None: Auto-select salient logits based on probability threshold - torch.Tensor: Tensor of token indices - - Sequence[tuple[str, float, torch.Tensor] | int | str]: Sequence where - each element can be: - * int or str: Token ID/string (auto-resolves probability and - unembed vector) - * tuple[str, float, torch.Tensor]: Fully specified logit spec with - arbitrary string tokens (or functions thereof) that may not be in - vocabulary + - Sequence[str]: Token strings (tokenized, auto-computes probability + and unembed vector) + - Sequence[TargetSpec]: Fully specified custom targets (CustomTarget or tuple) + with arbitrary unembed directions max_n_logits: Max number of logit nodes (used when attribution_targets is None). desired_logit_prob: Keep logits until cumulative prob >= this value (used when attribution_targets is None). @@ -173,11 +172,7 @@ def _run_attribution( desired_logit_prob=desired_logit_prob, ) - if attribution_targets is None: - logger.info( - f"Selected {len(targets)} logits with cumulative probability " - f"{targets.logit_probabilities.sum().item():.4f}" - ) + log_attribution_target_info(targets, attribution_targets, logger) if offload: offload_handles += offload_modules([model.unembed, model.embed], offload) diff --git a/circuit_tracer/attribution/targets.py b/circuit_tracer/attribution/targets.py index 03b573b9..c06bd170 100644 --- a/circuit_tracer/attribution/targets.py +++ b/circuit_tracer/attribution/targets.py @@ -14,35 +14,47 @@ from collections.abc import Sequence from typing import NamedTuple +import logging +import warnings import torch class LogitTarget(NamedTuple): - """Data transfer object (DTO) for logit attribution targets. 
+ """Token metadata for attribution: string representation and vocabulary index.""" + + token_str: str + vocab_idx: int - A lightweight record structure containing token metadata for attribution. + +class CustomTarget(NamedTuple): + """A fully specified custom attribution target. Attributes: - token_str: String representation of the token (decoded from vocab or arbitrary) - vocab_idx: Vocabulary index - either a real token ID (< vocab_size) or - a virtual index for OOV tokens (>= vocab_size) + token_str: Label for this target (e.g., "logit(x)-logit(y)") + prob: Weight/probability for this target + vec: Custom unembed direction vector (d_model,) """ token_str: str - vocab_idx: int + prob: float + vec: torch.Tensor + + +TargetSpec = CustomTarget | tuple[str, float, torch.Tensor] class AttributionTargets: """Container for processed attribution target specifications. - High-level data structure that encapsulates target identifiers, softmax probabilities, - and demeaned unembedding vectors needed for attribution graph computation. + Encapsulates target identifiers, softmax probabilities, and demeaned unembedding + vectors needed for attribution graph computation. - Supports multiple input formats for flexible target specification: + Supports four input formats: - None: Auto-select salient logits by probability threshold - - torch.Tensor: Specific vocabulary indices (i.e. 
token_ids) - - list: Mixed targets (tuples for OOV tokens, ints/strs for valid token_ids) + - torch.Tensor: Specific vocabulary indices (token IDs) + - Sequence[str]: Token strings (tokenized internally) + - Sequence[TargetSpec]: Fully specified custom targets (CustomTarget or raw tuple[str, float, torch.Tensor]) Attributes: logit_targets: List of LogitTarget records with token strings and vocab indices @@ -52,9 +64,7 @@ class AttributionTargets: def __init__( self, - attribution_targets: ( - Sequence[tuple[str, float, torch.Tensor] | int | str] | torch.Tensor | None - ), + attribution_targets: Sequence[str] | Sequence[TargetSpec] | torch.Tensor | None, logits: torch.Tensor, unembed_proj: torch.Tensor, tokenizer, @@ -65,14 +75,13 @@ def __init__( """Build attribution targets from user specification. Args: - attribution_targets: Target specification in one of several formats: + attribution_targets: Target specification in one of four formats: - None: Auto-select salient logits based on probability threshold - torch.Tensor: Tensor of vocabulary token IDs - - list[tuple[str, float, torch.Tensor] | int | str]: List where - each element can be: - * int or str: Token ID/string (auto-computes probability & vector) - * tuple[str, float, torch.Tensor]: Fully specified target logit with arbitrary - string token (or function thereof) (may use virtual index for OOV tokens) + - Sequence[str]: Token strings (tokenized, then auto-computes probability & vector) + - Sequence[TargetSpec]: Fully specified custom targets (CustomTarget or + tuple[str, float, torch.Tensor]) with custom probability and unembed direction + (uses virtual index for OOV tokens) logits: ``(d_vocab,)`` logit vector for single position unembed_proj: ``(d_model, d_vocab)`` unembedding matrix tokenizer: Tokenizer for string→int conversion @@ -89,14 +98,23 @@ def __init__( attr_spec = self._from_salient(**salient_ctor, **ctor_shared) elif isinstance(attribution_targets, torch.Tensor): attr_spec = 
self._from_indices(indices=attribution_targets, **ctor_shared) - elif isinstance(attribution_targets, list): + elif isinstance(attribution_targets, Sequence): if not attribution_targets: - raise ValueError("attribution_targets list cannot be empty") - attr_spec = self._from_list(target_list=attribution_targets, **ctor_shared) + raise ValueError("attribution_targets sequence cannot be empty") + first = attribution_targets[0] + if isinstance(first, str): + attr_spec = self._from_str(token_strs=attribution_targets, **ctor_shared) # type: ignore[arg-type] + elif isinstance(first, (tuple, CustomTarget)): + attr_spec = self._from_tuple(target_tuples=attribution_targets, **ctor_shared) # type: ignore[arg-type] + else: + raise TypeError( + f"Sequence elements must be str or TargetSpec (CustomTarget or " + f"tuple[str, float, Tensor]), got {type(first)}" + ) else: raise TypeError( - f"attribution_targets must be None, torch.Tensor, or list, " - f"got {type(attribution_targets)}" + f"attribution_targets must be None, torch.Tensor, Sequence[str], " + f"or Sequence[TargetSpec], got {type(attribution_targets)}" ) self.logit_targets, self.logit_probabilities, self.logit_vectors = attr_spec @@ -135,51 +153,22 @@ def vocab_size(self) -> int: @property def vocab_indices(self) -> list[int]: """All vocabulary indices including virtual indices (>= vocab_size). - Vocab indices are a generalization of token IDs that can represent: - - Real vocab indices (< vocab_size) for token_ids valid in the current tokenizer vocab space - - Virtual indices (>= vocab_size) for arbitrary string tokens (or functions thereof) - - Use has_virtual_indices to check if any virtual indices are present. - Use token_ids to get a tensor of only real vocabulary indices. 
Returns: - List of vocabulary indices (including virtual indices) + List of vocabulary indices """ return [target.vocab_idx for target in self.logit_targets] - @property - def has_virtual_indices(self) -> bool: - """Check if any targets use virtual indices (OOV tokens). - - Virtual indices (vocab_idx >= vocab_size) are a technique for representing - arbitrary string tokens not in the model's vocabulary. - - Returns: - True if virtual indices are present, False otherwise - """ - vocab_size = self.tokenizer.vocab_size - return any(t.vocab_idx >= vocab_size for t in self.logit_targets) - @property def token_ids(self) -> torch.Tensor: - """Tensor of valid vocabulary indices (< vocab_size only). + """Tensor of vocabulary indices. Returns a torch.Tensor of vocab indices on the same device as other tensors, - suitable for indexing into logit vectors or embeddings. This property will - raise a ValueError if any targets use virtual indices (arbitrary strings). - - Raises: - ValueError: If any targets have virtual indices (vocab_idx >= vocab_size) + suitable for indexing into logit vectors or embeddings. Returns: torch.Tensor: Long tensor of vocabulary indices """ - if self.has_virtual_indices: - raise ValueError( - "Cannot create token_ids tensor: some targets use virtual indices " - "(arbitrary strings not in vocabulary). Check has_virtual_indices " - "before accessing token_ids, or use vocab_indices to get all indices." - ) return torch.tensor( self.vocab_indices, dtype=torch.long, device=self.logit_probabilities.device ) @@ -279,29 +268,117 @@ def _from_indices( return logit_targets, probs, vecs @staticmethod - def _from_list( - target_list: Sequence[tuple[str, float, torch.Tensor] | int | str], + def _from_str( + token_strs: Sequence[str], logits: torch.Tensor, unembed_proj: torch.Tensor, tokenizer, ) -> tuple[list[LogitTarget], torch.Tensor, torch.Tensor]: - """Construct from mixed list of targets. + """Construct from a sequence of token strings. 
- Supports heterogeneous list where each element can be: - - int: Vocabulary index (auto-compute prob/vec) - - str: Token string (tokenize, auto-compute) - - tuple[str, float, Tensor]: Fully specified arbitrary string or function thereof + Each string is tokenized and its probability/vector auto-computed. Args: - targets: List of mixed target specifications + token_strs: Sequence of token strings logits: ``(d_vocab,)`` logit vector - unembed_proj: ``(d_model, d_vocab)`` or ``(d_vocab, d_model)`` unembedding matrix + unembed_proj: Unembedding matrix tokenizer: Tokenizer for string→int conversion Returns: Tuple of (logit_targets, probabilities, vectors) """ - return AttributionTargets._process_target_list(target_list, logits, unembed_proj, tokenizer) + vocab_size = logits.shape[0] + indices = [] + for token_str in token_strs: + try: + ids = tokenizer.encode(token_str, add_special_tokens=False) + except Exception as e: + raise ValueError( + f"Failed to encode string token {token_str!r} using tokenizer: {e}" + ) from e + if not ids: + raise ValueError(f"String token {token_str!r} encoded to empty token sequence.") + if len(ids) > 1: + warnings.warn( + f"String token {token_str!r} encoded to {len(ids)} tokens; " + f"using only the last token (index {ids[-1]}). " + f"Consider providing single-token strings for more predictable behavior." + ) + token_id = ids[-1] + assert 0 <= token_id < vocab_size, ( + f"Token {token_str!r} resolved to index {token_id}, " + f"out of vocabulary range [0, {vocab_size})" + ) + indices.append(token_id) + return AttributionTargets._from_indices( + indices=torch.tensor(indices, dtype=torch.long), + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + @staticmethod + def _validate_custom_target( + target: TargetSpec, + ) -> CustomTarget: + """Validate and normalize a custom target. 
+ + Args: + target: A CustomTarget or raw (token_str, prob, vec) tuple + + Returns: + Validated CustomTarget instance + + Raises: + ValueError: If the tuple has wrong length or element types + """ + if not isinstance(target, CustomTarget): + if len(target) != 3: + raise ValueError( + f"Tuple targets must have exactly 3 elements " + f"(token_str, probability, vector), got {len(target)}" + ) + token_str, prob, vec = target + else: + token_str, prob, vec = target.token_str, target.prob, target.vec + if not isinstance(token_str, str): + raise TypeError(f"Custom target token_str must be str, got {type(token_str)}") + if not isinstance(prob, (int, float)): + raise TypeError(f"Custom target prob must be int or float, got {type(prob)}") + if not isinstance(vec, torch.Tensor): + raise TypeError(f"Custom target vec must be torch.Tensor, got {type(vec)}") + return CustomTarget(token_str=token_str, prob=float(prob), vec=vec) + + @staticmethod + def _from_tuple( + target_tuples: Sequence[TargetSpec], + logits: torch.Tensor, + unembed_proj: torch.Tensor, + tokenizer, + ) -> tuple[list[LogitTarget], torch.Tensor, torch.Tensor]: + """Construct from fully specified custom targets. + + Each target provides (token_str, prob, vec) for an arbitrary + attribution direction that may not correspond to a vocabulary token. 
+ + Args: + target_tuples: Sequence of CustomTarget or raw tuple instances + logits: ``(d_vocab,)`` logit vector (used for vocab_size) + unembed_proj: Unembedding matrix (unused but kept for interface consistency) + tokenizer: Tokenizer (unused but kept for interface consistency) + + Returns: + Tuple of (logit_targets, probabilities, vectors) + """ + vocab_size = logits.shape[0] + logit_targets, probs, vecs = [], [], [] + for position, target in enumerate(target_tuples): + validated = AttributionTargets._validate_custom_target(target) + virtual_idx = vocab_size + position + logit_targets.append(LogitTarget(token_str=validated.token_str, vocab_idx=virtual_idx)) + probs.append(validated.prob) + vecs.append(validated.vec) + return logit_targets, torch.tensor(probs), torch.stack(vecs) @staticmethod def _compute_logit_vecs( @@ -341,105 +418,31 @@ def _compute_logit_vecs( return indices, selected_probs, demeaned_vecs - @staticmethod - def _process_target_list( - targets: Sequence[tuple[str, float, torch.Tensor] | int | str], - logits: torch.Tensor, - unembed_proj: torch.Tensor, - tokenizer, - ) -> tuple[list[LogitTarget], torch.Tensor, torch.Tensor]: - """Process mixed target list into LogitTarget instances, probabilities, vectors. 
- - Supports flexible mixed-mode targets where each element can be: - - int: Token ID (computes probability and vector, uses actual vocab index) - - str: Token string (tokenizes, computes probability and vector, uses actual token_id) - - tuple[str, float, torch.Tensor]: Arbitrary string or function thereof with custom prob/vec - (uses virtual index) - Args: - targets: List of attribution targets in any combination of the above formats - logits: ``(d_vocab,)`` vector for computing probabilities - unembed_proj: ``(d_model, d_vocab)`` or ``(d_vocab, d_model)`` unembedding matrix - tokenizer: Tokenizer to use for string token conversion and to get vocab_size +def log_attribution_target_info( + targets: "AttributionTargets", + attribution_targets: Sequence[str] | Sequence[TargetSpec] | torch.Tensor | None, + logger: logging.Logger, +) -> None: + """Log information about attribution targets. - Returns: - Tuple of: - * logit_targets - List of LogitTarget instances where: - - For int/str tokens: vocab_idx is actual vocab index, token_str is decoded - - For tuple targets: vocab_idx is virtual (vocab_size + position), - token_str is the arbitrary string or function thereof - * probabilities - ``(k,)`` probabilities - * vectors - ``(k, d_model)`` demeaned vectors - - Raises: - ValueError: If str token cannot be encoded or int token is out of vocab range - """ - vocab_size = logits.shape[0] - - def validate_token_id(token_id: int, original_token: int | str) -> None: - """Validate that token_id is within valid vocabulary range.""" - if not (0 <= token_id < vocab_size): - raise ValueError( - f"Token {original_token!r} resolved to index {token_id}, which is " - f"out of vocabulary range [0, {vocab_size})" - ) - - def token_to_idx(token: int | str) -> int: - """Convert token (int or str) to token index with validation.""" - if isinstance(token, str): - try: - ids = tokenizer.encode(token, add_special_tokens=False) - except Exception as e: - raise ValueError( - f"Failed to encode 
string token {token!r} using tokenizer: {e}" - ) from e - - if not ids: - raise ValueError( - f"String token {token!r} encoded to empty token sequence. " - f"Cannot determine valid token ID." - ) - - token_id = ids[-1] - validate_token_id(token_id, token) - return token_id - else: - validate_token_id(token, token) - return token - - logit_targets, probs, vecs = [], [], [] - - for position, target in enumerate(targets): - if isinstance(target, tuple): - # Fully specified tuple: (str_token, probability, vector) - # This is an arbitrary string or function of one, so we use virtual indices - if len(target) != 3: - raise ValueError( - f"Tuple targets must have exactly 3 elements " - f"(token_str, probability, vector), got {len(target)}" - ) - token_str, prob, vec = target - if not isinstance(token_str, str): - raise ValueError( - f"Tuple targets must have str as first element, got {type(token_str)}" - ) - - # Use virtual index for arbitrary string/function thereof - virtual_idx = vocab_size + position - logit_targets.append(LogitTarget(token_str=token_str, vocab_idx=virtual_idx)) - probs.append(prob) - vecs.append(vec) - else: - # Single token (int | str) - compute probability and vector, use valid token_ids - idx = token_to_idx(target) - idx_tensor = torch.tensor([idx], dtype=torch.long) - _, prob_tensor, vec_tensor = AttributionTargets._compute_logit_vecs( - idx_tensor, logits, unembed_proj - ) - - token_str = tokenizer.decode(idx) - logit_targets.append(LogitTarget(token_str=token_str, vocab_idx=idx)) - probs.append(prob_tensor.item()) - vecs.append(vec_tensor.squeeze(0)) - - return logit_targets, torch.tensor(probs), torch.stack(vecs) + Args: + targets: AttributionTargets instance with processed targets + attribution_targets: Original attribution_targets specification + logger: Logger to use for output + """ + prob_sum = targets.logit_probabilities.sum().item() + if attribution_targets is None: + target_desc = "salient logits" + weight_desc = "cumulative 
probability" + elif ( + isinstance(attribution_targets, Sequence) + and attribution_targets + and isinstance(attribution_targets[0], (tuple, CustomTarget)) + ): + target_desc = "custom attribution targets" + weight_desc = "total weight" + else: + target_desc = "specified logit targets" + weight_desc = "cumulative probability" + logger.info(f"Using {len(targets)} {target_desc} with {weight_desc} {prob_sum:.4f}") diff --git a/circuit_tracer/graph.py b/circuit_tracer/graph.py index 326e1cdd..7996f3d9 100644 --- a/circuit_tracer/graph.py +++ b/circuit_tracer/graph.py @@ -43,7 +43,7 @@ def __init__( activation_values: torch.Tensor, scan: str | list[str] | None = None, attribution_targets: AttributionTargets | None = None, - logit_targets: list[LogitTarget] | torch.Tensor | None = None, + logit_targets: list[LogitTarget] | None = None, logit_probabilities: torch.Tensor | None = None, vocab_size: int | None = None, ): @@ -66,22 +66,19 @@ def __init__( model.cfg.n_layers * len(input_tokens) error nodes, len(input_tokens) embed nodes, len(logit_targets) logit nodes. The rows represent target nodes, while columns represent source nodes. - cfg (HookedTransformerConfig): The cfg of the model. + cfg: The cfg of the model. selected_features (torch.Tensor): Indices into active_features for selected nodes. activation_values (torch.Tensor): Activation values for selected features. scan (Optional[Union[str,List[str]]], optional): The identifier of the transcoders used in the graph. Without a scan, the graph cannot be uploaded (since we won't know what transcoders were used). Defaults to None - attribution_targets (Optional[AttributionTargets]): Attribution targets container. - When provided, logit_targets, logit_probabilities, and vocab_size are - extracted from it. - logit_targets (Optional[Union[List[LogitTarget], torch.Tensor]]): Either a list - of LogitTarget records or a tensor of token_ids. When using tensor - format, token_str fields will be empty strings. 
- logit_probabilities (Optional[torch.Tensor]): Logit probabilities. Required if - attribution_targets is not provided. - vocab_size (Optional[int]): Vocabulary size for determining virtual indices. - If not provided, defaults to cfg.d_vocab. + attribution_targets: Attribution targets container. When provided, + logit_targets, logit_probabilities, and vocab_size are extracted from it. + logit_targets: List of LogitTarget records. Required if attribution_targets + is not provided. + logit_probabilities: Logit probabilities. Required if attribution_targets + is not provided. + vocab_size: Vocabulary size. If not provided, defaults to cfg.d_vocab. """ if attribution_targets is not None: if logit_targets is not None or logit_probabilities is not None: @@ -93,13 +90,7 @@ def __init__( self.logit_probabilities = attribution_targets.logit_probabilities self.vocab_size = attribution_targets.vocab_size elif logit_targets is not None and logit_probabilities is not None: - if isinstance(logit_targets, torch.Tensor): - # When reconstructing from tensor, token_str is not available - self.logit_targets = [ - LogitTarget(token_str="", vocab_idx=int(idx)) for idx in logit_targets.tolist() - ] - else: - self.logit_targets = logit_targets + self.logit_targets = logit_targets self.logit_probabilities = logit_probabilities self.vocab_size = vocab_size if vocab_size is not None else cfg.d_vocab else: @@ -134,36 +125,18 @@ def to(self, device): @property def vocab_indices(self) -> list[int]: - """All vocabulary indices including virtual indices (>= vocab_size). - - Provides the same interface as AttributionTargets.vocab_indices. - """ + """All vocabulary indices.""" return [target.vocab_idx for target in self.logit_targets] - @property - def has_virtual_indices(self) -> bool: - """Check if any targets use virtual indices (OOV tokens). 
- - Virtual indices (vocab_idx >= vocab_size) are a technique used to represent - arbitrary string tokens (or functions thereof) not in the tokenizer's vocabulary. - """ - return any(t.vocab_idx >= self.vocab_size for t in self.logit_targets) - @property def logit_token_ids(self) -> torch.Tensor: - """Tensor of logit target token IDs (< vocab_size only). + """Tensor of logit target token IDs. Returns token IDs for logit targets on the same device as other graph tensors. - Provides the same interface as AttributionTargets.token_ids. - Raises: - ValueError: If any targets have virtual indices + Returns: + torch.Tensor: Long tensor of vocabulary indices """ - if self.has_virtual_indices: - raise ValueError( - "Cannot create logit_token_ids tensor: some targets use virtual indices. " - "Use vocab_indices to get all indices including virtual ones." - ) return torch.tensor( self.vocab_indices, dtype=torch.long, device=self.logit_probabilities.device ) @@ -210,6 +183,9 @@ def to_pt(self, path: str): def from_pt(path: str, map_location="cpu") -> "Graph": """Load a graph (saved using graph.to_pt) from a .pt file at the given path. + Handles backward compatibility with older serialized graphs that stored + logit_targets as a torch.Tensor of token IDs. + Args: path (str): The path of the Graph to load map_location (str, optional): the device to load the graph onto. 
@@ -219,6 +195,12 @@ def from_pt(path: str, map_location="cpu") -> "Graph": Graph: the Graph saved at the specified path """ d = torch.load(path, weights_only=False, map_location=map_location) + # BC: convert legacy tensor logit_targets to LogitTarget list + lt = d.get("logit_targets") + if isinstance(lt, torch.Tensor): + d["logit_targets"] = [ + LogitTarget(token_str="", vocab_idx=int(idx)) for idx in lt.tolist() + ] return Graph(**d) diff --git a/circuit_tracer/utils/tl_nnsight_mapping.py b/circuit_tracer/utils/tl_nnsight_mapping.py index 06a4215e..a28acaba 100644 --- a/circuit_tracer/utils/tl_nnsight_mapping.py +++ b/circuit_tracer/utils/tl_nnsight_mapping.py @@ -277,7 +277,7 @@ def convert_nnsight_config_to_transformerlens(config): config_dict |= config_dict["text_config"] for nnsight_field, transformerlens_field in field_mappings.items(): - if transformerlens_field not in config_dict and nnsight_field in config_dict: + if transformerlens_field not in config_dict: config_dict[transformerlens_field] = config_dict[nnsight_field] return UnifiedConfig.from_dict(config_dict) diff --git a/tests/test_attribution_targets.py b/tests/test_attribution_targets.py index a035b2c7..734c7dcf 100644 --- a/tests/test_attribution_targets.py +++ b/tests/test_attribution_targets.py @@ -1,13 +1,23 @@ -"""Unit tests for AttributionTargets class.""" +"""Tests for AttributionTargets class.""" + +import gc +from collections.abc import Sequence +from typing import cast import torch import pytest -from circuit_tracer.attribution.targets import AttributionTargets +from circuit_tracer import Graph, ReplacementModel +from circuit_tracer.attribution.attribute import attribute +from circuit_tracer.attribution.targets import AttributionTargets, CustomTarget, LogitTarget class MockTokenizer: - """Mock tokenizer for testing.""" + """Mock tokenizer for testing. 
+ + This tokenizer supports bijective encode/decode for strings of the form + ``"tok_"`` so that roundtrip consistency tests work correctly. + """ vocab_size = 100 # Define vocab size for testing @@ -15,12 +25,19 @@ def encode(self, text, add_special_tokens=False): # Simple mock: return token indices within valid range (0-99) if not text: return [] - # Use hash to generate consistent indices within range - return [hash(text) % 100] + # Support roundtrip: if text is "tok_", decode back to N + if text.startswith("tok_"): + try: + idx = int(text[4:]) + if 0 <= idx < self.vocab_size: + return [idx] + except ValueError: + pass + # Fallback: use hash to generate consistent indices within range + return [hash(text) % self.vocab_size] def decode(self, token_id): """Decode a single token ID to a string.""" - # Simple mock: return string representation prefixed with "tok_" if isinstance(token_id, int): return f"tok_{token_id}" return str(token_id) @@ -41,92 +58,94 @@ def mock_data(): return logits, unembed_proj, tokenizer +# === Sequence[str] mode tests === + + +def test_attribution_targets_str_list(mock_data): + """Test AttributionTargets with Sequence[str] input (list).""" + logits, unembed_proj, tokenizer = mock_data + targets = AttributionTargets( + attribution_targets=["hello", "world", "test"], + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + assert len(targets) == 3 + assert all(isinstance(t, LogitTarget) for t in targets.logit_targets) + assert targets.logit_probabilities.shape == (3,) + assert targets.logit_vectors.shape == (3, 64) + # All should have real vocab indices + assert all(t.vocab_idx < tokenizer.vocab_size for t in targets.logit_targets) + # token_ids should work (all real indices) + token_ids = targets.token_ids + assert token_ids.shape == (3,) + # tokens property should return decoded strings + assert all(len(t) > 0 for t in targets.tokens) + + +# === Sequence[TargetSpec] mode tests === + + @pytest.mark.parametrize( - 
"targets_list,expected_len,expected_key_types,expected_keys,test_id", + "target_tuples,expected_keys", [ - ( - [("arbitrary_token", 0.5, torch.randn(64)), 5, ("another", 0.3, torch.randn(64))], - 3, - # LogitTarget instances have both str and int, but check token_str type - ["str", "int", "str"], - ["arbitrary_token", None, "another"], # None for dynamic int keys - "mixed", - ), ( [ ("token1", 0.4, torch.randn(64)), ("token2", 0.3, torch.randn(64)), ("token3", 0.3, torch.randn(64)), ], - 3, - ["str", "str", "str"], ["token1", "token2", "token3"], - "all_tuples", - ), - ( - ["hello", "world", "test"], - 3, - ["int", "int", "int"], # Strings get tokenized to ints - [None, None, None], # Dynamic keys - "all_strings", ), ], - ids=["mixed", "all_tuples", "all_strings"], + ids=["all_tuples"], ) -def test_attribution_targets_list_mode( - mock_data, targets_list, expected_len, expected_key_types, expected_keys, test_id -): - """Test AttributionTargets with list input (most flexible mode).""" +def test_attribution_targets_tuple_list(mock_data, target_tuples, expected_keys): + """Test AttributionTargets with Sequence[tuple[str, float, Tensor]] input.""" logits, unembed_proj, tokenizer = mock_data - targets = AttributionTargets( - attribution_targets=targets_list, + attribution_targets=target_tuples, logits=logits, unembed_proj=unembed_proj, tokenizer=tokenizer, ) - # Verify basic structure - from circuit_tracer.attribution.targets import LogitTarget + assert len(targets) == len(expected_keys) + assert all(isinstance(t, LogitTarget) for t in targets.logit_targets) + # Tuple targets get virtual indices + assert all(t.vocab_idx >= tokenizer.vocab_size for t in targets.logit_targets) + # Check token_str matches expected keys + for i, expected_key in enumerate(expected_keys): + assert targets.logit_targets[i].token_str == expected_key + assert torch.allclose(targets.logit_probabilities, torch.tensor([0.4, 0.3, 0.3])) - assert isinstance(targets.logit_targets, list) - assert 
len(targets) == expected_len + +def test_attribution_targets_custom_target_namedtuple(mock_data): + """Test AttributionTargets with Sequence[CustomTarget] input.""" + logits, unembed_proj, tokenizer = mock_data + + custom_targets = [ + CustomTarget(token_str="target_a", prob=0.6, vec=torch.randn(64)), + CustomTarget(token_str="target_b", prob=0.4, vec=torch.randn(64)), + ] + targets = AttributionTargets( + attribution_targets=custom_targets, + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + assert len(targets) == 2 assert all(isinstance(t, LogitTarget) for t in targets.logit_targets) - assert targets.logit_probabilities.shape == (expected_len,) - assert targets.logit_vectors.shape == (expected_len, 64) - - # Verify token_str and vocab_idx based on expected types - for i, expected_type in enumerate(expected_key_types): - target = targets.logit_targets[i] - assert isinstance(target.token_str, str), f"Target {i} token_str should be str" - assert isinstance(target.vocab_idx, int), f"Target {i} vocab_idx should be int" - - # Check token_str matches expected_keys when provided - expected_key = expected_keys[i] - if expected_key is not None: - assert target.token_str == expected_key, f"Target {i} token_str mismatch" - - # Check vocab_idx type based on whether this was an arbitrary string/ - # function thereof (tuple) - if expected_type == "str": # Was a tuple with arbitrary string - # Should have virtual index >= vocab_size - assert target.vocab_idx >= tokenizer.vocab_size, f"Target {i} should have virtual index" - else: # Was int or tokenized string - # Should have real vocab index < vocab_size - assert target.vocab_idx < tokenizer.vocab_size, ( - f"Target {i} should have real vocab index" - ) - - # Test-specific assertions - if test_id == "mixed": - # First and third elements from tuples should have provided probs - assert abs(targets.logit_probabilities[0].item() - 0.5) < 1e-6 - assert abs(targets.logit_probabilities[2].item() - 0.3) < 1e-6 - 
elif test_id == "all_tuples": - assert torch.allclose(targets.logit_probabilities, torch.tensor([0.4, 0.3, 0.3])) - elif test_id == "all_strings": - # All should be tokenized - check via tokens property - assert all(len(t) > 0 for t in targets.tokens) + # CustomTarget targets get virtual indices + assert all(t.vocab_idx >= tokenizer.vocab_size for t in targets.logit_targets) + assert targets.logit_targets[0].token_str == "target_a" + assert targets.logit_targets[1].token_str == "target_b" + assert torch.allclose(targets.logit_probabilities, torch.tensor([0.6, 0.4])) + + +# === Auto modes (None and Tensor) === @pytest.mark.parametrize( @@ -157,112 +176,276 @@ def test_attribution_targets_auto_modes( **kwargs, ) - # Verify basic structure - all targets should be LogitTarget instances - from circuit_tracer.attribution.targets import LogitTarget - assert isinstance(targets.logit_targets, list) assert all(isinstance(t, LogitTarget) for t in targets.logit_targets) - # All should have real vocab indices (< vocab_size) assert all(t.vocab_idx < tokenizer.vocab_size for t in targets.logit_targets) if test_id == "salient": assert len(targets) <= max_n_logits assert len(targets) >= 1 - # Probabilities should sum to at least desired_prob (or hit max_n_logits) prob_sum = targets.logit_probabilities.sum().item() assert prob_sum >= desired_prob or len(targets) == max_n_logits elif test_id == "specific_indices": - # Check vocab_idx matches expected assert [t.vocab_idx for t in targets.logit_targets] == [5, 10, 15] assert targets.logit_probabilities.shape == (3,) assert targets.logit_vectors.shape == (3, 64) +# === Error handling === + + @pytest.mark.parametrize( - "targets_list,error_match", + "targets_input,error_type,error_match", [ ( [("token", 0.5)], # Only 2 elements, should be 3 + ValueError, "exactly 3 elements", ), ( [(5, 0.5, torch.randn(64))], # int instead of str - "str as first element", + TypeError, + "Custom target token_str must be str", ), ( [], # Empty list + 
ValueError, "cannot be empty", ), + ( + [42], # int in list (no longer supported) + TypeError, + "Sequence elements must be str or TargetSpec", + ), + ( + torch.tensor([5, 105, 10]), # Tensor with out of range + ValueError, + "Token indices must be in range", + ), + ], + ids=[ + "invalid_tuple_length", + "invalid_tuple_token_type", + "empty_list", + "int_in_list_rejected", + "tensor_out_of_range", ], - ids=["invalid_tuple_length", "invalid_tuple_token_type", "empty_list"], ) -def test_attribution_targets_errors(mock_data, targets_list, error_match): +def test_attribution_targets_errors(mock_data, targets_input, error_type, error_match): """Test AttributionTargets error handling.""" logits, unembed_proj, tokenizer = mock_data - with pytest.raises(ValueError, match=error_match): + with pytest.raises(error_type, match=error_match): AttributionTargets( - attribution_targets=targets_list, # type: ignore + attribution_targets=targets_input, # type: ignore logits=logits, unembed_proj=unembed_proj, tokenizer=tokenizer, ) -def test_attribution_targets_consistency(mock_data): - """Test that the same inputs produce consistent results.""" - logits, unembed_proj, tokenizer = mock_data +# === Consistency tests === - targets_list = [5, "hello", ("custom", 0.5, torch.randn(64))] + +def test_attribution_targets_str_list_consistency(mock_data): + """Test that the same string list inputs produce consistent results.""" + logits, unembed_proj, tokenizer = mock_data targets1 = AttributionTargets( - attribution_targets=targets_list, + attribution_targets=["hello", "world"], logits=logits, unembed_proj=unembed_proj, tokenizer=tokenizer, ) targets2 = AttributionTargets( - attribution_targets=targets_list, + attribution_targets=["hello", "world"], logits=logits, unembed_proj=unembed_proj, tokenizer=tokenizer, ) - assert targets1.logit_targets == targets2.logit_targets + assert torch.equal(targets1.logit_probabilities, targets2.logit_probabilities) + assert 
torch.equal(targets1.logit_vectors, targets2.logit_vectors) -def test_attribution_targets_tokens_property(mock_data): - """Test tokens property decodes ints and preserves strings.""" +def test_attribution_targets_none_vs_str_list_consistency(mock_data): + """Test that None (auto-select) and equivalent Sequence[str] produce same results. + + Runs with None to auto-select salient logits, then constructs equivalent + Sequence[str] from the auto-selected token strings and verifies consistency. + """ + logits, unembed_proj, tokenizer = mock_data + + # Auto-select + targets_auto = AttributionTargets( + attribution_targets=None, + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + max_n_logits=5, + desired_logit_prob=0.8, + ) + + # Reconstruct using the auto-selected token strings + auto_token_strs = targets_auto.tokens + targets_explicit = AttributionTargets( + attribution_targets=auto_token_strs, + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + # Same logit targets + assert targets_auto.logit_targets == targets_explicit.logit_targets + # Same probabilities + assert torch.allclose(targets_auto.logit_probabilities, targets_explicit.logit_probabilities) + # Same vectors + assert torch.allclose(targets_auto.logit_vectors, targets_explicit.logit_vectors) + + +def test_attribution_targets_none_vs_tuple_list_consistency(mock_data): + """Test that None and equivalent Sequence[TargetSpec] produce same results. + + Auto-selects, then constructs equivalent Sequence[TargetSpec] with the same + probabilities and vectors, and verifies consistency. 
+ """ logits, unembed_proj, tokenizer = mock_data - targets_list = [ - 5, - ("arbitrary", 0.5, torch.randn(64)), - 10, + # Auto-select + targets_auto = AttributionTargets( + attribution_targets=None, + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + max_n_logits=3, + desired_logit_prob=0.5, + ) + + # Reconstruct as tuple list with same probs and vecs + tuple_targets = [ + (tok, prob.item(), vec) + for tok, prob, vec in zip( + targets_auto.tokens, + targets_auto.logit_probabilities, + targets_auto.logit_vectors, + ) ] + targets_tuple = AttributionTargets( + attribution_targets=tuple_targets, + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + # Same probabilities + assert torch.allclose(targets_auto.logit_probabilities, targets_tuple.logit_probabilities) + # Same vectors + assert torch.allclose(targets_auto.logit_vectors, targets_tuple.logit_vectors) + # Same token strings + assert targets_auto.tokens == targets_tuple.tokens + + +# === Tuple (non-list Sequence) input tests === + + +def test_attribution_targets_tuple_of_strs(mock_data): + """Test AttributionTargets accepts tuple[str, ...] as Sequence[str] input.""" + logits, unembed_proj, tokenizer = mock_data + targets = AttributionTargets( + attribution_targets=("hello", "world", "test"), + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + assert len(targets) == 3 + assert all(isinstance(t, LogitTarget) for t in targets.logit_targets) + assert targets.logit_probabilities.shape == (3,) + assert targets.logit_vectors.shape == (3, 64) + assert all(t.vocab_idx < tokenizer.vocab_size for t in targets.logit_targets) + + +def test_attribution_targets_tuple_of_target_specs(mock_data): + """Test AttributionTargets accepts tuple[TargetSpec, ...] 
as Sequence[TargetSpec] input.""" + logits, unembed_proj, tokenizer = mock_data + ct1 = CustomTarget(token_str="alpha", prob=0.6, vec=torch.randn(64)) + ct2 = CustomTarget(token_str="beta", prob=0.4, vec=torch.randn(64)) + targets = AttributionTargets( + attribution_targets=(ct1, ct2), + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + assert len(targets) == 2 + assert all(isinstance(t, LogitTarget) for t in targets.logit_targets) + assert targets.logit_targets[0].token_str == "alpha" + assert targets.logit_targets[1].token_str == "beta" + assert torch.allclose(targets.logit_probabilities, torch.tensor([0.6, 0.4])) + + +# === Property and utility tests === + + +def test_attribution_targets_tokens_property(mock_data): + """Test tokens property returns correct strings for tuple targets.""" + logits, unembed_proj, tokenizer = mock_data targets = AttributionTargets( - attribution_targets=targets_list, + attribution_targets=[ + ("arbitrary", 0.5, torch.randn(64)), + ("custom_func", 0.3, torch.randn(64)), + ], logits=logits, unembed_proj=unembed_proj, tokenizer=tokenizer, ) tokens = targets.tokens + assert tokens == ["arbitrary", "custom_func"] - assert isinstance(tokens, list) - assert len(tokens) == 3 - assert tokens[0] == "tok_5" # int decoded with tokenizer - assert tokens[1] == "arbitrary" # str kept as-is - assert tokens[2] == "tok_10" # int decoded with tokenizer + +def test_attribution_targets_vocab_indices(mock_data): + """Test vocab_indices property for tuple targets (virtual indices).""" + logits, unembed_proj, tokenizer = mock_data + vocab_size = tokenizer.vocab_size + + targets = AttributionTargets( + attribution_targets=[ + ("t1", 0.3, torch.randn(64)), + ("t2", 0.4, torch.randn(64)), + ("t3", 0.3, torch.randn(64)), + ], + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + expected = [vocab_size + 0, vocab_size + 1, vocab_size + 2] + assert targets.vocab_indices == expected + + +def 
test_attribution_targets_token_ids_real(mock_data): + """Test token_ids property for real vocab indices (str list and tensor).""" + logits, unembed_proj, tokenizer = mock_data + + # Tensor input + targets = AttributionTargets( + attribution_targets=torch.tensor([5, 10, 15]), + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + token_ids = targets.token_ids + assert torch.equal(token_ids, torch.tensor([5, 10, 15], dtype=torch.long)) @pytest.mark.parametrize( "test_method,expected_value", [ ("to_device", "cpu"), - ("repr", "AttributionTargets(n=5, keys=[1, 2, 3]...)"), - ("len", 5), + ("repr", "AttributionTargets"), + ("len", 3), ], ids=["to_device", "repr", "len"], ) @@ -270,114 +453,467 @@ def test_attribution_targets_utility_methods(mock_data, test_method, expected_va """Test utility methods: to(), __repr__(), and __len__().""" logits, unembed_proj, tokenizer = mock_data - # Use same targets for all tests - targets_list = [1, 2, 3, 4, 5] - targets = AttributionTargets( - attribution_targets=targets_list, + attribution_targets=["a", "b", "c"], logits=logits, unembed_proj=unembed_proj, tokenizer=tokenizer, ) if test_method == "to_device": - # Test device transfer targets_cpu = targets.to("cpu") assert isinstance(targets_cpu, AttributionTargets) - assert targets_cpu.logit_targets == targets.logit_targets assert targets_cpu.logit_probabilities.device.type == expected_value assert targets_cpu.logit_vectors.device.type == expected_value - assert targets_cpu.tokenizer is tokenizer # Verify tokenizer preserved + assert targets_cpu.tokenizer is tokenizer elif test_method == "repr": - # Test string representation repr_str = repr(targets) assert "AttributionTargets" in repr_str - assert "n=5" in repr_str - # Check for "targets=" since keys are now LogitTarget instances - assert "targets=" in repr_str + assert "n=3" in repr_str elif test_method == "len": - # Test __len__ assert len(targets) == expected_value -@pytest.mark.parametrize( - 
"targets_list,expected_indices,test_id", - [ - # All real vocab tokens - ([5, 10, 15], [5, 10, 15], "all_real"), - # Mixed real and virtual (arbitrary strings) - ([5, ("arb", 0.5, torch.randn(64)), 10], lambda vs: [5, vs + 1, 10], "mixed"), - # All virtual (arbitrary strings) - ( +# === Multi-token encoding tests === + + +def test_attribution_targets_multi_token_warning(mock_data): + """Test that multi-token strings trigger a warning.""" + logits, unembed_proj, tokenizer = mock_data + + # Mock tokenizer to return multi-token encoding for a specific string + original_encode = tokenizer.encode + + def multi_token_encode(text, add_special_tokens=False): + if text == "multi_token_string": + return [10, 20, 30] # Three tokens + return original_encode(text, add_special_tokens) + + tokenizer.encode = multi_token_encode + + with pytest.warns(UserWarning, match="encoded to 3 tokens"): + targets = AttributionTargets( + attribution_targets=["multi_token_string"], + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + # Verify it used the last token + assert len(targets) == 1 + assert targets.logit_targets[0].vocab_idx == 30 + + # Restore original encode + tokenizer.encode = original_encode + + +# === Type validation === + + +def test_attribution_targets_tuple_invalid_prob_type(mock_data): + """Test that invalid prob type raises TypeError.""" + logits, unembed_proj, tokenizer = mock_data + + with pytest.raises(TypeError, match="Custom target prob must be int or float"): + from circuit_tracer.attribution.targets import TargetSpec + + invalid_targets = cast( + Sequence[TargetSpec], [ - ("t1", 0.3, torch.randn(64)), - ("t2", 0.4, torch.randn(64)), - ("t3", 0.3, torch.randn(64)), + ( + "token1", + "0.5", + torch.randn(64), + ), # String instead of float - intentionally invalid ], - lambda vs: [vs + 0, vs + 1, vs + 2], - "all_virtual", - ), - ], - ids=["all_real", "mixed", "all_virtual"], -) -def test_attribution_targets_vocab_indices(mock_data, targets_list, 
expected_indices, test_id): - """Test vocab_indices property with various combinations of real and virtual tokens.""" + ) + AttributionTargets( + attribution_targets=invalid_targets, + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + +def test_attribution_targets_tuple_invalid_vec_type(mock_data): + """Test that invalid vec type raises TypeError.""" + logits, unembed_proj, tokenizer = mock_data + + with pytest.raises(TypeError, match="Custom target vec must be torch.Tensor"): + from circuit_tracer.attribution.targets import TargetSpec + + invalid_targets = cast( + Sequence[TargetSpec], + [ + ("token1", 0.5, [1.0, 2.0, 3.0]), # List instead of Tensor - intentionally invalid + ], + ) + AttributionTargets( + attribution_targets=invalid_targets, + logits=logits, + unembed_proj=unembed_proj, + tokenizer=tokenizer, + ) + + +def test_attribution_targets_tuple_valid_int_prob(mock_data): + """Test that int probability is accepted (not just float).""" logits, unembed_proj, tokenizer = mock_data - vocab_size = tokenizer.vocab_size # 100 targets = AttributionTargets( - attribution_targets=targets_list, + attribution_targets=[ + ("token1", 1, torch.randn(64)), # Int probability + ], logits=logits, unembed_proj=unembed_proj, tokenizer=tokenizer, ) - # Compute expected indices (may depend on vocab_size for virtual indices) - if callable(expected_indices): - expected = expected_indices(vocab_size) + assert len(targets) == 1 + assert targets.logit_probabilities[0].item() == 1.0 + + +# ============================================================================= +# Integration tests: custom target correctness & format consistency +# ============================================================================= + +# === Shared helpers for integration tests === + + +def _get_top_features(graph: Graph, n: int = 10) -> list[tuple[int, int, int]]: + """Extract the top-N feature nodes from the graph based on attribution scores. 
+ + Returns list of (layer, pos, feature_idx) tuples. + """ + error_node_offset = graph.active_features.shape[0] + _, first_order_indices = torch.topk(graph.adjacency_matrix[-1, :error_node_offset], n) + top_features = [tuple(x) for x in graph.active_features[first_order_indices].tolist()] + return top_features + + +def _get_unembed_weights(model, backend: str): + """Helper to get unembedding weights in a backend-agnostic way.""" + if backend == "transformerlens": + return model.unembed.W_U # (d_model, d_vocab) else: - expected = expected_indices - - vocab_indices = targets.vocab_indices - assert vocab_indices == expected - assert all(isinstance(idx, int) for idx in vocab_indices) - - # Verify virtual index detection - if test_id == "all_real": - assert not targets.has_virtual_indices - # Should be able to get token_ids - token_ids = targets.token_ids - assert torch.equal(token_ids, torch.tensor(expected, dtype=torch.long)) + return model.unembed_weight # (d_vocab, d_model) for NNSight + + +def _build_custom_diff_target( + model, prompt: str, token_x: str, token_y: str, backend: str +) -> tuple[CustomTarget, int, int]: + """Build a CustomTarget representing logit(x) - logit(y) from the model's unembed matrix. + + Returns: + Tuple of (custom_target, idx_x, idx_y) where idx_x and idx_y are + the token indices for x and y respectively. 
+ """ + tokenizer = model.tokenizer + idx_x = tokenizer.encode(token_x, add_special_tokens=False)[-1] + idx_y = tokenizer.encode(token_y, add_special_tokens=False)[-1] + + input_ids = model.ensure_tokenized(prompt) + with torch.no_grad(): + logits, _ = model.get_activations(input_ids) + last_logits = logits.squeeze(0)[-1] # (d_vocab,) + + # Auto-detect matrix orientation by matching against vocabulary size + d_vocab = tokenizer.vocab_size + unembed = _get_unembed_weights(model, backend) + if unembed.shape[0] == d_vocab: + vec_x = unembed[idx_x] # (d_model,) + vec_y = unembed[idx_y] # (d_model,) else: - assert targets.has_virtual_indices - # Should raise when trying to get token_ids - with pytest.raises(ValueError, match="virtual indices"): - _ = targets.token_ids + # Shape is (d_model, d_vocab) – second axis is vocabulary (e.g., TransformerLens) + vec_x = unembed[:, idx_x] # (d_model,) + vec_y = unembed[:, idx_y] # (d_model,) + + diff_vec = vec_x - vec_y + # Use the absolute difference in softmax probabilities as weight + probs = torch.softmax(last_logits, dim=-1) + diff_prob = (probs[idx_x] - probs[idx_y]).abs().item() + if diff_prob < 1e-6: + diff_prob = 0.5 # fallback weight if probs are nearly equal + + custom_target = CustomTarget( + token_str=f"logit({token_x})-logit({token_y})", + prob=diff_prob, + vec=diff_vec, + ) + return custom_target, idx_x, idx_y -@pytest.mark.parametrize( - "targets_list,error_match", - [ - # Out of range token ID - ([110], "out of vocabulary range.*100"), - # Negative token ID - ([-5], "out of vocabulary range"), - # Tensor with out of range - (torch.tensor([5, 105, 10]), "Token indices must be in range"), - ], - ids=["token_id_out_of_range", "token_id_negative", "tensor_out_of_range"], -) -def test_attribution_targets_validation_errors(mock_data, targets_list, error_match): - """Test validation catches various invalid token ID errors.""" - logits, unembed_proj, tokenizer = mock_data +def _cfg_backend(backend: str): + """Return 
(model, n_layers_range, unembed_proj) for the given backend.""" + if backend == "transformerlens": + model = ReplacementModel.from_pretrained("google/gemma-2-2b", "gemma") + n_layers_range = range(model.cfg.n_layers) # type: ignore + unembed_proj = model.unembed.W_U + else: + model = ReplacementModel.from_pretrained("google/gemma-2-2b", "gemma", backend="nnsight") + n_layers_range = range(model.config.num_hidden_layers) # type: ignore + unembed_proj = model.unembed_weight + return model, n_layers_range, unembed_proj + + +def _run_custom_target_correctness(backend: str): + """Backend-agnostic logic for custom target correctness test. + + 1. Build a CustomTarget logit(x) - logit(y) + 2. Run attribution using that custom target + 3. Find top attributed features via first-order adjacency scores + 4. Ablate those features → verify logit diff magnitude decreases + 5. Amplify those features (10x) → verify logit diff magnitude increases + 6. Verify unrelated logits are not dramatically affected + + NOTE: All logit comparisons use ``zero_softcap()`` so that baseline and + intervention logits live in the same (unsoftcapped) space. Without this, + Gemma-2's ``output_logits_soft_cap`` compresses baseline logits, making + direct comparison with unsoftcapped intervention logits invalid. 
+ """ + + prompt = "The capital of the state containing Dallas is" + token_x, token_y = "▁Austin", "▁Dallas" + unrelated_tokens = ["▁banana", "▁pillow"] # for stability check + + model, n_layers_range, _ = _cfg_backend(backend) + custom_target, idx_x, idx_y = _build_custom_diff_target( + model, prompt, token_x, token_y, backend + ) + assert model.tokenizer is not None + unrelated_indices = [ + model.tokenizer.encode(tok, add_special_tokens=False)[-1] for tok in unrelated_tokens + ] - with pytest.raises(ValueError, match=error_match): - AttributionTargets( - attribution_targets=targets_list, # type: ignore - logits=logits, - unembed_proj=unembed_proj, - tokenizer=tokenizer, + graph = attribute(prompt, model, attribution_targets=[custom_target], batch_size=256) + + # Validate graph structure + assert len(graph.logit_targets) == 1 + assert graph.logit_targets[0].token_str == custom_target.token_str + # Virtual index (custom target uses index >= vocab_size) + assert graph.logit_targets[0].vocab_idx >= graph.vocab_size + + # Get baseline logits w/o softcap + input_ids = model.ensure_tokenized(prompt) + with torch.no_grad(), model.zero_softcap(): + baseline_logits, _ = model.get_activations(input_ids) + baseline_logits = baseline_logits.squeeze(0)[-1] + baseline_x = baseline_logits[idx_x].item() + baseline_y = baseline_logits[idx_y].item() + baseline_diff = baseline_x - baseline_y + baseline_unrelated = [baseline_logits[idx].item() for idx in unrelated_indices] + + # Get top features from attribution graph (by first-order adjacency scores) + top_features = _get_top_features(graph, n=5) + + ablation_interventions = [(layer, pos, feat_idx, 0.0) for layer, pos, feat_idx in top_features] + + with model.zero_softcap(): + ablated_logits, _ = model.feature_intervention( + input_ids, + ablation_interventions, + constrained_layers=n_layers_range, + return_activations=False, + ) + ablated_logits = ablated_logits.squeeze(0)[-1] + ablated_x = ablated_logits[idx_x].item() + ablated_y 
= ablated_logits[idx_y].item() + ablated_diff = ablated_x - ablated_y + ablated_unrelated = [ablated_logits[idx].item() for idx in unrelated_indices] + + # === Amplification by 10x === + # Get pre-activation feature values for amplification targets + with torch.no_grad(): + _, act_cache = model.get_activations(input_ids, apply_activation_function=False) + + amplify_interventions = [ + (layer, pos, feat_idx, act_cache[layer, pos, feat_idx].item() * 10.0) + for layer, pos, feat_idx in top_features + ] + + with model.zero_softcap(): + amplified_logits, _ = model.feature_intervention( + input_ids, + amplify_interventions, + constrained_layers=n_layers_range, + return_activations=False, ) + amplified_logits = amplified_logits.squeeze(0)[-1] + amplified_x = amplified_logits[idx_x].item() + amplified_y = amplified_logits[idx_y].item() + amplified_diff = amplified_x - amplified_y + amplified_unrelated = [amplified_logits[idx].item() for idx in unrelated_indices] + + # === Directional assertions === + # The custom target direction is logit(x) - logit(y), so baseline_diff > 0. + # Ablating top features that contribute to this direction should decrease the diff. + # Amplifying those features should increase the diff. + assert abs(ablated_diff) < abs(baseline_diff), ( + f"Ablation of top features should decrease |logit diff|: " + f"|baseline_diff|={abs(baseline_diff):.4f}, |ablated_diff|={abs(ablated_diff):.4f}" + ) + assert abs(amplified_diff) > abs(baseline_diff), ( + f"Amplification of top features should increase |logit diff|: " + f"|baseline_diff|={abs(baseline_diff):.4f}, |amplified_diff|={abs(amplified_diff):.4f}" + ) + + # === Unrelated logit stability check === + # Verify that unrelated tokens are not affected more than the target tokens. + # The max individual target logit change provides an upper bound for unrelated changes. 
+ max_target_abl_change = max(abs(ablated_x - baseline_x), abs(ablated_y - baseline_y)) + max_target_amp_change = max(abs(amplified_x - baseline_x), abs(amplified_y - baseline_y)) + + for i, tok in enumerate(unrelated_tokens): + unrelated_abl_change = abs(ablated_unrelated[i] - baseline_unrelated[i]) + unrelated_amp_change = abs(amplified_unrelated[i] - baseline_unrelated[i]) + + assert unrelated_abl_change < max_target_abl_change, ( + f"Unrelated token '{tok}' ablation change ({unrelated_abl_change:.4f}) " + f"should be less than max target logit change ({max_target_abl_change:.4f})" + ) + assert unrelated_amp_change < max_target_amp_change, ( + f"Unrelated token '{tok}' amplification change ({unrelated_amp_change:.4f}) " + f"should be less than max target logit change ({max_target_amp_change:.4f})" + ) + + +def _run_attribution_format_consistency(backend: str): + """Backend-agnostic logic for attribution target format consistency test. + + Runs attribution with None (auto-select), then constructs equivalent Sequence[str] + and Sequence[CustomTarget] from the auto-selected targets and verifies consistency. 
+ """ + prompt = "Entropy spares no entity" + + model, _, unembed_proj = _cfg_backend(backend) + + # Run with None (auto-select salient logits) + graph_none = attribute(prompt, model, attribution_targets=None, max_n_logits=5, batch_size=256) + + # Extract the auto-selected token strings and their internal data + auto_token_strs = [t.token_str for t in graph_none.logit_targets] + + # Run with Sequence[str] using the same token strings + graph_str = attribute(prompt, model, attribution_targets=auto_token_strs, batch_size=256) + + # Run with Sequence[CustomTarget] using the same tokens, probs, and vectors + # Reconstruct the unembed vectors for each auto-selected token + input_ids = model.ensure_tokenized(prompt) + with torch.no_grad(): + logits, _ = model.get_activations(input_ids) + last_logits = logits.squeeze(0)[-1] + + # Build the same AttributionTargets that _from_salient would produce to extract the exact vectors + assert isinstance(unembed_proj, torch.Tensor) + auto_targets_obj = AttributionTargets( + attribution_targets=None, + logits=last_logits, + unembed_proj=unembed_proj, + tokenizer=model.tokenizer, + max_n_logits=5, + desired_logit_prob=0.8, + ) + + custom_targets = [ + CustomTarget(token_str=tok, prob=prob.item(), vec=vec) + for tok, prob, vec in zip( + auto_targets_obj.tokens, + auto_targets_obj.logit_probabilities, + auto_targets_obj.logit_vectors, + ) + ] + + graph_tuple = attribute(prompt, model, attribution_targets=custom_targets, batch_size=256) + + # Verify consistency between None and Sequence[str] + # Same number of targets + assert len(graph_none.logit_targets) == len(graph_str.logit_targets), ( + f"None ({len(graph_none.logit_targets)}) vs str ({len(graph_str.logit_targets)}) " + f"target count mismatch" + ) + + # Same token strings + none_tokens = [t.token_str for t in graph_none.logit_targets] + str_tokens = [t.token_str for t in graph_str.logit_targets] + assert none_tokens == str_tokens, f"Token strings differ: {none_tokens} vs 
{str_tokens}" + + # Same probabilities (within tolerance) + assert torch.allclose( + graph_none.logit_probabilities, + graph_str.logit_probabilities, + atol=1e-6, + ), "Probabilities differ between None and Sequence[str] modes" + + # Same adjacency matrix (within tolerance) + assert torch.allclose( + graph_none.adjacency_matrix, + graph_str.adjacency_matrix, + atol=1e-5, + rtol=1e-4, + ), "Adjacency matrices differ between None and Sequence[str] modes" + + # Verify consistency between None and Sequence[CustomTarget] + assert len(graph_none.logit_targets) == len(graph_tuple.logit_targets), ( + f"None ({len(graph_none.logit_targets)}) vs tuple ({len(graph_tuple.logit_targets)}) " + f"target count mismatch" + ) + + # Token strings should match + tuple_tokens = [t.token_str for t in graph_tuple.logit_targets] + assert none_tokens == tuple_tokens, f"Token strings differ: {none_tokens} vs {tuple_tokens}" + + # Probabilities should match + assert torch.allclose( + graph_none.logit_probabilities, + graph_tuple.logit_probabilities.to(graph_none.logit_probabilities.device), + atol=1e-6, + ), "Probabilities differ between None and Sequence[CustomTarget] modes" + + # Adjacency matrices should match (tuple targets use the same unembed vecs) + assert torch.allclose( + graph_none.adjacency_matrix, + graph_tuple.adjacency_matrix.to(graph_none.adjacency_matrix.device), + atol=1e-5, + rtol=1e-4, + ), "Adjacency matrices differ between None and Sequence[CustomTarget] modes" + + +@pytest.fixture(autouse=False) +def cleanup_cuda(): + yield + gc.collect() + torch.cuda.empty_cache() + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.parametrize("backend", ["transformerlens", "nnsight"]) +def test_custom_target_correctness(cleanup_cuda, backend): + """Verify custom attribution targets produce valid results. 
+ + Constructs logit(x) - logit(y) direction, runs attribution, then + verifies that ablating/amplifying top features changes the logit difference + in the expected directions. + + Args: + cleanup_cuda: Fixture for CUDA cleanup after test + backend: Model backend to test ("transformerlens" or "nnsight") + """ + _run_custom_target_correctness(backend) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.parametrize("backend", ["transformerlens", "nnsight"]) +def test_attribution_format_consistency(cleanup_cuda, backend): + """Verify None, Sequence[str], and Sequence[CustomTarget] produce consistent results. + + Runs attribution with None (auto-select), then with equivalent Sequence[str] and + Sequence[CustomTarget] targets, and verifies the graphs are consistent. + + Args: + cleanup_cuda: Fixture for CUDA cleanup after test + backend: Model backend to test ("transformerlens" or "nnsight") + """ + _run_attribution_format_consistency(backend) if __name__ == "__main__": diff --git a/tests/test_graph.py b/tests/test_graph.py index 59166489..d1c7d307 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -145,7 +145,7 @@ def test_small_graph(): def test_graph_with_tensor_logit_targets(): - """Test that Graph accepts legacy tensor format for logit_targets.""" + """Test that Graph accepts LogitTarget list format and from_pt handles legacy tensor format.""" cfg = HookedTransformerConfig.from_dict( { "n_layers": 2, @@ -164,32 +164,35 @@ def test_graph_with_tensor_logit_targets(): adjacency_matrix = torch.zeros([10, 10]) adjacency_matrix[9, 5] = 1.0 - # Test with tensor format - token_str will be empty + # Test with LogitTarget list using empty token strings (simulates legacy conversion) graph_tensor = Graph( input_string="test", input_tokens=torch.tensor([1, 2, 3]), active_features=torch.tensor([[0, 0, 5]]), adjacency_matrix=adjacency_matrix, cfg=cfg, - logit_targets=torch.tensor([262, 290, 314]), # Tensor format + 
logit_targets=[ + LogitTarget(token_str="", vocab_idx=262), + LogitTarget(token_str="", vocab_idx=290), + LogitTarget(token_str="", vocab_idx=314), + ], logit_probabilities=torch.tensor([0.5, 0.3, 0.2]), selected_features=torch.tensor([0]), activation_values=torch.tensor([1.5]), ) - # Verify conversion to LogitTarget list with empty token strings + # Verify LogitTarget list format assert len(graph_tensor.logit_targets) == 3 assert graph_tensor.logit_targets[0].vocab_idx == 262 assert graph_tensor.logit_targets[1].vocab_idx == 290 assert graph_tensor.logit_targets[2].vocab_idx == 314 - # Token strings are empty when constructed from tensor + # Token strings are empty when constructed from legacy tensor assert graph_tensor.logit_targets[0].token_str == "" assert graph_tensor.logit_targets[1].token_str == "" assert graph_tensor.logit_targets[2].token_str == "" # Verify properties work assert graph_tensor.vocab_indices == [262, 290, 314] - assert not graph_tensor.has_virtual_indices assert torch.equal(graph_tensor.logit_token_ids, torch.tensor([262, 290, 314])) # Test with LogitTarget list format (current) @@ -218,9 +221,13 @@ def test_graph_with_tensor_logit_targets(): "logit_targets_input,expected_token_strs", [ pytest.param( - torch.tensor([262, 290, 314]), + [ + LogitTarget(token_str="", vocab_idx=262), + LogitTarget(token_str="", vocab_idx=290), + LogitTarget(token_str="", vocab_idx=314), + ], ["", "", ""], - id="tensor_format", + id="empty_token_str_format", ), pytest.param( [ @@ -281,7 +288,6 @@ def test_graph_serialization_with_logit_targets(logit_targets_input, expected_to # Verify loaded graph has correct data assert loaded_graph.vocab_indices == [262, 290, 314] assert loaded_graph.vocab_size == 50257 - assert not loaded_graph.has_virtual_indices assert torch.equal(loaded_graph.logit_token_ids, torch.tensor([262, 290, 314])) assert torch.equal(loaded_graph.logit_probabilities, torch.tensor([0.5, 0.3, 0.2])) @@ -295,3 +301,58 @@ def 
test_graph_serialization_with_logit_targets(logit_targets_input, expected_to finally: if os.path.exists(tmp_path): os.unlink(tmp_path) + + +def test_graph_from_pt_legacy_tensor_format(): + """Test that Graph.from_pt correctly handles legacy serialized graphs with tensor logit_targets.""" + import tempfile + import os + + cfg = HookedTransformerConfig.from_dict( + { + "n_layers": 2, + "d_model": 8, + "n_ctx": 32, + "d_head": 4, + "n_heads": 2, + "d_mlp": 16, + "act_fn": "gelu", + "d_vocab": 50257, + "model_name": "test-model", + "device": get_default_device(), + } + ) + + # Simulate a legacy .pt file with tensor logit_targets + legacy_data = { + "input_string": "test", + "adjacency_matrix": torch.zeros([10, 10]), + "cfg": cfg, + "active_features": torch.tensor([[0, 0, 5]]), + "logit_targets": torch.tensor([262, 290, 314]), # Legacy tensor format + "logit_probabilities": torch.tensor([0.5, 0.3, 0.2]), + "vocab_size": 50257, + "input_tokens": torch.tensor([1, 2, 3]), + "selected_features": torch.tensor([0]), + "activation_values": torch.tensor([1.5]), + "scan": None, + } + + with tempfile.NamedTemporaryFile(delete=False, suffix=".pt") as tmp: + tmp_path = tmp.name + + try: + torch.save(legacy_data, tmp_path) + loaded_graph = Graph.from_pt(tmp_path) + + # Verify from_pt converted tensor to LogitTarget list + assert len(loaded_graph.logit_targets) == 3 + assert all(isinstance(lt, LogitTarget) for lt in loaded_graph.logit_targets) + assert loaded_graph.logit_targets[0].vocab_idx == 262 + assert loaded_graph.logit_targets[1].vocab_idx == 290 + assert loaded_graph.logit_targets[2].vocab_idx == 314 + assert loaded_graph.logit_targets[0].token_str == "" + assert loaded_graph.vocab_indices == [262, 290, 314] + finally: + if os.path.exists(tmp_path): + os.unlink(tmp_path) From c8b44257563ff2bdfbed8a1e2389917fc5f6dd64 Mon Sep 17 00:00:00 2001 From: Daniel Dale Date: Tue, 10 Feb 2026 16:03:48 -0800 Subject: [PATCH 11/18] remove marks, add vram skipif conditions --- 
circuit_tracer/utils/create_graph_files.py | 10 +++++----- pyproject.toml | 2 -- tests/conftest.py | 3 +++ tests/test_attributions_gemma3_nnsight.py | 11 ++++------- tests/test_attributions_llama_nnsight.py | 4 ++-- tests/test_offload.py | 7 +++---- tests/test_transformerlens_nnsight_same_gemma.py | 5 +++-- tests/test_transformerlens_nnsight_same_gemma_clts.py | 5 +++-- tests/test_transformerlens_nnsight_same_llama.py | 5 +++-- tests/test_transformerlens_nnsight_same_llama_clts.py | 5 +++-- tests/test_tutorial_notebook_backends.py | 5 +++-- tests/utils/test_caching.py | 3 --- 12 files changed, 32 insertions(+), 33 deletions(-) diff --git a/circuit_tracer/utils/create_graph_files.py b/circuit_tracer/utils/create_graph_files.py index ebcd3442..6691464e 100644 --- a/circuit_tracer/utils/create_graph_files.py +++ b/circuit_tracer/utils/create_graph_files.py @@ -19,7 +19,7 @@ logger = logging.getLogger(__name__) -def load_graph_data(file_path) -> "Graph": +def load_graph_data(file_path) -> Graph: """Load graph data from a PyTorch file.""" from circuit_tracer.graph import Graph @@ -30,7 +30,7 @@ def load_graph_data(file_path) -> "Graph": return graph -def create_nodes(graph: "Graph", node_mask, tokenizer, cumulative_scores): +def create_nodes(graph: Graph, node_mask, tokenizer, cumulative_scores): """Create all nodes for the graph.""" start_time = time.time() @@ -82,7 +82,7 @@ def create_nodes(graph: "Graph", node_mask, tokenizer, cumulative_scores): return nodes -def create_used_nodes_and_edges(graph: "Graph", nodes, edge_mask): +def create_used_nodes_and_edges(graph: Graph, nodes, edge_mask): """Filter to only used nodes and create edges.""" start_time = time.time() edges = edge_mask.numpy() @@ -116,7 +116,7 @@ def create_used_nodes_and_edges(graph: "Graph", nodes, edge_mask): return used_nodes, used_edges -def build_model(graph: "Graph", used_nodes, used_edges, slug, scan, node_threshold, tokenizer): +def build_model(graph: Graph, used_nodes, used_edges, slug, scan, 
node_threshold, tokenizer): """Build the full model object.""" start_time = time.time() @@ -159,7 +159,7 @@ def build_model(graph: "Graph", used_nodes, used_edges, slug, scan, node_thresho def create_graph_files( - graph_or_path: "Graph | str", + graph_or_path: Graph | str, slug: str, output_path, scan=None, diff --git a/pyproject.toml b/pyproject.toml index 82fe338e..d25126f4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,8 +57,6 @@ exclude = ["**/node_modules", "**/__pycache__", "**/.*", "demos"] [tool.pytest.ini_options] markers = [ "requires_disk: marks tests requiring storage space", - "long_running: marks tests that take a very long time to complete", - "large_gpu: marks tests requiring significant GPU memory (>24GB VRAM or multiple large models)", ] [dependency-groups] diff --git a/tests/conftest.py b/tests/conftest.py index 67cef3cf..d7088353 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,6 +1,9 @@ import pytest import torch +# Check for 32GB+ VRAM once at module load time +has_32gb = torch.cuda.is_available() and torch.cuda.get_device_properties(0).total_memory > 32e9 + @pytest.fixture(autouse=True) def set_torch_seed() -> None: diff --git a/tests/test_attributions_gemma3_nnsight.py b/tests/test_attributions_gemma3_nnsight.py index 452a7548..d5c6f1f1 100644 --- a/tests/test_attributions_gemma3_nnsight.py +++ b/tests/test_attributions_gemma3_nnsight.py @@ -14,6 +14,7 @@ from circuit_tracer.transcoder.activation_functions import JumpReLU from circuit_tracer.transcoder.cross_layer_transcoder import CrossLayerTranscoder from circuit_tracer.replacement_model.replacement_model_nnsight import NNSightReplacementModel +from tests.conftest import has_32gb gemma_3_config_dict = { "_sliding_window_pattern": 6, @@ -525,8 +526,7 @@ def test_gemma_3_1b(): verify_feature_edges(model, graph) -@pytest.mark.large_gpu -@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.skipif(not has_32gb, reason="Requires 
>=32GB VRAM") def test_gemma_3_1b_it(): s = "user\nThe National Digital Analytics Group (ND" model = ReplacementModel.from_pretrained( @@ -544,8 +544,7 @@ def test_gemma_3_1b_it(): verify_feature_edges(model, graph) -@pytest.mark.large_gpu -@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.skipif(not has_32gb, reason="Requires >=32GB VRAM") def test_gemma_3_1b_clt(): s = "The National Digital Analytics Group (ND" model = ReplacementModel.from_pretrained( @@ -563,9 +562,7 @@ def test_gemma_3_1b_clt(): verify_feature_edges(model, graph) -@pytest.mark.large_gpu -@pytest.mark.long_running -@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.skipif(not has_32gb, reason="Requires >=32GB VRAM") def test_gemma_3_4b(): s = "The National Digital Analytics Group (ND" model = ReplacementModel.from_pretrained( diff --git a/tests/test_attributions_llama_nnsight.py b/tests/test_attributions_llama_nnsight.py index e685e3fd..295be5b4 100644 --- a/tests/test_attributions_llama_nnsight.py +++ b/tests/test_attributions_llama_nnsight.py @@ -11,6 +11,7 @@ from circuit_tracer.replacement_model.replacement_model_nnsight import NNSightReplacementModel from circuit_tracer.transcoder import SingleLayerTranscoder, TranscoderSet from circuit_tracer.transcoder.activation_functions import TopK +from tests.conftest import has_32gb sys.path.append(os.path.dirname(__file__)) from test_attributions_gemma_nnsight import verify_feature_edges, verify_token_and_error_edges @@ -149,8 +150,7 @@ def test_large_llama_model(): tokenizer_class.all_special_ids = original_all_special_ids # type:ignore -@pytest.mark.large_gpu -@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.skipif(not has_32gb, reason="Requires >=32GB VRAM") def test_llama_3_2_1b(): s = "The National Digital Analytics Group (ND" model = ReplacementModel.from_pretrained( diff --git a/tests/test_offload.py 
b/tests/test_offload.py index a2e2f5d3..cbfb4bba 100644 --- a/tests/test_offload.py +++ b/tests/test_offload.py @@ -12,6 +12,7 @@ from circuit_tracer.replacement_model.replacement_model_nnsight import ( NNSightReplacementModel, ) +from tests.conftest import has_32gb @pytest.fixture(autouse=True) @@ -48,8 +49,7 @@ def test_offload_tl(): assert param.device.type == original_device.type -@pytest.mark.large_gpu -@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.skipif(not has_32gb, reason="Requires >=32GB VRAM") def test_offload_nnsight(): s = "The National Digital Analytics Group (ND" model = ReplacementModel.from_pretrained("google/gemma-2-2b", "gemma", backend="nnsight") @@ -76,8 +76,7 @@ def test_offload_nnsight(): assert param.device.type == original_device.type -@pytest.mark.large_gpu -@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.skipif(not has_32gb, reason="Requires >=32GB VRAM") def test_offload_nnsight_gemma_3(): s = "The National Digital Analytics Group (ND" model_name = "google/gemma-3-4b-pt" diff --git a/tests/test_transformerlens_nnsight_same_gemma.py b/tests/test_transformerlens_nnsight_same_gemma.py index 99705e4d..6e40eb07 100644 --- a/tests/test_transformerlens_nnsight_same_gemma.py +++ b/tests/test_transformerlens_nnsight_same_gemma.py @@ -8,9 +8,10 @@ from circuit_tracer.attribution.attribute_transformerlens import ( attribute as attribute_transformerlens, ) +from tests.conftest import has_32gb -# Mark all tests in this module as requiring large GPU memory -pytestmark = pytest.mark.large_gpu +# Mark all tests in this module as requiring 32GB+ VRAM +pytestmark = [pytest.mark.skipif(not has_32gb, reason="Requires >=32GB VRAM")] @pytest.fixture(autouse=True) diff --git a/tests/test_transformerlens_nnsight_same_gemma_clts.py b/tests/test_transformerlens_nnsight_same_gemma_clts.py index b6e36c91..06f288b5 100644 --- 
a/tests/test_transformerlens_nnsight_same_gemma_clts.py +++ b/tests/test_transformerlens_nnsight_same_gemma_clts.py @@ -8,9 +8,10 @@ from circuit_tracer.attribution.attribute_transformerlens import ( attribute as attribute_transformerlens, ) +from tests.conftest import has_32gb -# Mark all tests in this module as requiring high GPU memory -pytestmark = pytest.mark.large_gpu +# Mark all tests in this module as requiring 32GB+ VRAM +pytestmark = [pytest.mark.skipif(not has_32gb, reason="Requires >=32GB VRAM")] @pytest.fixture(autouse=True) diff --git a/tests/test_transformerlens_nnsight_same_llama.py b/tests/test_transformerlens_nnsight_same_llama.py index b2308336..a2afeb92 100644 --- a/tests/test_transformerlens_nnsight_same_llama.py +++ b/tests/test_transformerlens_nnsight_same_llama.py @@ -8,9 +8,10 @@ from circuit_tracer.attribution.attribute_transformerlens import ( attribute as attribute_transformerlens, ) +from tests.conftest import has_32gb -# Mark all tests in this module as requiring high GPU memory -pytestmark = pytest.mark.large_gpu +# Mark all tests in this module as requiring 32GB+ VRAM +pytestmark = [pytest.mark.skipif(not has_32gb, reason="Requires >=32GB VRAM")] @pytest.fixture(autouse=True) diff --git a/tests/test_transformerlens_nnsight_same_llama_clts.py b/tests/test_transformerlens_nnsight_same_llama_clts.py index e4133c08..6b669c16 100644 --- a/tests/test_transformerlens_nnsight_same_llama_clts.py +++ b/tests/test_transformerlens_nnsight_same_llama_clts.py @@ -5,9 +5,10 @@ from circuit_tracer.replacement_model import ReplacementModel from circuit_tracer.attribution.attribute import attribute +from tests.conftest import has_32gb -# Mark all tests in this module as requiring high GPU memory -pytestmark = pytest.mark.large_gpu +# Mark all tests in this module as requiring 32GB+ VRAM +pytestmark = [pytest.mark.skipif(not has_32gb, reason="Requires >=32GB VRAM")] @pytest.fixture(autouse=True) diff --git a/tests/test_tutorial_notebook_backends.py 
b/tests/test_tutorial_notebook_backends.py index c1b0653e..7b7c7f96 100644 --- a/tests/test_tutorial_notebook_backends.py +++ b/tests/test_tutorial_notebook_backends.py @@ -8,9 +8,10 @@ from circuit_tracer.attribution.attribute_transformerlens import ( attribute as attribute_transformerlens, ) +from tests.conftest import has_32gb -# Mark all tests in this module as requiring high GPU memory -pytestmark = pytest.mark.large_gpu +# Mark all tests in this module as requiring 32GB+ VRAM +pytestmark = [pytest.mark.skipif(not has_32gb, reason="Requires >=32GB VRAM")] @pytest.fixture(autouse=True) diff --git a/tests/utils/test_caching.py b/tests/utils/test_caching.py index 97aaafca..e8b321ec 100644 --- a/tests/utils/test_caching.py +++ b/tests/utils/test_caching.py @@ -33,7 +33,6 @@ def cleanup_cache(): @pytest.mark.requires_disk -@pytest.mark.long_running def test_caching_enables_lazy_loading(): # 1. Load from hub without cache - lazy loading should not work because # gemma-scope transcoders use npz format which doesn't support lazy loading @@ -82,7 +81,6 @@ def test_caching_enables_lazy_loading(): @pytest.mark.requires_disk -@pytest.mark.long_running def test_custom_cache_directory(): try: # Ensure test cache dir doesn't exist initially @@ -121,7 +119,6 @@ def test_custom_cache_directory(): @pytest.mark.requires_disk -@pytest.mark.long_running def test_cache_directory_from_env_var(): env_cache_dir = Path.home() / ".cache" / "circuit-tracer-env-test" old_env = os.environ.get("CIRCUIT_TRACER_CACHE_DIR") From f4ad1e9f08213fa41d70e2cb085803ed0f7fb3fd Mon Sep 17 00:00:00 2001 From: Daniel Dale Date: Tue, 10 Feb 2026 16:04:27 -0800 Subject: [PATCH 12/18] revert comment hunks to submit in a separate PR --- circuit_tracer/attribution/attribute.py | 6 +- .../attribution/attribute_nnsight.py | 2 +- .../attribution/attribute_transformerlens.py | 2 +- circuit_tracer/graph.py | 2 +- .../replacement_model_nnsight.py | 82 ++++++++----------- .../replacement_model_transformerlens.py | 
53 ++++++------ circuit_tracer/utils/tl_nnsight_mapping.py | 24 ++---- 7 files changed, 71 insertions(+), 100 deletions(-) diff --git a/circuit_tracer/attribution/attribute.py b/circuit_tracer/attribution/attribute.py index c4fc16f9..409b2d18 100644 --- a/circuit_tracer/attribution/attribute.py +++ b/circuit_tracer/attribution/attribute.py @@ -1,9 +1,5 @@ """ -Unified attribution interface that routes to the correct backend implementation. - -This module provides a unified entry point for computing attribution graphs, -automatically dispatching to either the TransformerLens or NNSight implementation -based on the backend type of the provided ReplacementModel. +Unified attribution interface that routes to the correct implementation based on the ReplacementModel backend. """ from collections.abc import Sequence diff --git a/circuit_tracer/attribution/attribute_nnsight.py b/circuit_tracer/attribution/attribute_nnsight.py index e192e363..25b590f6 100644 --- a/circuit_tracer/attribution/attribute_nnsight.py +++ b/circuit_tracer/attribution/attribute_nnsight.py @@ -1,7 +1,7 @@ """ Build an **attribution graph** that captures the *direct*, *linear* effects between features and next-token logits for a *prompt-specific* -**local replacement model** using the NNSight backend. +**local replacement model**. High-level algorithm (matches the 2025 ``Attribution Graphs`` paper): https://transformer-circuits.pub/2025/attribution-graphs/methods.html diff --git a/circuit_tracer/attribution/attribute_transformerlens.py b/circuit_tracer/attribution/attribute_transformerlens.py index 449f3088..72f456a1 100644 --- a/circuit_tracer/attribution/attribute_transformerlens.py +++ b/circuit_tracer/attribution/attribute_transformerlens.py @@ -1,7 +1,7 @@ """ Build an **attribution graph** that captures the *direct*, *linear* effects between features and next-token logits for a *prompt-specific* -**local replacement model** using the TransformerLens backend. +**local replacement model**. 
High-level algorithm (matches the 2025 ``Attribution Graphs`` paper): https://transformer-circuits.pub/2025/attribution-graphs/methods.html diff --git a/circuit_tracer/graph.py b/circuit_tracer/graph.py index 7996f3d9..432fa96d 100644 --- a/circuit_tracer/graph.py +++ b/circuit_tracer/graph.py @@ -69,7 +69,7 @@ def __init__( cfg: The cfg of the model. selected_features (torch.Tensor): Indices into active_features for selected nodes. activation_values (torch.Tensor): Activation values for selected features. - scan (Optional[Union[str,List[str]]], optional): The identifier of the + scan (Union[str,List[str]] | None, optional): The identifier of the transcoders used in the graph. Without a scan, the graph cannot be uploaded (since we won't know what transcoders were used). Defaults to None attribution_targets: Attribution targets container. When provided, diff --git a/circuit_tracer/replacement_model/replacement_model_nnsight.py b/circuit_tracer/replacement_model/replacement_model_nnsight.py index a5851633..8f46aeaf 100644 --- a/circuit_tracer/replacement_model/replacement_model_nnsight.py +++ b/circuit_tracer/replacement_model/replacement_model_nnsight.py @@ -355,8 +355,8 @@ def fetch_activations( logits = save(self.output.logits) - # If `activation_layers` is None we only need activations for certain - # layers during this forward pass, so avoid creating and saving the full cache. + # activation_layers is None means that we only need the acts for those layers, during this forward pass + # So we don't bother creating / saving the whole cache if activation_layers is not None: activation_cache = None @@ -473,8 +473,7 @@ def ensure_tokenized(self, prompt: str | torch.Tensor | list[int]) -> torch.Tens dummy_bos_token_id = next(filter(None, candidate_bos_token_ids)) if dummy_bos_token_id is None: warnings.warn( - "No suitable special token found for BOS token replacement. " - "The first token will be ignored.", + "No suitable special token found for BOS token replacement. 
The first token will be ignored." ) else: tokens = torch.cat([torch.tensor([dummy_bos_token_id], device=tokens.device), tokens]) @@ -570,19 +569,16 @@ def setup_intervention_with_freeze( Args: inputs (str | torch.Tensor): The inputs to intervene on - constrained_layers (range | None): Whether to apply interventions only to a - certain range. Mostly applicable to CLTs. If the given range includes - all model layers, we also freeze LayerNorm denominators to compute - direct effects. None means no constraints (iterative patching). + constrained_layers (range | None): whether to apply interventions only to a certain range. + Mostly applicable to CLTs. If the given range includes all model layers, we also freeze + layernorm denominators, computing direct effects. None means no constraints (iterative patching) Returns: - tuple[torch.Tensor, list[Callable]]: The freeze hooks needed to run the - desired intervention. + tuple[torch.Tensor, list[Callable]]: The freeze hooks needed to run the desired intervention. """ def get_locs_to_freeze(): - # This must be in a function invoked only within a trace context. Otherwise - # the `.source` attribute cannot be read twice. + # this needs to go in a function that is called only in a trace context! otherwise you can't get the .source twice locs_to_freeze = {"attention": self.attention_locs} if constrained_layers: if set(range(self.cfg.n_layers)).issubset(set(constrained_layers)): # type: ignore @@ -596,8 +592,8 @@ def get_locs_to_freeze(): activation_matrix, activation_fn = self.get_activation_fn() cache = {} - # Somehow `self` can be replaced with an `EnvoyWrapper`, which causes issues. - # Use local references to avoid that problem. + # somehow, self is getting corrupted / changed somehow into type `EnvoyWrapper`, which causes issues. + # This gets around it. 
transcoders = self.transcoders skip_transcoder = self.skip_transcoder @@ -642,13 +638,13 @@ def freeze_fn(freeze_loc_name, loc_type_to_freeze, direct_effects_barrier=None): assert len(original_outputs) == len(cached_values) for orig, cached in zip(original_outputs, cached_values): assert orig.shape == cached.shape, ( - f"Activations shape {orig.shape} does not match cached values " - f"shape {cached.shape} at hook {loc_to_freeze.name}" + f"Activations shape {orig.shape} does not match cached values" + f" shape {cached.shape} at hook {loc_to_freeze.name}" ) else: assert original_outputs.shape == cached_values.shape, ( - f"Activations shape {original_outputs.shape} != {cached_values.shape} " - f"at hook {loc_to_freeze.name}" + f"Activations shape {original_outputs.shape} does not match cached values" + f" shape {cached_values.shape} at hook {loc_to_freeze.name}" ) if freeze_loc_name == "feature_output" and skip_transcoder: @@ -737,8 +733,7 @@ def _perform_feature_intervention( layer, feature_idxs ) - # Handle both 2D [n_feature_idxs, d_model] and 3D - # [n_feature_idxs, n_remaining_layers, d_model] cases + # Handle both 2D [n_feature_idxs, d_model] and 3D [n_feature_idxs, n_remaining_layers, d_model] cases if decoder_vectors.ndim == 2: # Single-layer transcoder case: [n_feature_idxs, d_model] decoder_vectors = decoder_vectors * new_values.unsqueeze(1) @@ -780,12 +775,12 @@ def feature_intervention( Args: input (_type_): the input prompt to intervene on - interventions (Sequence[Intervention]): A list of interventions to perform. - Each entry should be a tuple (layer, position, feature_idx, value) - constrained_layers (range | None): Whether to apply interventions only to a - certain layer range. Mostly applicable to CLTs. If the given range includes - all model layers, we also freeze LayerNorm denominators to compute direct - effects. None means no constraints (iterative patching). 
+ intervention_dict (Sequence[Intervention]): A list of interventions to perform, formatted as + a list of (layer, position, feature_idx, value) + constrained_layers (range | None): whether to apply interventions only to a certain range, freezing + all MLPs within the layer range before doing so. This is mostly applicable to CLTs. If the given + range includes all model layers, we also freeze layernorm denominators, computing direct effects. + None means no constraints (iterative patching) apply_activation_function (bool): whether to apply the activation function when recording the activations to be returned. This is useful to set to False for testing purposes, as attribution predicts the change in pre-activation @@ -877,28 +872,24 @@ def feature_intervention_generate( return_activations: bool = True, **kwargs, ) -> tuple[str, torch.Tensor, torch.Tensor | None]: - """Given the input and a dictionary of features to intervene on, this - performs the intervention and generates a continuation. It returns the - logits and activations at each generation position. This function accepts - additional kwargs valid for HookedTransformer.generate(). Note that - `freeze_attention` applies only to the first token generated. - - If `kv_cache` is True (default), generation is faster because the model - caches KV pairs and only processes the new token per step. If False, - the model performs a full forward pass across all tokens. Due to numerical - precision, logits/activations from `feature_intervention_generate(...)` - may differ from `feature_intervention(...)` unless `kv_cache` is False. + """Given the input, and a dictionary of features to intervene on, performs the + intervention, and generates a continuation, along with the logits and activations at each generation position. + This function accepts all kwargs valid for HookedTransformer.generate(). Note that freeze_attention applies + only to the first token generated. 
+ + Note that if kv_cache is True (default), generation will be faster, as the model will cache the KVs, and only + process the one new token per step; if it is False, the model will generate by doing a full forward pass across + all tokens. Note that due to numerical precision issues, you are only guaranteed that the logits / activations of + model.feature_intervention_generate(s, ...) are equivalent to model.feature_intervention(s, ...) if kv_cache is False. Args: input (_type_): the input prompt to intervene on interventions (list[tuple[int, Union[int, slice, torch.Tensor]], int, int | torch.Tensor]): A list of interventions to perform, formatted as a list of (layer, position, feature_idx, value) - constrained_layers: (range | None = None): Whether to freeze MLPs and - transcoders, attention patterns, and LayerNorm denominators for a layer - range. This applies only to the first token generated. - freeze_attention (bool): Whether to freeze all attention patterns (applies to - the first token generated). + constrained_layers: (range | None = None): whether to freeze all MLPs/transcoders / + attn patterns / layernorm denominators. This will only apply to the very first token generated. If + freeze_attention (bool): whether to freeze all attention patterns. Applies only to first token generated apply_activation_function (bool): whether to apply the activation function when recording the activations to be returned. This is useful to set to False for testing purposes, as attribution predicts the change in pre-activation @@ -1004,11 +995,8 @@ def feature_intervention_generate( def get_feature_input_loc(self, layer: int): """ - Returns a feature input location wrapped in an EnvoyWrapper. - - Some feature inputs expose `.input` while others expose `.output`. An - EnvoyWrapper normalizes this so that `.output` always returns the - relevant value. + Returns a feature input loc wrapped in an EnvoyWrapper. 
This is necessary because some feature inputs need .input, and + some need .output. An EnvoyWrapper just wraps them such that .output always returns the relevant value. """ return EnvoyWrapper( self._resolve_attr(self, self._feature_input_pattern.format(layer=layer)), diff --git a/circuit_tracer/replacement_model/replacement_model_transformerlens.py b/circuit_tracer/replacement_model/replacement_model_transformerlens.py index 84f2d33b..0984a3e3 100644 --- a/circuit_tracer/replacement_model/replacement_model_transformerlens.py +++ b/circuit_tracer/replacement_model/replacement_model_transformerlens.py @@ -79,8 +79,7 @@ def from_config( transcoders: TranscoderSet | CrossLayerTranscoder, # Accept both **kwargs, ) -> "TransformerLensReplacementModel": - """Create a TransformerLensReplacementModel from a HookedTransformerConfig - and a TranscoderSet. + """Create a TransformerLensReplacementModel from a given HookedTransformerConfig and TranscoderSet Args: config (HookedTransformerConfig): the config of the HookedTransformer @@ -100,8 +99,7 @@ def from_pretrained_and_transcoders( transcoders: TranscoderSet | CrossLayerTranscoder, # Accept both **kwargs, ) -> "TransformerLensReplacementModel": - """Create a TransformerLensReplacementModel from a HookedTransformer name - and a TranscoderSet. + """Create a TransformerLensReplacementModel from the name of HookedTransformer and TranscoderSet Args: model_name (str): the name of the pretrained HookedTransformer @@ -397,8 +395,7 @@ def ensure_tokenized(self, prompt: str | torch.Tensor | list[int]) -> torch.Tens dummy_bos_token_id = next(filter(None, candidate_bos_token_ids)) if dummy_bos_token_id is None: warnings.warn( - "No suitable special token found for BOS token replacement. " - "The first token will be ignored.", + "No suitable special token found for BOS token replacement. The first token will be ignored." 
) else: tokens = torch.cat([torch.tensor([dummy_bos_token_id], device=tokens.device), tokens]) @@ -464,9 +461,8 @@ def setup_intervention_with_freeze( Args: inputs (str | torch.Tensor): The inputs to intervene on - constrained_layers: (tuple[int,int] | range | None = None): Whether to freeze - attention, LayerNorm, and MLPs within a specified layer range. Defaults - to None. + constrained_layers: (tuple[int,int] | range | None = None): Whether to freeze not just attention, but also + LayerNorm and MLPs, at the specified layer range. Defaults to None. Returns: list[tuple[str, Callable]]: The freeze hooks needed to run the desired intervention. @@ -567,10 +563,10 @@ def _get_feature_intervention_hooks( input (_type_): the input prompt to intervene on intervention_dict (list[Intervention]): A list of interventions to perform, formatted as a list of (layer, position, feature_idx, value) - constrained_layers (range | tuple | None): Whether to apply interventions only - to a certain layer range. Mostly applicable to CLTs. If the given range - includes all model layers, we also freeze LayerNorm denominators to compute - direct effects. None means no constraints (iterative patching). + constrained_layers (range | tuple | None): whether to apply interventions only to a certain range. + Mostly applicable to CLTs. If the given range includes all model layers, + we also freeze layernorm denominators, computing direct effects. None means no + constraints (iterative patching) apply_activation_function (bool): whether to apply the activation function when recording the activations to be returned. 
This is useful to set to False for testing purposes, as attribution predicts the change in pre-activation @@ -730,20 +726,20 @@ def feature_intervention( sparse: bool = False, return_activations: bool = True, ) -> tuple[torch.Tensor, torch.Tensor | None]: - """Given the input and a dictionary of features to intervene on, this performs - the intervention and returns logits and feature activations. If `freeze_attention` - or `constrained_layers` is True, attention patterns, MLPs and LayerNorms may be - frozen. When `constrained_layers` is set, effects do not propagate through those - layers (useful for CLTs). Otherwise, effects propagate through transcoders and - LayerNorms. + """Given the input, and a dictionary of features to intervene on, performs the + intervention, and returns the logits and feature activations. If freeze_attention or + constrained_layers is True, attention patterns will be frozen, along with MLPs and + LayerNorms. If constrained_layers is set, the effects of intervention will not propagate + through the constrained layers, and CLTs will write only to those layers. Otherwise, the + effects of the intervention will propagate through transcoders / LayerNorms Args: input (_type_): the input prompt to intervene on interventions (list[tuple[int, int, slice | torch.Tensor], int, int | torch.Tensor]): A list of interventions to perform, formatted as a list of (layer, position, feature_idx, value) - constrained_layers (range | tuple | None): Whether to apply interventions only - to a specific range. Mostly applicable to CLTs. + constrained_layers (range | tuple | None): whether to apply interventions only to a certain range. + Mostly applicable to CLTs. freeze_attention (bool): whether to freeze all attention patterns an layernorms apply_activation_function (bool): whether to apply the activation function when recording the activations to be returned. 
This is useful to set to False for @@ -810,21 +806,18 @@ def feature_intervention_generate( This function accepts all kwargs valid for HookedTransformer.generate(). Note that freeze_attention applies only to the first token generated. - Note that if `kv_cache` is True (default), generation will be faster because - the model caches KV pairs and only processes the new token per step. If - `kv_cache` is False, the model does a full forward pass across all tokens. - Due to numerical precision, logits/activations from - `feature_intervention_generate(...)` may differ from `feature_intervention(...)` - unless `kv_cache` is False. + Note that if kv_cache is True (default), generation will be faster, as the model will cache the KVs, and only + process the one new token per step; if it is False, the model will generate by doing a full forward pass across + all tokens. Note that due to numerical precision issues, you are only guaranteed that the logits / activations of + model.feature_intervention_generate(s, ...) are equivalent to model.feature_intervention(s, ...) if kv_cache is False. Args: input (_type_): the input prompt to intervene on interventions (list[tuple[int, int, slice | torch.Tensor], int, int | torch.Tensor]): A list of interventions to perform, formatted as a list of (layer, position, feature_idx, value) - constrained_layers: (tuple[int,int] | range | None = None): Whether to freeze - MLPs/transcoders, attention patterns, and LayerNorm denominators for a - layer range. This only applies to the first token generated. + constrained_layers: (tuple[int,int] | range | None = None): whether to freeze all MLPs/transcoders / + attn patterns / layernorm denominators. This will only apply to the very first token generated. freeze_attention (bool): whether to freeze all attention patterns. apply_activation_function (bool): whether to apply the activation function when recording the activations to be returned. 
This is useful to set to False for diff --git a/circuit_tracer/utils/tl_nnsight_mapping.py b/circuit_tracer/utils/tl_nnsight_mapping.py index a28acaba..e33fb5b7 100644 --- a/circuit_tracer/utils/tl_nnsight_mapping.py +++ b/circuit_tracer/utils/tl_nnsight_mapping.py @@ -4,21 +4,18 @@ @dataclass class TransformerLens_NNSight_Mapping: - """Mapping specifying important locations in NNSight models, as well as mapping - from TL Hook Points to NNSight locations""" + """Mapping specifying important locations in NNSight models, as well as mapping from TL Hook Points to NNSight locations""" model_architecture: str # HuggingFace model architecture attention_location_pattern: str # Location of the attention patterns layernorm_scale_location_patterns: list[str] # Location of the Layernorm denominators - # Location immediately before logits (location from which we will attribute for logit tokens) - pre_logit_location: str - # Location of the embedding Module (location to which we will attribute for embeddings) - embed_location: str + pre_logit_location: str # Location immediately before the logits (the location from which we will attribute for logit tokens) + embed_location: str # Location of the embedding Module (the location to which we will attribute for embeddings) embed_weight: str # Location of the embedding weight matrix unembed_weight: str # Location of the unembedding weight matrix - # Mapping from (TransformerLens Hook) to a tuple representing an NNSight Envoy location, and - # whether we want its input or output - feature_hook_mapping: dict[str, tuple[str, Literal["input", "output"]]] + feature_hook_mapping: dict[ + str, tuple[str, Literal["input", "output"]] + ] # Mapping from (TransformerLens Hook) to a tuple representing an NNSight Envoy location, and whether we want its input or output # Create an instance with the original configuration values @@ -167,8 +164,7 @@ def get_mapping(model_architecture: str) -> TransformerLens_NNSight_Mapping: """Get the 
TransformerLens-NNSight mapping for a given model architecture. Args: - model_architecture: The model architecture name (e.g., 'Gemma2ForCausalLM', - 'Llama2ForCausalLM') + model_architecture: The model architecture name (e.g., 'Gemma2ForCausalLM', 'Llama2ForCausalLM') Returns: TransformerLens_NNSight_Mapping: The mapping configuration for the specified architecture @@ -242,10 +238,8 @@ def convert_nnsight_config_to_transformerlens(config): """Convert NNsight config to TransformerLens config format. Args: - config: NNsight config object or UnifiedConfig (pass-through) or HookedTransformerConfig - - Returns: - UnifiedConfig: A unified configuration object + config: NNsight config object + return_unified: If True, return UnifiedConfig instead of HookedTransformerConfig """ # If already a UnifiedConfig, return as-is if isinstance(config, UnifiedConfig): From 787cf1825c1898533c6dd3730e65ceaf7b33df8c Mon Sep 17 00:00:00 2001 From: Daniel Dale Date: Thu, 12 Feb 2026 12:33:53 -0800 Subject: [PATCH 13/18] adjust `test_custom_target_correctness` to adopt our standard attribution validation approach, remove attribution_targets ref from `Graph`, remove unnecessary vocab_indices properties, convert multi-token string detection handling from warning to error --- .../attribution/attribute_nnsight.py | 4 +- .../attribution/attribute_transformerlens.py | 4 +- circuit_tracer/attribution/targets.py | 26 +- circuit_tracer/graph.py | 48 +--- tests/test_attribution_targets.py | 254 ++++++++---------- tests/test_graph.py | 10 +- 6 files changed, 149 insertions(+), 197 deletions(-) diff --git a/circuit_tracer/attribution/attribute_nnsight.py b/circuit_tracer/attribution/attribute_nnsight.py index 25b590f6..ea8fdf7b 100644 --- a/circuit_tracer/attribution/attribute_nnsight.py +++ b/circuit_tracer/attribution/attribute_nnsight.py @@ -287,7 +287,9 @@ def _run_attribution( graph = Graph( input_string=model.tokenizer.decode(input_ids), input_tokens=input_ids, - attribution_targets=targets, + 
logit_targets=targets.logit_targets, + logit_probabilities=targets.logit_probabilities, + vocab_size=targets.vocab_size, active_features=activation_matrix.indices().T, activation_values=activation_matrix.values(), selected_features=selected_features, diff --git a/circuit_tracer/attribution/attribute_transformerlens.py b/circuit_tracer/attribution/attribute_transformerlens.py index 72f456a1..c82b57e1 100644 --- a/circuit_tracer/attribution/attribute_transformerlens.py +++ b/circuit_tracer/attribution/attribute_transformerlens.py @@ -265,7 +265,9 @@ def _run_attribution( graph = Graph( input_string=model.tokenizer.decode(input_ids), input_tokens=input_ids, - attribution_targets=targets, + logit_targets=targets.logit_targets, + logit_probabilities=targets.logit_probabilities, + vocab_size=targets.vocab_size, active_features=activation_matrix.indices().T, activation_values=activation_matrix.values(), selected_features=selected_features, diff --git a/circuit_tracer/attribution/targets.py b/circuit_tracer/attribution/targets.py index c06bd170..9a5292d9 100644 --- a/circuit_tracer/attribution/targets.py +++ b/circuit_tracer/attribution/targets.py @@ -15,7 +15,6 @@ from collections.abc import Sequence from typing import NamedTuple import logging -import warnings import torch @@ -150,18 +149,9 @@ def vocab_size(self) -> int: """ return self.tokenizer.vocab_size - @property - def vocab_indices(self) -> list[int]: - """All vocabulary indices including virtual indices (>= vocab_size). - - Returns: - List of vocabulary indices - """ - return [target.vocab_idx for target in self.logit_targets] - @property def token_ids(self) -> torch.Tensor: - """Tensor of vocabulary indices. + """Tensor of vocabulary indices (including virtual indices >= vocab_size). Returns a torch.Tensor of vocab indices on the same device as other tensors, suitable for indexing into logit vectors or embeddings. 
@@ -170,7 +160,9 @@ def token_ids(self) -> torch.Tensor: torch.Tensor: Long tensor of vocabulary indices """ return torch.tensor( - self.vocab_indices, dtype=torch.long, device=self.logit_probabilities.device + [target.vocab_idx for target in self.logit_targets], + dtype=torch.long, + device=self.logit_probabilities.device, ) def to(self, device: str | torch.device) -> "AttributionTargets": @@ -299,12 +291,12 @@ def _from_str( if not ids: raise ValueError(f"String token {token_str!r} encoded to empty token sequence.") if len(ids) > 1: - warnings.warn( - f"String token {token_str!r} encoded to {len(ids)} tokens; " - f"using only the last token (index {ids[-1]}). " - f"Consider providing single-token strings for more predictable behavior." + raise ValueError( + f"String token {token_str!r} encoded to {len(ids)} tokens " + f"(IDs: {ids}). Each string must map to exactly one token. " + f"Consider providing single-token strings." ) - token_id = ids[-1] + token_id = ids[0] assert 0 <= token_id < vocab_size, ( f"Token {token_str!r} resolved to index {token_id}, " f"out of vocabulary range [0, {vocab_size})" diff --git a/circuit_tracer/graph.py b/circuit_tracer/graph.py index 432fa96d..ba98e98b 100644 --- a/circuit_tracer/graph.py +++ b/circuit_tracer/graph.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, NamedTuple +from typing import NamedTuple import warnings import torch @@ -14,9 +14,6 @@ from circuit_tracer.utils import get_default_device from circuit_tracer.attribution.targets import LogitTarget -if TYPE_CHECKING: - from circuit_tracer.attribution.targets import AttributionTargets - class Graph: input_string: str @@ -41,10 +38,9 @@ def __init__( cfg, selected_features: torch.Tensor, activation_values: torch.Tensor, + logit_targets: list[LogitTarget], + logit_probabilities: torch.Tensor, scan: str | list[str] | None = None, - attribution_targets: AttributionTargets | None = None, - logit_targets: list[LogitTarget] | None = None, 
- logit_probabilities: torch.Tensor | None = None, vocab_size: int | None = None, ): """ @@ -69,35 +65,16 @@ def __init__( cfg: The cfg of the model. selected_features (torch.Tensor): Indices into active_features for selected nodes. activation_values (torch.Tensor): Activation values for selected features. + logit_targets: List of LogitTarget records describing each logit target. + logit_probabilities: Tensor of logit target probabilities/weights. scan (Union[str,List[str]] | None, optional): The identifier of the transcoders used in the graph. Without a scan, the graph cannot be uploaded (since we won't know what transcoders were used). Defaults to None - attribution_targets: Attribution targets container. When provided, - logit_targets, logit_probabilities, and vocab_size are extracted from it. - logit_targets: List of LogitTarget records. Required if attribution_targets - is not provided. - logit_probabilities: Logit probabilities. Required if attribution_targets - is not provided. vocab_size: Vocabulary size. If not provided, defaults to cfg.d_vocab. """ - if attribution_targets is not None: - if logit_targets is not None or logit_probabilities is not None: - raise ValueError( - "Cannot specify both attribution_targets and " - "(logit_targets, logit_probabilities). Use one or the other." 
- ) - self.logit_targets = attribution_targets.logit_targets - self.logit_probabilities = attribution_targets.logit_probabilities - self.vocab_size = attribution_targets.vocab_size - elif logit_targets is not None and logit_probabilities is not None: - self.logit_targets = logit_targets - self.logit_probabilities = logit_probabilities - self.vocab_size = vocab_size if vocab_size is not None else cfg.d_vocab - else: - raise ValueError( - "Must provide either attribution_targets or both logit_targets and " - "logit_probabilities" - ) + self.logit_targets = logit_targets + self.logit_probabilities = logit_probabilities + self.vocab_size = vocab_size if vocab_size is not None else cfg.d_vocab self.input_string = input_string self.adjacency_matrix = adjacency_matrix @@ -123,11 +100,6 @@ def to(self, device): # logit_targets is list[LogitTarget], no device transfer needed self.logit_probabilities = self.logit_probabilities.to(device) - @property - def vocab_indices(self) -> list[int]: - """All vocabulary indices.""" - return [target.vocab_idx for target in self.logit_targets] - @property def logit_token_ids(self) -> torch.Tensor: """Tensor of logit target token IDs. 
@@ -138,7 +110,9 @@ def logit_token_ids(self) -> torch.Tensor: torch.Tensor: Long tensor of vocabulary indices """ return torch.tensor( - self.vocab_indices, dtype=torch.long, device=self.logit_probabilities.device + [target.vocab_idx for target in self.logit_targets], + dtype=torch.long, + device=self.logit_probabilities.device, ) @property diff --git a/tests/test_attribution_targets.py b/tests/test_attribution_targets.py index 734c7dcf..4e5855e3 100644 --- a/tests/test_attribution_targets.py +++ b/tests/test_attribution_targets.py @@ -405,8 +405,8 @@ def test_attribution_targets_tokens_property(mock_data): assert tokens == ["arbitrary", "custom_func"] -def test_attribution_targets_vocab_indices(mock_data): - """Test vocab_indices property for tuple targets (virtual indices).""" +def test_attribution_targets_virtual_token_ids(mock_data): + """Test token_ids property for tuple targets (virtual indices).""" logits, unembed_proj, tokenizer = mock_data vocab_size = tokenizer.vocab_size @@ -422,7 +422,7 @@ def test_attribution_targets_vocab_indices(mock_data): ) expected = [vocab_size + 0, vocab_size + 1, vocab_size + 2] - assert targets.vocab_indices == expected + assert targets.token_ids.tolist() == expected def test_attribution_targets_token_ids_real(mock_data): @@ -477,8 +477,8 @@ def test_attribution_targets_utility_methods(mock_data, test_method, expected_va # === Multi-token encoding tests === -def test_attribution_targets_multi_token_warning(mock_data): - """Test that multi-token strings trigger a warning.""" +def test_attribution_targets_multi_token_error(mock_data): + """Test that multi-token strings raise a ValueError.""" logits, unembed_proj, tokenizer = mock_data # Mock tokenizer to return multi-token encoding for a specific string @@ -491,18 +491,14 @@ def multi_token_encode(text, add_special_tokens=False): tokenizer.encode = multi_token_encode - with pytest.warns(UserWarning, match="encoded to 3 tokens"): - targets = AttributionTargets( + with 
pytest.raises(ValueError, match="encoded to 3 tokens"): + AttributionTargets( attribution_targets=["multi_token_string"], logits=logits, unembed_proj=unembed_proj, tokenizer=tokenizer, ) - # Verify it used the last token - assert len(targets) == 1 - assert targets.logit_targets[0].vocab_idx == 30 - # Restore original encode tokenizer.encode = original_encode @@ -656,127 +652,6 @@ def _cfg_backend(backend: str): return model, n_layers_range, unembed_proj -def _run_custom_target_correctness(backend: str): - """Backend-agnostic logic for custom target correctness test. - - 1. Build a CustomTarget logit(x) - logit(y) - 2. Run attribution using that custom target - 3. Find top attributed features via first-order adjacency scores - 4. Ablate those features → verify logit diff magnitude decreases - 5. Amplify those features (10x) → verify logit diff magnitude increases - 6. Verify unrelated logits are not dramatically affected - - NOTE: All logit comparisons use ``zero_softcap()`` so that baseline and - intervention logits live in the same (unsoftcapped) space. Without this, - Gemma-2's ``output_logits_soft_cap`` compresses baseline logits, making - direct comparison with unsoftcapped intervention logits invalid. 
- """ - - prompt = "The capital of the state containing Dallas is" - token_x, token_y = "▁Austin", "▁Dallas" - unrelated_tokens = ["▁banana", "▁pillow"] # for stability check - - model, n_layers_range, _ = _cfg_backend(backend) - custom_target, idx_x, idx_y = _build_custom_diff_target( - model, prompt, token_x, token_y, backend - ) - assert model.tokenizer is not None - unrelated_indices = [ - model.tokenizer.encode(tok, add_special_tokens=False)[-1] for tok in unrelated_tokens - ] - - graph = attribute(prompt, model, attribution_targets=[custom_target], batch_size=256) - - # Validate graph structure - assert len(graph.logit_targets) == 1 - assert graph.logit_targets[0].token_str == custom_target.token_str - # Virtual index (custom target uses index >= vocab_size) - assert graph.logit_targets[0].vocab_idx >= graph.vocab_size - - # Get baseline logits w/o softcap - input_ids = model.ensure_tokenized(prompt) - with torch.no_grad(), model.zero_softcap(): - baseline_logits, _ = model.get_activations(input_ids) - baseline_logits = baseline_logits.squeeze(0)[-1] - baseline_x = baseline_logits[idx_x].item() - baseline_y = baseline_logits[idx_y].item() - baseline_diff = baseline_x - baseline_y - baseline_unrelated = [baseline_logits[idx].item() for idx in unrelated_indices] - - # Get top features from attribution graph (by first-order adjacency scores) - top_features = _get_top_features(graph, n=5) - - ablation_interventions = [(layer, pos, feat_idx, 0.0) for layer, pos, feat_idx in top_features] - - with model.zero_softcap(): - ablated_logits, _ = model.feature_intervention( - input_ids, - ablation_interventions, - constrained_layers=n_layers_range, - return_activations=False, - ) - ablated_logits = ablated_logits.squeeze(0)[-1] - ablated_x = ablated_logits[idx_x].item() - ablated_y = ablated_logits[idx_y].item() - ablated_diff = ablated_x - ablated_y - ablated_unrelated = [ablated_logits[idx].item() for idx in unrelated_indices] - - # === Amplification by 10x === - # Get 
pre-activation feature values for amplification targets - with torch.no_grad(): - _, act_cache = model.get_activations(input_ids, apply_activation_function=False) - - amplify_interventions = [ - (layer, pos, feat_idx, act_cache[layer, pos, feat_idx].item() * 10.0) - for layer, pos, feat_idx in top_features - ] - - with model.zero_softcap(): - amplified_logits, _ = model.feature_intervention( - input_ids, - amplify_interventions, - constrained_layers=n_layers_range, - return_activations=False, - ) - amplified_logits = amplified_logits.squeeze(0)[-1] - amplified_x = amplified_logits[idx_x].item() - amplified_y = amplified_logits[idx_y].item() - amplified_diff = amplified_x - amplified_y - amplified_unrelated = [amplified_logits[idx].item() for idx in unrelated_indices] - - # === Directional assertions === - # The custom target direction is logit(x) - logit(y), so baseline_diff > 0. - # Ablating top features that contribute to this direction should decrease the diff. - # Amplifying those features should increase the diff. - assert abs(ablated_diff) < abs(baseline_diff), ( - f"Ablation of top features should decrease |logit diff|: " - f"|baseline_diff|={abs(baseline_diff):.4f}, |ablated_diff|={abs(ablated_diff):.4f}" - ) - assert abs(amplified_diff) > abs(baseline_diff), ( - f"Amplification of top features should increase |logit diff|: " - f"|baseline_diff|={abs(baseline_diff):.4f}, |amplified_diff|={abs(amplified_diff):.4f}" - ) - - # === Unrelated logit stability check === - # Verify that unrelated tokens are not affected more than the target tokens. - # The max individual target logit change provides an upper bound for unrelated changes. 
- max_target_abl_change = max(abs(ablated_x - baseline_x), abs(ablated_y - baseline_y)) - max_target_amp_change = max(abs(amplified_x - baseline_x), abs(amplified_y - baseline_y)) - - for i, tok in enumerate(unrelated_tokens): - unrelated_abl_change = abs(ablated_unrelated[i] - baseline_unrelated[i]) - unrelated_amp_change = abs(amplified_unrelated[i] - baseline_unrelated[i]) - - assert unrelated_abl_change < max_target_abl_change, ( - f"Unrelated token '{tok}' ablation change ({unrelated_abl_change:.4f}) " - f"should be less than max target logit change ({max_target_abl_change:.4f})" - ) - assert unrelated_amp_change < max_target_amp_change, ( - f"Unrelated token '{tok}' amplification change ({unrelated_amp_change:.4f}) " - f"should be less than max target logit change ({max_target_amp_change:.4f})" - ) - - def _run_attribution_format_consistency(backend: str): """Backend-agnostic logic for attribution target format consistency test. @@ -878,6 +753,113 @@ def _run_attribution_format_consistency(backend: str): ), "Adjacency matrices differ between None and Sequence[CustomTarget] modes" +def _run_custom_target_correctness( + backend: str, + n_samples: int = 20, + act_atol: float = 5e-4, + act_rtol: float = 1e-5, + logit_atol: float = 1e-4, + logit_rtol: float = 1e-3, +): + """Verify custom target direction feature attribution driven interventions produce expected activation/logit changes + + For a ``logit(x) − logit(y)`` custom direction, randomly samples features, doubles each feature's pre-activation + value (under constrained/frozen-layer conditions), and checks that both the activation changes and the custom + logit-difference change match the adjacency matrix predictions within acceptable tolerances. + + * **Activation changes** match ``adjacency_matrix[:n_features, node]`` within act_atol/act_rtol. + * **Custom logit-difference change** matches the adjacency logit-node prediction within logit_atol/logit_rtol. 
+ + We use the same linear-regime conditions as our other attribution validation tests, e.g. ``verify_feature_edges``: + + * ``constrained_layers=range(n_layers)`` — freezes all layer norms, MLPs, and attention, preventing non-linear + propagation. + * ``apply_activation_function=False`` — operates on pre-activation values. + * ``model.zero_softcap()`` — removes the final logit softcap. + * Intervention = doubling the pre-activation (delta = old_activation). Because the adjacency column already encodes + the full effect of the feature at its current activation level, doubling adds exactly one copy of the predicted + effect. + """ + prompt = "The capital of the state containing Dallas is" + token_x, token_y = "▁Austin", "▁Dallas" + + model, n_layers_range, _ = _cfg_backend(backend) + custom_target, idx_x, idx_y = _build_custom_diff_target( + model, prompt, token_x, token_y, backend + ) + + graph = attribute(prompt, model, attribution_targets=[custom_target], batch_size=256) + + device = next(model.parameters()).device + adjacency_matrix = graph.adjacency_matrix.to(device) + active_features = graph.active_features.to(device) + n_features = active_features.size(0) + n_logits = len(graph.logit_targets) + + # --- baseline (pre-activation, unsoftcapped) --- + with model.zero_softcap(): + logits, activation_cache = model.get_activations( + graph.input_tokens, apply_activation_function=False + ) + logits = logits.squeeze(0) + + relevant_activations = activation_cache[ + active_features[:, 0], active_features[:, 1], active_features[:, 2] + ] + baseline_logit_diff = logits[-1, idx_x] - logits[-1, idx_y] + + # --- per-feature exact checks --- + random_order = torch.randperm(n_features) + chosen_nodes = random_order[: min(n_samples, n_features)] + + for chosen_node in chosen_nodes: + layer, pos, feat_idx = active_features[chosen_node].tolist() + old_activation = activation_cache[layer, pos, feat_idx] + new_activation = old_activation * 2 + + expected_effects = 
adjacency_matrix[:, chosen_node] + expected_act_diff = expected_effects[:n_features] + expected_logit_diff = expected_effects[-n_logits:] # (1,) for single target + + with model.zero_softcap(): + new_logits, new_act_cache = model.feature_intervention( + graph.input_tokens, + [(layer, pos, feat_idx, new_activation)], + constrained_layers=n_layers_range, + apply_activation_function=False, + ) + new_logits = new_logits.squeeze(0) + + # -- activation check -- + assert new_act_cache is not None + new_relevant_activations = new_act_cache[ + active_features[:, 0], active_features[:, 1], active_features[:, 2] + ] + assert torch.allclose( + new_relevant_activations, + relevant_activations + expected_act_diff, + atol=act_atol, + rtol=act_rtol, + ), ( + f"Activation mismatch for feature ({layer}, {pos}, {feat_idx}): " + f"max diff = {(new_relevant_activations - relevant_activations - expected_act_diff).abs().max():.6e}" + ) + + # -- logit-difference check -- + new_logit_diff = new_logits[-1, idx_x] - new_logits[-1, idx_y] + actual_logit_change = (new_logit_diff - baseline_logit_diff).unsqueeze(0) + assert torch.allclose( + actual_logit_change, + expected_logit_diff, + atol=logit_atol, + rtol=logit_rtol, + ), ( + f"Logit-diff mismatch for feature ({layer}, {pos}, {feat_idx}): " + f"predicted={expected_logit_diff.item():.6e}, " + f"actual={actual_logit_change.item():.6e}" + ) + + @pytest.fixture(autouse=False) def cleanup_cuda(): yield @@ -888,11 +870,11 @@ def cleanup_cuda(): @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") @pytest.mark.parametrize("backend", ["transformerlens", "nnsight"]) def test_custom_target_correctness(cleanup_cuda, backend): - """Verify custom attribution targets produce valid results. 
+ """Verify custom target direction feature attribution driven interventions produce expected activation/logit changes - Constructs logit(x) - logit(y) direction, runs attribution, then - verifies that ablating/amplifying top features changes the logit difference - in the expected directions. + For a ``logit(x) − logit(y)`` custom direction, randomly samples features, doubles each feature's pre-activation + value (under constrained/frozen-layer conditions), and checks that both the activation changes and the custom + logit-difference change match the adjacency matrix predictions within acceptable tolerances. Args: cleanup_cuda: Fixture for CUDA cleanup after test diff --git a/tests/test_graph.py b/tests/test_graph.py index d1c7d307..da3ec8fb 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -192,7 +192,7 @@ def test_graph_with_tensor_logit_targets(): assert graph_tensor.logit_targets[2].token_str == "" # Verify properties work - assert graph_tensor.vocab_indices == [262, 290, 314] + assert graph_tensor.logit_token_ids.tolist() == [262, 290, 314] assert torch.equal(graph_tensor.logit_token_ids, torch.tensor([262, 290, 314])) # Test with LogitTarget list format (current) @@ -212,8 +212,8 @@ def test_graph_with_tensor_logit_targets(): activation_values=torch.tensor([1.5]), ) - # Verify both formats produce same vocab_indices - assert graph_tensor.vocab_indices == graph_list.vocab_indices + # Verify both formats produce same logit_token_ids + assert torch.equal(graph_tensor.logit_token_ids, graph_list.logit_token_ids) assert graph_tensor.vocab_size == graph_list.vocab_size @@ -286,7 +286,7 @@ def test_graph_serialization_with_logit_targets(logit_targets_input, expected_to loaded_graph = Graph.from_pt(tmp_path) # Verify loaded graph has correct data - assert loaded_graph.vocab_indices == [262, 290, 314] + assert loaded_graph.logit_token_ids.tolist() == [262, 290, 314] assert loaded_graph.vocab_size == 50257 assert torch.equal(loaded_graph.logit_token_ids, 
torch.tensor([262, 290, 314])) assert torch.equal(loaded_graph.logit_probabilities, torch.tensor([0.5, 0.3, 0.2])) @@ -352,7 +352,7 @@ def test_graph_from_pt_legacy_tensor_format(): assert loaded_graph.logit_targets[1].vocab_idx == 290 assert loaded_graph.logit_targets[2].vocab_idx == 314 assert loaded_graph.logit_targets[0].token_str == "" - assert loaded_graph.vocab_indices == [262, 290, 314] + assert loaded_graph.logit_token_ids.tolist() == [262, 290, 314] finally: if os.path.exists(tmp_path): os.unlink(tmp_path) From 7ea7f1497b02c563b04f21f9d8b4af23ac3ad0c2 Mon Sep 17 00:00:00 2001 From: Daniel Dale Date: Tue, 17 Feb 2026 17:09:29 -0800 Subject: [PATCH 14/18] very rough exploratory draft of attribution_targets_demo.ipynb to solicit maintainer/community feedback --- .gitignore | 2 + circuit_tracer/utils/demo_utils.py | 384 ++- demos/attribute_demo.ipynb | 117 +- demos/attribution_targets_demo.ipynb | 2705 ++++++++++++++++++++++ tests/test_tutorial_notebook_backends.py | 535 ++++- 5 files changed, 3636 insertions(+), 107 deletions(-) create mode 100644 demos/attribution_targets_demo.ipynb diff --git a/.gitignore b/.gitignore index 14ac5454..53068cb7 100644 --- a/.gitignore +++ b/.gitignore @@ -77,6 +77,8 @@ target/ # Jupyter Notebook .ipynb_checkpoints +demos/**/graphs +demos/**/graph_files # IPython profile_default/ diff --git a/circuit_tracer/utils/demo_utils.py b/circuit_tracer/utils/demo_utils.py index 9e9c2c97..803317b5 100644 --- a/circuit_tracer/utils/demo_utils.py +++ b/circuit_tracer/utils/demo_utils.py @@ -1,3 +1,4 @@ +import gc import html import json import urllib.parse @@ -6,9 +7,317 @@ import torch from IPython.display import HTML, display +from circuit_tracer.graph import compute_node_influence + Feature = namedtuple("Feature", ["layer", "pos", "feature_idx"]) +def get_unembed_vecs(model, token_ids: list[int], backend: str) -> list[torch.Tensor]: + """Extract unembedding column vectors for the given token IDs. 
+ + Handles the orientation difference between TransformerLens (d_model, d_vocab) + and NNSight (d_vocab, d_model) unembedding matrices. + + Args: + model: A ``ReplacementModel`` instance. + token_ids: Vocabulary indices whose unembed columns to extract. + backend: ``"transformerlens"`` or ``"nnsight"``. + + Returns: + List of 1-D tensors, one per token ID, each of shape ``(d_model,)``. + """ + unembed = model.unembed.W_U if backend == "transformerlens" else model.unembed_weight + d_vocab = model.tokenizer.vocab_size + if unembed.shape[0] == d_vocab: + return [unembed[tid] for tid in token_ids] + return [unembed[:, tid] for tid in token_ids] + + +def cleanup_cuda() -> None: + """Run garbage collection and free CUDA cache.""" + gc.collect() + torch.cuda.empty_cache() + + +def get_top_features(graph, n: int = 10) -> tuple[list[tuple[int, int, int]], list[float]]: + """Extract the top-N feature nodes from the graph by total multi-hop influence. + + Uses ``compute_node_influence`` to rank features by their total effect + on *all* logit targets (direct + indirect paths), weighted by each + target's probability. + + Args: + graph: A Graph object with ``adjacency_matrix``, ``selected_features``, + ``active_features``, ``logit_targets``, and ``logit_probabilities``. + n: Number of top features to return. + + Returns: + Tuple of (features, scores) where *features* is a list of + ``(layer, pos, feature_idx)`` tuples and *scores* is the + corresponding influence values. 
+ """ + n_logits = len(graph.logit_targets) + n_features = len(graph.selected_features) + + # Build logit weight vector + logit_weights = torch.zeros( + graph.adjacency_matrix.shape[0], device=graph.adjacency_matrix.device + ) + logit_weights[-n_logits:] = graph.logit_probabilities + + # Multi-hop influence across all logit targets + node_influence = compute_node_influence(graph.adjacency_matrix, logit_weights) + feature_influence = node_influence[:n_features] + + top_k = min(n, n_features) + top_values, top_indices = torch.topk(feature_influence, top_k) + + features = [ + tuple(graph.active_features[graph.selected_features[i]].tolist()) for i in top_indices + ] + scores = top_values.tolist() + return features, scores + + +def display_top_features_comparison( + feature_sets: dict[str, list[tuple[int, int, int]]], + scores_sets: dict[str, list[float]] | None = None, + neuronpedia_model: str | None = None, + neuronpedia_set: str = "gemmascope-transcoder-16k", +): + """Display top features from multiple attribution configurations side by side. + + Args: + feature_sets: Mapping from config label to list of ``(layer, pos, feat_idx)`` tuples. + scores_sets: Optional mapping from config label to list of attribution scores. + If ``None``, scores are omitted from the display. + neuronpedia_model: Neuronpedia model slug (e.g. ``"gemma-2-2b"``). + When provided, feature indices become clickable links. + neuronpedia_set: Neuronpedia set name (default ``"gemmascope-transcoder-16k"``). + """ + labels = list(feature_sets.keys()) + colors = ["#2471A3", "#27AE60", "#8E44AD", "#E67E22", "#C0392B", "#16A085"] + + style = """ + + """ + + body = '
' + for i, label in enumerate(labels): + color = colors[i % len(colors)] + features = feature_sets[label] + scores = scores_sets.get(label) if scores_sets else None + body += '
' + body += ( + f'
{html.escape(label)}
' + ) + body += "" + if scores is not None: + body += "" + body += "" + for j, (layer, pos, feat_idx) in enumerate(features): + score_cell = f"" if scores is not None else "" + if neuronpedia_model is not None: + np_url = ( + f"https://www.neuronpedia.org/{neuronpedia_model}/" + f"{layer}-{neuronpedia_set}/{feat_idx}" + ) + feat_link = f'{feat_idx}' + else: + feat_link = str(feat_idx) + node_cell = f'' + body += f"{node_cell}{score_cell}" + body += "
#NodeScore
{scores[j]:.4f}({layer}, {pos}, {feat_link})
{j + 1}
" + body += "
" + + display(HTML(style + body)) + + +def display_token_probs( + logits: torch.Tensor, + token_ids: list[int], + labels: list[str], + title: str = "", +) -> None: + """Display softmax probabilities for specific tokens as a styled HTML table. + + Probabilities are shown as percentages (3 decimal places) when ≥ 0.001, + otherwise in scientific notation (2 significant figures). + + Args: + logits: Raw logits tensor (at least 2-D; last position is used). + token_ids: Vocabulary indices to display. + labels: Human-readable label for each token. + title: Optional heading rendered above the table. + """ + probs = torch.softmax(logits.squeeze(0)[-1].float(), dim=-1) + + def _fmt(p: float) -> str: + return f"{p * 100:.3f}%" if p >= 1e-3 else f"{p:.2e}" + + rows = "" + for i, (tid, label) in enumerate(zip(token_ids, labels)): + p = probs[tid].item() + logit_val = logits.squeeze(0)[-1, tid].item() + row_class = "even-row" if i % 2 == 0 else "odd-row" + rows += ( + f'' + f'{html.escape(label)}' + f'{_fmt(p)}' + f'{logit_val:.4f}' + f"\n" + ) + + title_html = ( + f'
{html.escape(title)}
' + if title + else "" + ) + + markup = f""" +
+ {title_html} + + + + + + + + + + {rows} + +
TokenProbabilityLogit
+
+ """ + display(HTML(markup)) + + +def display_ablation_chart( + groups: dict[str, dict[str, float]], + logit_diffs: dict[str, float] | None = None, + title: str = "", + colors: list[str] | None = None, +) -> None: + """Display ablation results as a grouped bar chart with logit-difference line. + + Args: + groups: Mapping from group label (e.g. ``"Baseline"``) to a dict + of ``{token_label: probability}``. + logit_diffs: Optional mapping from group label to logit difference, + plotted as a dashed line on a secondary y-axis. + title: Chart title. + colors: Bar colours for each token. Defaults to a built-in palette. + """ + import matplotlib.pyplot as plt + import numpy as np + + group_labels = list(groups.keys()) + token_labels = list(next(iter(groups.values())).keys()) + n_groups = len(group_labels) + n_tokens = len(token_labels) + + if colors is None: + colors = ["#2471A3", "#E67E22", "#27AE60", "#C0392B", "#8E44AD"][:n_tokens] + + x = np.arange(n_groups) + width = 0.8 / n_tokens + + fig, ax1 = plt.subplots(figsize=(8, 5.0)) + + for i, tok in enumerate(token_labels): + vals = [groups[g].get(tok, 0) for g in group_labels] + offset = (i - (n_tokens - 1) / 2) * width + bars = ax1.bar( + x + offset, + vals, + width * 0.9, + label=tok, + color=colors[i], + alpha=0.85, + ) + for bar, v in zip(bars, vals): + ax1.text( + bar.get_x() + bar.get_width() / 2, + bar.get_height() + 0.005, + f"{v:.3f}", + ha="center", + va="bottom", + fontsize=8, + ) + + ax1.set_ylabel("Probability") + ax1.set_xticks(x) + ax1.set_xticklabels(group_labels) + max_prob = max(max(groups[g].get(t, 0) for t in token_labels) for g in group_labels) + ax1.set_ylim(0, max_prob * 1.4) + + if logit_diffs is not None: + ax2 = ax1.twinx() + diff_vals = [logit_diffs.get(g, 0) for g in group_labels] + ax2.plot( + x, + diff_vals, + "k--o", + label="Logit diff", + linewidth=1.5, + markersize=5, + ) + ax2.set_ylabel("Logit difference") + ax2.legend(loc="upper right") + + ax1.legend(loc="upper left") + if title: + 
ax1.set_title(title, fontsize=13, fontweight="bold") + fig.tight_layout() + plt.show() + + def get_topk(logits: torch.Tensor, tokenizer, k: int = 5): probs = torch.softmax(logits.squeeze()[-1], dim=-1) topk = torch.topk(probs, k) @@ -16,10 +325,29 @@ def get_topk(logits: torch.Tensor, tokenizer, k: int = 5): # Now let's create a version that's more adaptive to dark/light mode -def display_topk_token_predictions(sentence, original_logits, new_logits, tokenizer, k: int = 5): - """ - Version that tries to be more adaptive to both dark and light modes - using higher contrast elements and CSS variables where possible +def display_topk_token_predictions( + sentence, + original_logits, + new_logits, + tokenizer, + k: int = 5, + key_tokens: list[tuple[str, int]] | None = None, +): + """Display top-k token predictions before and after an intervention. + + Adaptive to both dark and light modes using higher-contrast elements + and CSS variables where possible. + + Args: + sentence: The input prompt string. + original_logits: Logits before the intervention. + new_logits: Logits after the intervention. + tokenizer: Tokenizer for decoding token IDs. + k: Number of top tokens to show per section. + key_tokens: Optional list of ``(token_label, token_id)`` pairs. + When provided, a third *Key Tokens* table is rendered showing + the probabilities of these specific tokens in both the original + and new distributions. """ original_tokens = get_topk(original_logits, tokenizer, k) @@ -186,6 +514,54 @@ def display_topk_token_predictions(sentence, original_logits, new_logits, tokeni + """ + + # Optional key-tokens section + if key_tokens: + orig_probs = torch.softmax(original_logits.squeeze()[-1], dim=-1) + new_probs = torch.softmax(new_logits.squeeze()[-1], dim=-1) + + html += """ +
+
Key Tokens
+ + + + + + + + + + + """ + for i, (label, tid) in enumerate(key_tokens): + p_orig = orig_probs[tid].item() + p_new = new_probs[tid].item() + relative = (p_new - p_orig) / max(p_orig, 1e-9) + sign = "+" if relative >= 0 else "" + bar_width = int(p_new / max(max_prob, 1e-9) * 100) + row_class = "even-row" if i % 2 == 0 else "odd-row" + html += f""" + + + + + + + """ + html += """ + +
TokenOriginalNewChange
{label}{p_orig:.4f}{p_new:.4f} +
+
+ {sign}{relative * 100:.1f}% +
+
+
+ """ + + html += """ """ diff --git a/demos/attribute_demo.ipynb b/demos/attribute_demo.ipynb index d188de25..cfc48a10 100644 --- a/demos/attribute_demo.ipynb +++ b/demos/attribute_demo.ipynb @@ -17,7 +17,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -44,7 +44,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": { "id": "P8fNhpqzmS8k" }, @@ -72,58 +72,11 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": { "id": "BBsETpl0mS8l" }, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "ad0d25d11e5f4aacae3fd4fbba264d1e", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Fetching 26 files: 0%| | 0/26 [00:00\n", + " \"Open\n", + "\n", + "\n", + "This tutorial explores how to use the **attribution targets API** to attribute back from arbitrary tokens or functions thereof. The `AttributionTargets` class (in `circuit_tracer.attribution.targets`) accepts four input formats:\n", + "\n", + "1. **`None`** — *Salient logits* (default): auto-select the most probable next tokens via `max_n_logits` / `desired_logit_prob`\n", + "2. **`Sequence[str]`** — *Token strings*: attribute from explicitly named tokens (e.g., `[\"▁Austin\", \"▁Dallas\"]`)\n", + "3. **`Sequence[TargetSpec]`** — *Custom targets*: attribute from an arbitrary direction in the residual stream (e.g., a `CustomTarget` encoding `logit(Austin) − logit(Dallas)`)\n", + "4. **`torch.Tensor`** — *Token ID tensor*: attribute from specific vocabulary indices\n", + "\n", + "We use the capital-city prompt from the other demos: the model must resolve *\"capital of the state containing Dallas\"* via multi-hop reasoning (Dallas → Texas → Austin).\n", + "\n", + "After comparing the top features discovered under each mode, we run some relevant causal interventions." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# @title Colab Setup Environment\n", + "\n", + "try:\n", + " import google.colab\n", + "\n", + " !mkdir -p repository && cd repository && \\\n", + " git clone https://github.com/safety-research/circuit-tracer && \\\n", + " curl -LsSf https://astral.sh/uv/install.sh | sh && \\\n", + " uv pip install -e circuit-tracer/\n", + "\n", + " import sys\n", + " from huggingface_hub import notebook_login\n", + "\n", + " sys.path.append(\"repository/circuit-tracer\")\n", + " sys.path.append(\"repository/circuit-tracer/demos\")\n", + " notebook_login(new_session=False)\n", + " IN_COLAB = True\n", + "except ImportError:\n", + " IN_COLAB = False" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "id": "P8fNhpqzmS8k" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/mnt/cache/speediedan/.venvs/ct_dev/lib/python3.13/site-packages/transformers/utils/hub.py:110: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.\n", + " warnings.warn(\n" + ] + } + ], + "source": [ + "from functools import partial\n", + "\n", + "import torch\n", + "\n", + "from circuit_tracer import ReplacementModel, attribute\n", + "from circuit_tracer.attribution.targets import CustomTarget\n", + "from circuit_tracer.utils import create_graph_files\n", + "from circuit_tracer.utils.demo_utils import (\n", + " cleanup_cuda,\n", + " display_ablation_chart,\n", + " display_token_probs,\n", + " display_topk_token_predictions,\n", + " display_top_features_comparison,\n", + " get_top_features,\n", + " get_unembed_vecs,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZN_3kEyfmS8k" + }, + "source": [ + "## Setup\n", + "\n", + "Load the model and define helper functions. 
We use `google/gemma-2-2b` with the Gemma Scope transcoders, the same configuration used in the other demos. Change `backend` to `'nnsight'` if you prefer the NNSight backend." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "BBsETpl0mS8l" + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "70526ca651f843d6bc193da8ad5abbf3", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Fetching 26 files: 0%| | 0/26 [00:00\n", + "
Baseline probabilities
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + " \n", + "
TokenProbabilityLogit
▁Austin41.380%26.1250
▁Dallas2.998%23.5000
▁Texas5.600%24.1250
\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Token X: '▁Austin' (vocab id 22605)\n", + "Token Y: '▁Dallas' (vocab id 26865)\n", + "Texas: '▁Texas' (vocab id 9447)\n", + "Custom target: logit(▁Austin)-logit(▁Dallas) prob=3.8477e-01\n", + "Semantic target: Concept: Capitals − States prob=1.0557e-01\n" + ] + } + ], + "source": [ + "prompt = \"Fact: the capital of the state containing Dallas is\"\n", + "token_x, token_y = \"▁Austin\", \"▁Dallas\"\n", + "\n", + "# Shared attribution kwargs (apply to all runs)\n", + "# Note: max_n_logits / desired_logit_prob only apply to salient-logit mode\n", + "attr_kwargs = dict(\n", + " batch_size=256,\n", + " max_feature_nodes=8192,\n", + " offload=\"disk\" if IN_COLAB else \"cpu\",\n", + " verbose=True,\n", + ")\n", + "\n", + "# Build the custom diff-target: logit(Austin) − logit(Dallas)\n", + "custom_target, idx_x, idx_y = build_custom_diff_target(\n", + " model, prompt, token_x, token_y, backend=backend\n", + ")\n", + "\n", + "# Build the semantic concept target: Capital Cities − States\n", + "capitals = [\"▁Austin\", \"▁Sacramento\", \"▁Olympia\", \"▁Atlanta\"]\n", + "states = [\"▁Texas\", \"▁California\", \"▁Washington\", \"▁Georgia\"]\n", + "semantic_target = build_semantic_concept_target(\n", + " model, prompt, capitals, states,\n", + " label=\"Concept: Capitals − States\", backend=backend,\n", + ")\n", + "\n", + "# Also track Texas — the intermediate hop in the multi-hop chain\n", + "idx_texas = model.tokenizer.encode(\"▁Texas\", add_special_tokens=False)[-1]\n", + "\n", + "# Bind the tokenizer and key tokens for display helpers\n", + "display_topk = partial(\n", + " display_topk_token_predictions,\n", + " tokenizer=model.tokenizer,\n", + " key_tokens=[(token_x, idx_x), (token_y, idx_y), (\"▁Texas\", idx_texas)],\n", + ")\n", + "\n", + "# Show baseline token probabilities\n", + 
"input_ids = model.ensure_tokenized(prompt)\n", + "with torch.no_grad():\n", + " baseline_logits, _ = model.get_activations(input_ids)\n", + "\n", + "key_ids = [idx_x, idx_y, idx_texas]\n", + "key_labels = [token_x, token_y, \"▁Texas\"]\n", + "display_token_probs(baseline_logits, key_ids, key_labels, title=\"Baseline probabilities\")\n", + "\n", + "print(f\"\\nToken X: {token_x!r} (vocab id {idx_x})\")\n", + "print(f\"Token Y: {token_y!r} (vocab id {idx_y})\")\n", + "print(f\"Texas: '▁Texas' (vocab id {idx_texas})\")\n", + "print(f\"Custom target: {custom_target.token_str} prob={custom_target.prob:.4e}\")\n", + "print(f\"Semantic target: {semantic_target.token_str} prob={semantic_target.prob:.4e}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "RUn1YKnUmS8l" + }, + "source": [ + "## Automatic Target Selection — Salient Logits (`None`)\n", + "\n", + "When `attribution_targets` is `None` (the default), `AttributionTargets` auto-selects the most probable next tokens until `desired_logit_prob` cumulative probability is reached (capped at `max_n_logits`). This is the standard mode used by `attribute_demo.ipynb`." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "id": "2tLE4FzdmS8m" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Phase 0: Precomputing activations and vectors\n", + "Precomputation completed in 0.21s\n", + "Found 9152 active features\n", + "Phase 1: Running forward pass\n", + "Forward pass completed in 0.10s\n", + "Phase 2: Building input vectors\n", + "Using 10 salient logits with cumulative probability 0.7422\n", + "Will include 8192 of 9152 feature nodes\n", + "Input vectors built in 1.37s\n", + "Phase 3: Computing logit attributions\n", + ":0: UserWarning: Full backward hook is firing when gradients are computed with respect to module outputs since no inputs require gradients. 
See https://docs.pytorch.org/docs/main/generated/torch.nn.Module.html#torch.nn.Module.register_full_backward_hook for more details.\n", + "Logit attributions completed in 0.09s\n", + "Phase 4: Computing feature attributions\n", + "Feature influence computation: 100%|██████████| 8192/8192 [00:02<00:00, 4001.74it/s]\n", + "Feature attributions completed in 2.05s\n", + "Attribution completed in 6.97s\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Salient-logits graph: 10 targets, 9152 active features\n" + ] + } + ], + "source": [ + "graph_salient = attribute(\n", + " prompt=prompt, model=model,\n", + " max_n_logits=10, desired_logit_prob=0.95,\n", + " **attr_kwargs,\n", + ")\n", + "print(f\"Salient-logits graph: {len(graph_salient.logit_targets)} targets, \"\n", + " f\"{graph_salient.active_features.shape[0]} active features\")\n", + "\n", + "# Free CUDA memory before next run\n", + "cleanup_cuda()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "w3cdLLfJmS8m" + }, + "source": [ + "## Token-String Targets — `Sequence[str]`\n", + "\n", + "Pass a list of token strings (e.g., `[\"▁Austin\", \"▁Dallas\"]`) to focus attribution on exactly those logits. Internally, each string is tokenized and its softmax probability and unembedding vector are computed automatically — you only need to supply the surface forms." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "id": "Vh8HPtimmS8m" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Phase 0: Precomputing activations and vectors\n", + "Precomputation completed in 0.18s\n", + "Found 9152 active features\n", + "Phase 1: Running forward pass\n", + "Forward pass completed in 0.09s\n", + "Phase 2: Building input vectors\n", + "Using 2 specified logit targets with cumulative probability 0.4434\n", + "Will include 8192 of 9152 feature nodes\n", + "Input vectors built in 1.35s\n", + "Phase 3: Computing logit attributions\n", + "Logit attributions completed in 0.05s\n", + "Phase 4: Computing feature attributions\n", + "Feature influence computation: 100%|██████████| 8192/8192 [00:02<00:00, 3998.39it/s]\n", + "Feature attributions completed in 2.05s\n", + "Attribution completed in 6.96s\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "String-targets graph: 2 targets, 9152 active features\n" + ] + } + ], + "source": [ + "graph_str = attribute(\n", + " prompt=prompt, model=model,\n", + " attribution_targets=[token_x, token_y],\n", + " **attr_kwargs,\n", + ")\n", + "print(f\"String-targets graph: {len(graph_str.logit_targets)} targets, \"\n", + " f\"{graph_str.active_features.shape[0]} active features\")\n", + "\n", + "# Free CUDA memory before next run\n", + "cleanup_cuda()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "EQuFE-eimS8m" + }, + "source": [ + "## Custom Logit-Difference Target — `Sequence[TargetSpec]`\n", + "\n", + "Pass a `CustomTarget` (or any `TargetSpec` — a tuple of `(token_str, prob, vec)`) that encodes an arbitrary direction in the residual stream. Here the direction is `logit(Austin) − logit(Dallas)`, so attribution will trace the circuit that drives the *correct* answer over the surface-level attractor, rather than tracing individual logit values separately." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "id": "gMZ8Ee-KmS8m" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Phase 0: Precomputing activations and vectors\n", + "Precomputation completed in 0.18s\n", + "Found 9152 active features\n", + "Phase 1: Running forward pass\n", + "Forward pass completed in 0.08s\n", + "Phase 2: Building input vectors\n", + "Using 1 custom attribution targets with total weight 0.3848\n", + "Will include 8192 of 9152 feature nodes\n", + "Input vectors built in 1.34s\n", + "Phase 3: Computing logit attributions\n", + "Logit attributions completed in 0.05s\n", + "Phase 4: Computing feature attributions\n", + "Feature influence computation: 100%|██████████| 8192/8192 [00:02<00:00, 3929.05it/s]\n", + "Feature attributions completed in 2.09s\n", + "Attribution completed in 7.08s\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Custom-target graph: 1 targets, 9152 active features\n" + ] + } + ], + "source": [ + "graph_custom = attribute(\n", + " prompt=prompt, model=model,\n", + " attribution_targets=[custom_target],\n", + " **attr_kwargs,\n", + ")\n", + "print(f\"Custom-target graph: {len(graph_custom.logit_targets)} targets, \"\n", + " f\"{graph_custom.active_features.shape[0]} active features\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Semantic Direction — Concept Target\n", + "\n", + "Instead of a pairwise logit difference, we can attribute to an **abstract concept direction** in the residual stream. We build a CustomTarget for an abstract concept direction via vector rejection. For each (capital, state) pair, project the capital vector onto the state vector and subtract that projection, leaving pure \"capital-ness\"." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Phase 0: Precomputing activations and vectors\n", + "Precomputation completed in 0.14s\n", + "Found 9152 active features\n", + "Phase 1: Running forward pass\n", + "Forward pass completed in 0.08s\n", + "Phase 2: Building input vectors\n", + "Using 1 custom attribution targets with total weight 0.1056\n", + "Will include 8192 of 9152 feature nodes\n", + "Input vectors built in 1.34s\n", + "Phase 3: Computing logit attributions\n", + "Logit attributions completed in 0.05s\n", + "Phase 4: Computing feature attributions\n", + "Feature influence computation: 100%|██████████| 8192/8192 [00:02<00:00, 4067.53it/s]\n", + "Feature attributions completed in 2.02s\n", + "Attribution completed in 6.83s\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Semantic-target graph: 1 targets, 9152 active features\n" + ] + } + ], + "source": [ + "graph_semantic = attribute(\n", + " prompt=prompt, model=model,\n", + " attribution_targets=[semantic_target],\n", + " **attr_kwargs,\n", + ")\n", + "print(f\"Semantic-target graph: {len(graph_semantic.logit_targets)} targets, \"\n", + " f\"{graph_semantic.active_features.shape[0]} active features\")\n", + "\n", + "# Free CUDA memory before feature comparison\n", + "cleanup_cuda()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yDGiO8jBmS8m" + }, + "source": [ + "## Compare Top Features\n", + "\n", + "Extract the top-10 features from each graph (ranked by multi-hop influence) and display them side by side. Feature indices link to their [Neuronpedia](https://www.neuronpedia.org/) dashboards. The *Custom Target* column highlights features that specifically drive the Austin-vs-Dallas logit difference — the multi-hop reasoning circuit (Dallas → Texas → capital → Austin)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "id": "185O1Ck1mS8m" + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + "
Salient Logits
#NodeScore
1(20, 10, 15589)0.0197
2(24, 10, 6044)0.0159
3(21, 10, 5943)0.0118
4(23, 10, 12237)0.0083
5(14, 9, 2268)0.0080
6(16, 9, 25)0.0074
7(25, 10, 13300)0.0072
8(20, 9, 15589)0.0069
9(24, 10, 6394)0.0061
10(24, 10, 13277)0.0057
Strings [▁Austin, ▁Dallas]
#NodeScore
1(20, 10, 15589)0.0155
2(24, 10, 6044)0.0122
3(21, 10, 5943)0.0095
4(23, 10, 12237)0.0074
5(14, 9, 2268)0.0058
6(16, 9, 25)0.0057
7(25, 10, 13300)0.0054
8(20, 9, 15589)0.0054
9(24, 10, 6394)0.0051
10(4, 9, 13154)0.0040
Custom (logit(▁Austin)-logit(▁Dallas))
#NodeScore
1(19, 10, 7477)0.0057
2(23, 10, 12237)0.0035
3(18, 10, 8959)0.0028
4(0, 2, 16200)0.0026
5(14, 9, 2268)0.0025
6(25, 10, 583)0.0025
7(1, 4, 1000)0.0023
8(18, 10, 6101)0.0022
9(0, 3, 3820)0.0022
10(0, 5, 2848)0.0022
Semantic (Concept: Capitals − States)
#NodeScore
1(21, 10, 5943)0.0036
2(24, 10, 6394)0.0020
3(23, 10, 12237)0.0017
4(24, 10, 5999)0.0012
5(20, 10, 15589)0.0011
6(19, 10, 2695)0.0010
7(24, 10, 6044)0.0010
8(22, 10, 4999)0.0010
9(18, 10, 6101)0.0009
10(0, 2, 16200)0.0008
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "top_salient, scores_salient = get_top_features(graph_salient, n=10)\n", + "top_str, scores_str = get_top_features(graph_str, n=10)\n", + "top_custom, scores_custom = get_top_features(graph_custom, n=10)\n", + "top_semantic, scores_semantic = get_top_features(graph_semantic, n=10)\n", + "\n", + "display_top_features_comparison(\n", + " {\n", + " \"Salient Logits\": top_salient,\n", + " f\"Strings [{token_x}, {token_y}]\": top_str,\n", + " f\"Custom ({custom_target.token_str})\": top_custom,\n", + " f\"Semantic ({semantic_target.token_str})\": top_semantic,\n", + " },\n", + " scores_sets={\n", + " \"Salient Logits\": scores_salient,\n", + " f\"Strings [{token_x}, {token_y}]\": scores_str,\n", + " f\"Custom ({custom_target.token_str})\": scores_custom,\n", + " f\"Semantic ({semantic_target.token_str})\": scores_semantic,\n", + " },\n", + " neuronpedia_model=\"gemma-2-2b\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "98579UbGmS8m" + }, + "source": [ + "## Amplify the Austin-Dallas Custom Difference Circuit\n", + "\n", + "To confirm the custom-target features are causally meaningful, we amplify them by 10× and check that the Austin-vs-Dallas logit gap widens (i.e., the model becomes even more confident Austin is correct)." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "
\n", + "
Before amplification
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + " \n", + "
TokenProbabilityLogit
▁Austin41.380%26.1250
▁Dallas2.998%23.5000
▁Texas5.600%24.1250
\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + "
After 10× amplification
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + " \n", + "
TokenProbabilityLogit
▁Austin83.206%24.7500
▁Dallas5.91e-0417.5000
▁Texas5.28e-095.8750
\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "logit(Austin) − logit(Dallas): 2.6250 → 7.2500 (Δ = +4.6250)\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + "
\n", + "
Input Sentence:
\n", + "
Fact: the capital of the state containing Dallas is
\n", + " \n", + "
\n", + "
Original Top 5 Tokens
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
TokenProbabilityDistribution
Austin0.414\n", + "
\n", + "
\n", + " 41.4%\n", + "
\n", + "
Texas0.056\n", + "
\n", + "
\n", + " 5.6%\n", + "
\n", + "
the0.056\n", + "
\n", + "
\n", + " 5.6%\n", + "
\n", + "
not0.056\n", + "
\n", + "
\n", + " 5.6%\n", + "
\n", + "
Fort0.044\n", + "
\n", + "
\n", + " 4.4%\n", + "
\n", + "
\n", + " \n", + "
New Top 5 Tokens
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
TokenProbabilityDistribution
Austin0.832\n", + "
\n", + "
\n", + " 83.2%\n", + "
\n", + "
in0.017\n", + "
\n", + "
\n", + " 1.7%\n", + "
\n", + "
AUSTIN0.010\n", + "
\n", + "
\n", + " 1.0%\n", + "
\n", + "
Irving0.009\n", + "
\n", + "
\n", + " 0.9%\n", + "
\n", + "
D0.009\n", + "
\n", + "
\n", + " 0.9%\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + "
Key Tokens
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
TokenOriginalNewChange
▁Austin0.41410.8320\n", + "
\n", + "
\n", + " +100.9%\n", + "
\n", + "
▁Dallas0.03000.0006\n", + "
\n", + "
\n", + " -98.0%\n", + "
\n", + "
▁Texas0.05590.0000\n", + "
\n", + "
\n", + " -100.0%\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Get activations for interventions\n", + "input_ids = model.ensure_tokenized(prompt)\n", + "original_logits, activations = model.get_activations(input_ids, sparse=True)\n", + "\n", + "# Baseline\n", + "display_token_probs(original_logits, key_ids, key_labels, title=\"Before amplification\")\n", + "\n", + "# Amplify top custom-target features by 10×\n", + "intervention_tuples = [\n", + " (layer, pos, feat_idx, 10.0 * activations[layer, pos, feat_idx])\n", + " for (layer, pos, feat_idx) in top_custom\n", + "]\n", + "\n", + "new_logits, _ = model.feature_intervention(input_ids, intervention_tuples)\n", + "\n", + "display_token_probs(new_logits, key_ids, key_labels, title=\"After 10× amplification\")\n", + "\n", + "orig_gap = (original_logits.squeeze(0)[-1, idx_x] - original_logits.squeeze(0)[-1, idx_y]).item()\n", + "new_gap = (new_logits.squeeze(0)[-1, idx_x] - new_logits.squeeze(0)[-1, idx_y]).item()\n", + "print(f\"\\nlogit(Austin) − logit(Dallas): {orig_gap:.4f} → {new_gap:.4f} (Δ = {new_gap - orig_gap:+.4f})\")\n", + "\n", + "display_topk(prompt, original_logits, new_logits)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Amplify the Semantic Concept Circuit\n", + "\n", + "Same amplification test for the **semantic concept** features. We compare a modest 2× boost (a gentle nudge along the concept axis) with a strong 10× boost to observe the difference in behaviour." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "
\n", + "
Before amplification (semantic)
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + " \n", + "
TokenProbabilityLogit
▁Austin41.380%26.1250
▁Dallas2.998%23.5000
▁Texas5.600%24.1250
\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + "
After 2× amplification (semantic)
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + " \n", + "
TokenProbabilityLogit
▁Austin61.581%26.3750
▁Dallas3.474%23.5000
▁Texas0.470%21.5000
\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "logit(Austin) − logit(Dallas): 2.6250 → 2.8750 (Δ = +0.2500) [2×]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + "
\n", + "
Input Sentence:
\n", + "
Fact: the capital of the state containing Dallas is
\n", + " \n", + "
\n", + "
Original Top 5 Tokens
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
TokenProbabilityDistribution
Austin0.414\n", + "
\n", + "
\n", + " 41.4%\n", + "
\n", + "
Texas0.056\n", + "
\n", + "
\n", + " 5.6%\n", + "
\n", + "
the0.056\n", + "
\n", + "
\n", + " 5.6%\n", + "
\n", + "
not0.056\n", + "
\n", + "
\n", + " 5.6%\n", + "
\n", + "
Fort0.044\n", + "
\n", + "
\n", + " 4.4%\n", + "
\n", + "
\n", + " \n", + "
New Top 5 Tokens
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
TokenProbabilityDistribution
Austin0.617\n", + "
\n", + "
\n", + " 61.7%\n", + "
\n", + "
Dallas0.035\n", + "
\n", + "
\n", + " 3.5%\n", + "
\n", + "
Fort0.035\n", + "
\n", + "
\n", + " 3.5%\n", + "
\n", + "
San0.031\n", + "
\n", + "
\n", + " 3.1%\n", + "
\n", + "
not0.031\n", + "
\n", + "
\n", + " 3.1%\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + "
Key Tokens
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
TokenOriginalNewChange
▁Austin0.41410.6172\n", + "
\n", + "
\n", + " +49.1%\n", + "
\n", + "
▁Dallas0.03000.0347\n", + "
\n", + "
\n", + " +15.4%\n", + "
\n", + "
▁Texas0.05590.0047\n", + "
\n", + "
\n", + " -91.6%\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + "
After 10× amplification (semantic)
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + " \n", + "
TokenProbabilityLogit
▁Austin65.185%26.1250
▁Dallas0.266%20.6250
▁Texas2.92e-112.2969
\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "logit(Austin) − logit(Dallas): 2.6250 → 5.5000 (Δ = +2.8750) [10×]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + "
\n", + "
Input Sentence:
\n", + "
Fact: the capital of the state containing Dallas is
\n", + " \n", + "
\n", + "
Original Top 5 Tokens
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
TokenProbabilityDistribution
Austin0.414\n", + "
\n", + "
\n", + " 41.4%\n", + "
\n", + "
Texas0.056\n", + "
\n", + "
\n", + " 5.6%\n", + "
\n", + "
the0.056\n", + "
\n", + "
\n", + " 5.6%\n", + "
\n", + "
not0.056\n", + "
\n", + "
\n", + " 5.6%\n", + "
\n", + "
Fort0.044\n", + "
\n", + "
\n", + " 4.4%\n", + "
\n", + "
\n", + " \n", + "
New Top 5 Tokens
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
TokenProbabilityDistribution
Austin0.652\n", + "
\n", + "
\n", + " 65.2%\n", + "
\n", + "
San0.113\n", + "
\n", + "
\n", + " 11.3%\n", + "
\n", + "
Austin0.025\n", + "
\n", + "
\n", + " 2.5%\n", + "
\n", + "
Fort0.025\n", + "
\n", + "
\n", + " 2.5%\n", + "
\n", + "
Washington0.020\n", + "
\n", + "
\n", + " 2.0%\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + "
Key Tokens
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
TokenOriginalNewChange
▁Austin0.41410.6523\n", + "
\n", + "
\n", + " +57.5%\n", + "
\n", + "
▁Dallas0.03000.0027\n", + "
\n", + "
\n", + " -91.1%\n", + "
\n", + "
▁Texas0.05590.0000\n", + "
\n", + "
\n", + " -100.0%\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Baseline\n", + "display_token_probs(original_logits, key_ids, key_labels, title=\"Before amplification (semantic)\")\n", + "\n", + "orig_gap = (original_logits.squeeze(0)[-1, idx_x] - original_logits.squeeze(0)[-1, idx_y]).item()\n", + "\n", + "# --- 2× amplification (gentle nudge along the concept axis) ---\n", + "sem_amp_tuples_2 = [\n", + " (layer, pos, feat_idx, 2.0 * activations[layer, pos, feat_idx])\n", + " for (layer, pos, feat_idx) in top_semantic\n", + "]\n", + "\n", + "sem_amp_logits_2, _ = model.feature_intervention(input_ids, sem_amp_tuples_2)\n", + "\n", + "display_token_probs(sem_amp_logits_2, key_ids, key_labels, title=\"After 2× amplification (semantic)\")\n", + "\n", + "sem_gap_2 = (sem_amp_logits_2.squeeze(0)[-1, idx_x] - sem_amp_logits_2.squeeze(0)[-1, idx_y]).item()\n", + "print(f\"\\nlogit(Austin) − logit(Dallas): {orig_gap:.4f} → {sem_gap_2:.4f} (Δ = {sem_gap_2 - orig_gap:+.4f}) [2×]\")\n", + "\n", + "display_topk(prompt, original_logits, sem_amp_logits_2)\n", + "\n", + "# --- 10× amplification (strong boost) ---\n", + "sem_amp_tuples_10 = [\n", + " (layer, pos, feat_idx, 10.0 * activations[layer, pos, feat_idx])\n", + " for (layer, pos, feat_idx) in top_semantic\n", + "]\n", + "\n", + "sem_amp_logits_10, _ = model.feature_intervention(input_ids, sem_amp_tuples_10)\n", + "\n", + "display_token_probs(sem_amp_logits_10, key_ids, key_labels, title=\"After 10× amplification (semantic)\")\n", + "\n", + "sem_gap_10 = (sem_amp_logits_10.squeeze(0)[-1, idx_x] - sem_amp_logits_10.squeeze(0)[-1, idx_y]).item()\n", + "print(f\"\\nlogit(Austin) − logit(Dallas): {orig_gap:.4f} → {sem_gap_10:.4f} (Δ = {sem_gap_10 - orig_gap:+.4f}) [10×]\")\n", + "\n", + "display_topk(prompt, original_logits, sem_amp_logits_10)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Ablate the Austin-Dallas Custom Difference 
Circuit\n", + "\n", + "Now do the opposite: zero out progressively more custom-target features to remove the Austin-driving circuit. With enough of the multi-hop reasoning path suppressed, the model can no longer resolve the correct answer and reverts to nearby concepts — the intermediate state (Texas) rather than its capital." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAxUAAAHqCAYAAAByRmPvAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjgsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvwVt1zgAAAAlwSFlzAAAPYQAAD2EBqD+naQAApThJREFUeJzs3XdYFFfbBvB76R1EQECQIipgAcWGJTZUxIYaxRbA3htqDBp7Euy9d4099oJGRVGM2MWGBQ0EUVBsoKAisN8ffs7LSnFZylDu33XN5e7MmTPP7K7LPjOnSKRSqRREREREREQKUhI7ACIiIiIiKt6YVBARERERUZ4wqSAiIiIiojxhUkFERERERHnCpIKIiIiIiPKESQUREREREeUJkwoiIiIiIsoTJhVERERERJQnTCqIiIiIiChPmFQQUb7atGkTJBIJJBIJmjZtKtc+06ZNE/bx9fUt0PiaNm0qHGvTpk0FeqyiSpH3qLTz9fUVXrNp06YVyDEU+WxaW1sL+wQHB3+3rrycR3bHKkkK433OTmF+DxIVBBWxA6D8kZ6ejsOHD2Pbtm24fPkynj9/DlVVVZQvXx4uLi7w8vJCu3btIJFICjyW4OBg4Q+Os7MzPD09C/yY+SUsLAwHDhwA8OUPaEn6Yl+0aBHevn0L4MsfTmtra1HjKSgZfwiMHj0aBgYGosWSX0rLe0fiefv2LRYtWiQ8L+wf1IqSSqXYtGkTVq9ejYcPH+Ljx48wNTWFk5MTRowYgebNm4sdYr4oid9rVPIwqSgBnj9/jm7duuHcuXMy6z9+/Ij79+/j/v372LZtG968eVMoX0TBwcGYPn06AMDHx6fYJRVfY2/SpEmJSyr+++8/AF+uYpbUH6Zf3z/gyw/wbz/zS5cuRUJCAgCgcuXKhRmawkrLe0e5t2fPHnz8+BEAUL169e+WnzRpEvr37w8AqFChgrD+7du3Mv93skoqcnuswjB+/HjMnz9fZl1kZCQiIyPh5ORUrJKKvn37ws3NDQBQrlw5mW3f+14jKgqYVBRzycnJaN26NW7evAkAUFJSgq+vL9q1awd9fX08efIEx44dw759+0SOlL71+fNnSKVSqKmpiR1KqVJUfgyRON6/fw8dHR2xw8g3tWvXzlX5SpUqoVKlSoVyrMKwZs0a4fHo0aPRrl07PH/+HP/880+mH+ZFXYUKFWQSPaLihn0qirnFixcLCQUAbNu2DevXr0enTp3QvHlz+Pj4YOfOnbhz5w60tLQA5NyeOrv2pG/fvsW4ceNgb28PTU1NqKurw9zcHE2aNMH48eORnJyMqKgoSCQSmSsqmzdvFurLeHX16y3rZs2awdDQEKqqqjA1NUXHjh0RFBSU6Ty/1iGRSHDr1i0MGTIExsbG0NXVRfv27REVFQWpVIrFixejUqVKUFdXh4ODA7Zt2yb3aymRSNCnTx/h+dmzZ2WOCwBpaWkYOXIkGjdujPLly0NLSwvq6uqwsrJCr169EBYWJlPn19fk6xIbGwtfX1+YmJhAXV0d4eHhAIBXr16hf//+
MDIygra2Npo0aYILFy7k2L43OTkZc+bMQd26daGnpwd1dXVUqlQJfn5+iI+PF8p9baf79Uo3ADRr1kzudsOPHz9G3759UatWLZQrVw5qamrQ1taGo6MjxowZgxcvXuS4/4MHD+Dp6QkDAwPo6uqibdu2uHv3bo77KHLsr69VRjY2NpnalefUbv358+cYP348HB0doaWlBU1NTdjb22PMmDF49uyZTNlv/x/dvXsXHTt2hL6+PrS1teHh4YFHjx7J7PPt5+F7cvPe5Sb27ISFhaFs2bJC/VOmTBG23bt3D/3794etrS00NDSgp6eHhg0bYtOmTZBKpTL1fPu5PXToEOrXrw9NTU0YGxtj0KBBSEpKkiumb+s6cOAA6tatC01NTZiYmGDQoEF48+aNzD4Z3+ONGzdi0aJFcHBwgJqaGn799Veh3M2bN+Ht7Q0rKyuoq6tDT08PdevWxbx58/Dp06cc4zp27Bjq168PLS2tbOPYsWMHOnbsCDs7OxgYGEBVVRVly5ZFkyZNsGHDhkyv27e2bNkCJycnaGhowMLCAhMmTBDuFHyV234OWX2nNG3aFDY2NjLlMn5Ov9ab07FiYmIwevRo4W+Ejo4OXFxcsHDhQnz+/FmmrDx/T+Slr68vPPbw8ECLFi3Qs2dPLF++HEOGDJG7Hnm8e/cOM2fORK1ataCrqwt1dXXY2tpiwIABiIiIyFQ+t9/rWfWpkPd7LSe3bt2Cu7s7tLW1YWhoiB49eiAmJibb9zO3n9tvv1PXrVuHGjVq5Pi5pRJKSsValSpVpACkAKTNmzeXa5+NGzcK+zRp0kRmm4+Pj7Bt6tSpwvoffvhBWJ/VEhsbK42MjMyxjJWVlVQqlUpTU1Olnp6eOZb9/fffZeLKuK1y5cqZytvY2EgHDx6cZV0XLlyQ63XJKZ6v/1U+fPiQYxk1NTXpxYsXhTq/fU0qVaok8/zGjRvSpKQkabVq1TLVpa6uLq1atWqW70d8fHyW+3xdypcvL/3333+lUqlUOnXq1BxjzlhvVo4dO5bj/tbW1tI3b95k+fmytraWlilTJtM++vr60vDwcGGfjDH6+PgodOyMn92slo0bN0qlUqm0SZMmmdZJpVJpeHi41MTEJNv9jYyMpDdv3szyPM3MzKTa2tqZ9nF0dJSmpaVl+3n4Hnnfu7zE/vU74ObNm9KyZcsK6+fMmSOU379/v1RDQyPb+nv16iVNT08Xymd8L+zs7LLcZ9CgQd89/2/rcnBwyLIuJycnaXJysrBPxvf42/9zo0aNkkqlUumOHTukqqqq2Z6Ti4uLNDExMcs4nJ2dpRKJ5LtxeHl55fj+fY0lq7hr1qyZ5T7u7u4yr7WVlZWw7cyZM1nWlfFzntV3fMayWS1f683uWKGhoVIDA4Ns92/WrJn048ePQnl5/p7Ia/z48cJ+VlZW0piYGLn3zUp2fwNjY2MzfZYyLlpaWtKTJ08K5RX5Xs/qe1De77Xs3L59W6qnp5dpPysrK6mhoWGW72dePreOjo5yfW6pZOKdimIsKSkJDx48EJ63atWqQI7z8uVLob+GpaUldu7ciaCgIGzduhUTJkxAtWrVIJFIYGZmhpCQEJmr/W3atEFISAhCQkKwZ88eAMDy5cuFztCqqqqYMWMGAgMDhXa+wJd2v5cvX84ynvj4eKxfvx7btm2DtrY2gC9taFetWoXhw4cjMDAQDRo0EMovWbJErvMMCQnBxIkThefOzs5C7CEhIQAAFRUVTJ48Gdu2bcOxY8cQHByMwMBAjBkzBgCQkpKCGTNmZHuM6OhozJgxA3///TfWrFkDIyMjzJ8/H3fu3AEAqKmp4Y8//sCRI0fg4eGR7RX9YcOGCfs4Oztjx44dOHbsGLp06QIAePr0KXx8fAB8aacbEhICU1NTmdfk63n17ds3x9fFysoKs2bNwp49e3DixAkEBwdj//79cHd3B/Dl6vvatWuz3DcqKgqOjo7Yv38/Nm7cKDRHSEhIwMiRI3M8
bm6PPWnSJOF9+uqvv/4SztPDwyPHY/Xu3Vu481GpUiXs2LEDf/31FxwdHQF8+X/Qq1cvpKenZ9o3NjYWVapUwd69e7Fo0SKoqqoCAMLDw3Hy5Mnvnmd25H3v8hI7ANy5cwctWrTAq1evIJFIsHz5cowfPx7Al/9vP/30k3ClcfDgwTh+/Dj+/PNPWFlZAfhyh3Tjxo1Z1v3o0SP06NEDR44ckblyvH79erx//z5Xr8e9e/fQr18/BAYG4rfffhNe55s3b2LBggVZ7hMREYEOHTpg//79OHDgAFq2bIm4uDj069dPuILepk0bHD58GCtWrBCufF+7dg2//PJLlnWGhYWhb9++342jQ4cOWLVqFQ4dOoQzZ84gKCgI69evh5GREQBg2bJliIuLy/YYP//8MwIDA+Hn5yesP378OLZv356bl+27li5dir/++ktmXcbvvpo1a2a776dPn+Dl5SUMJNClSxccPXoUe/bsQY0aNQAAZ86cwe+//w5A/r8n8li0aJHM6/3ff/+hadOmePr0qbBuwoQJwlX0b+8k58bQoUOFuxHlypXDhg0bcODAATRq1AjAlzvHvXr1Eu7AKfK9npW8fq+NGjUKiYmJAAAzMzNs3rwZe/bsgba2Nl6/fp3lPnn53N67d6/QPrdUBImd1ZDiYmJiZK4ErF27Vq79cnun4sOHD1JlZWUpAGn16tWl165dk3748CHb+rO76vxVxitwI0aMkNlWu3ZtYdvQoUOF9RnPc8WKFcJ6Dw8PYX3dunWF9X/99ZewvlatWsL6W7duSUNCQmSWW7duyfXafPXPP/9If/zxR6mlpaVUTU0t0xUZQ0NDoey3V6aXLFmSqb6MV7NGjhwprP/06ZPU3Nw80/vx5s0b4f0AIN2+fbtwLmfOnJG5+nr//n2hvuyuMspjy5Yt0ubNm0uNjIxkjv116dy5c5avoaampjQ+Pl7YtmfPHmGbRCKRvnz5UiqV5vyZyc2xpVLZz0pkZGSmc8nqCu7Nmzdl9rt27ZpQ/s6dOzLbLl++nOk8VVVVZa6Quru75/ie51ZO711eY7e1tZUaGxtLAUiVlZUzXflcunSpULZatWoy/3cmTZokbKtfv76wT8bvkapVqwpXKNPS0qRaWlrCtoz/97KTsa46derIbBs+fLiwrUaNGsL6jO+xi4tLpjoXL14sbDc2Npb5Plu2bJmwTU9PT5qamqpwHC9fvpROmDBBWr16dam2tnaWdzcOHTqUZdxdu3aVOUa7du2EbR06dBDW58edCqlUvrtoWR3r8OHDMq/luXPnhM9Hxs+OmZmZVCrN/d+T7Ozfv1+o28HBQdqlSxfheaVKlaRPnz6VSqX/+7+oqqoqTUpK+m69Wb0+r1+/liopKQnr9+7dK5SPj4+XampqCtt2794tlUpz/70uleb8Pfi977WsxMfHy+y3b98+Ydu33w0ZPzuF8bmlkokdtYuxb0d/ePXqVYEcR0NDAz4+PtiwYQNu374NFxcXKCkpoUKFCqhXrx769OmD1q1by13f/fv3hcdfr/JkfH716tVM5TLKeBeibNmywmNXV1fh8dcrKgBkrsaMGDECZ8+elamvSZMmco+5fvLkSbRp0wZpaWnZlvm2XXVGX+8kZJSxLW7Dhg2Fx2pqaqhbt65wV+erhw8fyhy/Z8+e2R7vzp07qFKlSrbb5TFlyhTMnDkzxzLZnbO9vb3Me5Hx/ZZKpXj8+LHMe5ifx86NjJ81TU1N1KpVS3hetWpVGBgYCFdi79+/jzp16sjsb29vj/LlywvPM55TdlcD80teY//333+Fx9OnT8804tnXfj/Al89T48aNs4zj61XZbzVv3ly48qykpIQyZcoIbeZz+9pk9X2xbNkyAMiyTTsAdO7cOdO6jK9Z7dq1oaGhkeUxEhMT8ezZM1haWuY6jg8fPqBhw4Yyd5Ozkt3nN6tjHDlyROYYRUHGz0d8fDx++OGHLMvFxsbi1atXKFu2bL78PQkICBAef+17
0rFjRxw7dgwRERFo1qwZFi5ciL///hvAl3b/X/sV5lZERITMXb6M742RkRGqVKki3AX5+tnK7fd6Qfi2T1fGOL79bviqtHxuqWCw+VMxpq2tLfOD8dSpU3Ltl/HWcmpqqsy2jB18M1qzZg22bt2K7t27o1q1alBTU0NUVBR27doFd3d3HDx4UIEzUEzGjnlKSv/7CGc3xJ70O50hc2Pu3LnCD/q6detiz549CAkJwY4dO+Q6npmZWaZ1Gd+P/J5HJLfNS771+fNnmeYFvXr1wrFjxxASEoKff/5ZWJ9ds5rieuzcMjQ0lHmuovK/6zX5+fkrCMrKysLjRYsWyfxIzI3sPmtivzZZ/Z8rDPv37xd+mGlra2PJkiU4c+YMQkJCZEYgKwqf38Ly9TOSH39Pbt++LTy2t7eHqqoq9uzZI/xwfvjwIdq2bSt8xkaPHp3/J5SDgvxeVyQGeePg55bygklFMZfxquKpU6cytYv9KiIiAikpKQCAMmXKCOsztj19+/Ytzp8/n+X+SkpK6NWrF3bs2IHbt28jKSkJc+fOFbZn/FGd8Yd+Vl889vb2wuN//vlHZlvG5xnL5Zfg4GBIpVKZJeNdiu/FHh0dLTyePHkyunTpgkaNGmVKzrKT1Zd6xuEdL168KDxOSUnJsl9J5cqVZX4IPnjwINM5SaVSvH//XuhXIc+5ZeXVq1cyo/SsWrUK7u7uaNSokVx3xu7fvy9TLuP7K5FIULFixXw/dsbXWN7zzPhZ+/DhA27cuCE8Dw8Pl7maVxCfy+/J6b3La+wNGzaEt7c3gC/t3d3c3GSucDo4OAiPGzRokOVn7evnraDl9H1hZ2eX5T5Z/Z/L+Dpcu3ZNZmSajHXq6ellmZTIE0fG7wp3d3eMGDECTZs2RY0aNRATE5NlrLk9Rn7K+BkD5P+/k/HzUaFCBWGo7Kw+H1/74OTm70l2dHV1hceHDh0CAGhpaeHIkSNCX46vevfu/d2+BzmpVKmSzOuT8b149eqVzFX9r5+t3H6vf48i32t2dnYy+2WM486dO5nuUgDF73NLRQubPxVzo0aNws6dO4VhZXv06IETJ06gXbt20NPTw9OnT3H8+HH89ddfeP78OdTU1GQm/IqKioKvry9q166N9evXCx26vmVnZwcPDw+4uLjA3NwcaWlpMpPtZfyjnLHpR0hICI4ePQp9fX2YmprCzs4Ovr6+wg+fVatWwcTEBC4uLti/fz+uXLki7JvxB3FhyRj7rVu3sG/fPpiYmMDAwADVqlWDra2t8Adk4cKFUFVVxePHj2WGqcytbt26CVfdli9fDlNTU1StWhXr1q3LcihQAwMDdO7cWUggPTw8MH78eNjZ2eHt27f477//cO7cOWHiw4znFhkZCeDLUL9KSkpQUVFBjRo1oKenl2Vs5cqVg7a2tvDjfuLEiWjfvj1Onz6dbcfcjD58+IBOnTph7NixePv2Lfz9/YVtzZs3z7Hpk6LHLlu2LF6+fAngy+erXbt2UFJSQt26dbOdE6RGjRqoVasWrl+/DuDL/6Pp06dDWVlZZojkatWqwcXF5bvnnZ2oqCiZoTvlvVKf03uX19glEgnWr1+PN2/e4PDhw4iNjUWLFi1w7tw5WFlZwcvLCxMnTsT79+9x4cIF/Pjjj+jZsyf09fXx9OlTPHjwAIGBgfD09MTUqVMVfm3kcfnyZQwcOBCdO3fGjRs3sHr1amFbt27d5K6nW7du8Pf3R3JyMl68eIEff/wRgwcPRkxMDCZNmiSU6927t8ydldzEYWtrK6wLCgrCn3/+CX19fcybN0+uJnt79uyBv78/mjRpgtOnTwtNSHJ7rvIyNDSERCIRPpMLFy5E3bp1oaSkJNNs5lstW7aEpaUlnjx5gujoaLRu3RoDBgyAiYkJYmNj8fjxY5w4cQKVKlUS/t/m5u9Jdrp06YKVK1cCgDCkq6urK6KiojLt/+LFC6Smpmb5XsqjTJky6NixI/bv3w/gy0AZCQkJMDQ0
xPz58/HhwwcAgLGxsZC85PZ7/XsU+V4rW7YsmjdvLgzT/jVuTU3NbP+vFrfPLRUxhdN1gwpSbGzsd4foAyAz7GfLli0zbVdTU5MZrjVjBzJ1dfUc687YcS08PFymU9vXpV+/flKpVL4hZX/77TeZc8y4LWMntew6HZ45c0ZY/3UoW3m8fv1apiPp16VFixZSqTT7IU6bNm2aZSdHeTo/Zjf0oJqamswQmhnP78WLFzkOKZvVefv7+2dZLiQkJMfX5JdffvnuOWfs1J6xE3D58uWlOjo6mfbV09OT3rlzR9gnuw6KuT22VCqV9ujRI8t9njx5IpVKs+/Aevfu3RyHZS1btux3h2X9Ki+dYbPyvfcuP2L/8OGDzPdIxYoVhc6u+/bty3FI2W/PM7vzl0pzP2CAPEO5Vq9eXaYTbnbvcUbyDCmbkJCQZRwZh/LOLo6kpCSpra1tpjKmpqZSe3v7LOPLGHfGMhmXli1bygxTnF8dtaVSqdTV1TXT8ZSVlb97rAsXLuQ4pOy3/69z8/ckO2/evJHWqlUrx3oyflaGDBny3Tpzen3kGVL2xIkTQnlFvtdz6qj9ve+17Ny5c0eqq6ubaT9LS8ssh5TN6+c2u6GQv/3cUsnE5k8lgKmpKc6cOYP9+/fjxx9/RIUKFaChoQEdHR1UqVIFPXv2xMGDB2X6ImzZsgXdunWDnp4etLS0hCuTGTs7ZxQQEIAOHTrA2toaOjo6UFZWhrGxMdzd3REYGCjTGdLBwQFbtmxB1apVhaEWM1JWVsa+ffuwYcMGNGnSBAYGBlBRUYGJiQk6dOiAU6dOyVwtLExlypTBvn37ULt2bairq2fa7u7ujr1798LZ2RmampqoUKECpkyZIjOra25paWnhzJkz6Nu3LwwNDaGpqYlGjRrh1KlTMn1mvg6fC3y5Inb58mXMmzcP9evXh76+PlRVVWFubo769etj0qRJ2Lt3r8xxfv31VwwaNAgmJia5auM7c+ZMzJw5U5j0rEaNGti2bZtcd5Ls7Oxw/vx5uLu7Q1dXF9ra2nB3d8f58+dRtWrVAjn24sWL4eXlJVx5lZejoyNu3bqFsWPHwt7eHhoaGtDQ0EDlypUxatQo3Lp1K1OzisLyvfcuP2LX0NDAoUOH4OzsDODLxINubm6Ij49Hp06dcOPGDQwcOBB2dnbQ0NCAtrY27Ozs0K5dO6xatQpDhw4tiFOX0bFjRxw9ehT16tWDhoYGjIyMMGDAAJw5cybXnXC7d++Oy5cvo3fv3rC0tISqqqowYducOXNw/vz5bO/gde/eHXv27IGLi0u2cWhpaeH06dPo1KkTDA0Noa+vjw4dOuD8+fNyzfQ8YcIErFy5Eo6OjlBTU4O5uTnGjRuHgwcPZmqqlF/+/PNPeHh4yDQtkoerqytu374NPz8/VK1aVZh80cbGBi1btsTChQtlhtrOzd+T7BgYGODChQuYM2cOXFxcoK2tDVVVVVhaWqJr1644ePAgDh8+LLxWK1euxOLFi3P3gmRgamqKq1evYvr06XB2doaWlhbU1NRgbW2Nfv364caNG2jZsqVQXpHv9Zwo+r1WtWpVnD9/Hq1atYKWlhb09fXRtWtX/PPPPzLNqL7GkdfP7ciRI7F582bUqFFDmNSwoD+3VHRIpNIi3ouQqBSQSqWZ/lB8/PgRdnZ2Qr+XAwcOoGPHjmKERyQaX19fbN68GQAwderU784AT1RUFOXv9du3bwsXGpSUlPDy5UuZ/pa50bRpU2FUxY0bN2YaQY5KD/apICoCvLy84OrqikaNGsHMzAz//fcf/vjjD+EPj6mpqcxVMCIiKtqKwvf6x48f0bRpUwwfPhzOzs7Q1dXFzZs3MWHCBKFMu3btFE4oiDJiUkFUBERHR2c7cpeOjg62b9+u8BjrRERU+IrK9/qlS5dw6dKlLLdVqlRJ6PBOlFdMKoiKgJ9++gnq6up48OABXr9+DTU1NdjY2MDNzQ2jR48WhmIkIqLioSh8r6uq
qmLEiBEICQlBdHQ0EhMToaOjAwcHB3h6emLYsGFy9+sg+h72qSAiIiIiojxhV3wiIiIiIsoTJhVERERERJQnpa5PRWpqKm7cuIFy5cpxzGQiIiIiypX09HQ8f/4cNWvWVHim9pKo1L0SN27cQN26dcUOg4iIiIiKscuXL6NOnTpih1FklLqk4uuMkJcvX4aZmZnI0RARERFRcRIbG4u6devKNct4aVLqkoqvTZ7MzMxgYWEhcjREREREVByxGb0svhpERERERJQnTCqIiIiIiChPmFQQEREREVGelLo+FfJKS0vD58+fxQ6DckFVVRXKyspih0FERCQjPT0dKSkpYodBcuLvCcUwqfiGVCpFXFwc3r59K3YopAADAwOYmppCIpGIHQoRERFSUlIQGRmJ9PR0sUOhXODvidxjUvGNrwmFiYkJtLS0+GEqJqRSKZKTk/HixQsA4HDBREQkOqlUitjYWCgrK8PS0pKjBRUD/D2hOCYVGaSlpQkJRdmyZcUOh3JJU1MTAPDixQuYmJjw1iUREYkqNTUVycnJMDc3h5aWltjhkJz4e0IxTJkz+NqHgv/xi6+v7x37wxARkdjS0tIAAGpqaiJHQrnF3xO5x6QiC2zyVHzxvSMioqKGf5uKH75nucekgoiIiIiI8oRJRSny008/4Y8//hA7DABfrgAcOHBA7vLHjx+Hs7MzR88gIiIqAaytrbFo0aI81xMVFQWJRIKwsDAAQHBwMCQSicwongcOHICdnR2UlZUxevTobNdR3rCjtpxaBRws1OOd8O+Yq/K+vr7YvHkzgC/jK1eoUAHe3t6YOHEiVFRUcPPmTQQGBmLlypWZ9t2xYwd69+6NwYMHY/ny5fkS/1fTpk3DgQMHhP/sX8XGxqJMmTJy1+Pu7o7Jkydj27Zt+Omnn/I1RiIiIvofX19fvH37NlcX/3LrypUr0NbWFp5LJBLs378fnp6eeaq3QYMGiI2Nhb6+vrBu0KBB6NOnD0aOHAldXd1s11He8E5FCeLu7o7Y2FhERERg7NixmDZtGubOnQsAWLp0Kbp27QodHZ1M+61fvx4///wzduzYgY8fPxZKrKamplBXV8/VPr6+vliyZEkBRURERFQ0RUREwN/fHz169IC/vz8iIiLEDinPjI2NC2RgHDU1NZn5Jd6/f48XL16gdevWMDc3h66ubpbrKO+YVJQg6urqMDU1hZWVFYYMGQI3NzccOnQIaWlp2LNnD9q3b59pn8jISFy4cAG//PILKleujH379slsnzZtGpydnWXWLVq0CNbW1sLz4OBg1K1bF9ra2jAwMEDDhg3x33//YdOmTZg+fTpu3rwJiUQCiUSCTZs2AZBt/vT11uW+ffvQrFkzaGlpwcnJCaGhoTLHbd++Pa5evYqgoKAS9+VKRESUlY0bN8Le3h5z587F7t27MXfuXNjb2wt/T8Vw9uxZ1K1bF+rq6jAzM8Mvv/yC1NRUYfu7d+/Qq1cvaGtrw8zMDAsXLkTTpk1lmhllbP709TdFp06dIJFIZH5jfOvy5cuoWbMmNDQ0ULt2bdy4cUNme8bmT8HBwULC0Lx5c0gkkmzXUd4xqSjBNDU1kZKSglu3biEhIQG1a9fOVGbjxo1o27Yt9PX10bt3b6xfvz5Xx0hNTYWnpyeaNGmCW7duITQ0FAMHDoREIoGXlxfGjh2LqlWrIjY2FrGxsfDy8sq2rkmTJmHcuHEICwtD5cqV0aNHD5kvqQoVKkBPTw8tW7YsUl+uREREuZGUlJTtkrHFQEREBPr374/09HSkpaXJ/NuvXz/cuXNHrnrz09OnT+Hh4YE6derg5s2bWLlyJdavX4/ffvtNKOPn54d//vkHhw4dwsmTJxESEoLr169nW+eVK1cAfPlNEhsbKzz/1vv379GuXTs4Ojri2rVrmDZtGsaNG5dtvQ0aNMCDBw8AAHv37kVsbGy26yjv2KeiBJJKpQgKCsLff/+NESNG4L///oOysjJMTExkyqWn
p2PTpk1YunQpAKB79+4YO3YsIiMjYWNjI9exEhMTkZCQgHbt2qFixYoAAAcHB2G7jo4OVFRUYGpq+t26xo0bh7Zt2wIApk+fjqpVq+LRo0ewt7cH8OXLNTExEcD/xv7+ql+/fmjUqBEsLCzkipuIiEgsWTVF/srDwwNHjx4FAGzYsCHbAUrS09Ph4eGB6OhoYZ21tTVevnyZqaxUKs1jxP+zYsUKWFpaYtmyZZBIJLC3t8ezZ88wYcIETJkyBUlJSdi8eTO2b9+OFi1aAPiSLJibm2dbp7GxMQDAwMAgx98L27dvR3p6OtavXw8NDQ1UrVoVMTExGDJkSJbl1dTUhN8+hoaGQt1ZraO8452KEuTIkSPQ0dGBhoYG2rRpAy8vL0ybNg0fPnyAurp6pjGXT548iaSkJHh4eAAAjIyM0LJlS2zYsEHuYxoaGsLX1xetW7dG+/btsXjxYsTGxioUf40aNYTHZmZmAL7MZvlVTnFJJJJc32UhIiIqyqKionLcXlj9IDO6d+8eXF1dZX5TNGzYEO/fv0dMTAz+/fdffP78GXXr1hW26+vro0qVKvly7Bo1akBDQ0NY5+rqmud6KX/wTkUJ0qxZM6xcuRJqamowNzeHisqXt9fIyAjJyclISUmRmdVz/fr1eP36tTAdPfDlysetW7cwffp0KCkpQUlJKdMVjm9nl9y4cSNGjhyJ48ePY9euXfj1119x8uRJ1K9fP1fxq6qqCo+/flllvEKT05erVCr97pcvERFRUfD+/ftstykrKwuPra2toaysnOnu/NdyPj4+Muv4d5DExDsVJYi2tjbs7OxQoUIFIaEAIHS0Dg8PF9a9evUKBw8exM6dOxEWFiYsN27cwJs3b3DixAkAX25JxsXFySQW3w4PCwA1a9aEv78/Lly4gGrVqmH79u0Avtx6zOrLUBE5NW36XscuIiKiokJbWzvbJeNV+L59+2bbdEkqlWLQoEFy1ZufHBwcEBoaKhPXP//8A11dXVhYWMDW1haqqqoy/SISEhLw8OHDHOtVVVX97u8FBwcH3Lp1S+YOzcWLFxU8E8pvTCpKAWNjY9SqVQvnz58X1v35558oW7YsunXrhmrVqgmLk5MTPDw8hKZETZs2RXx8PObMmYPHjx9j+fLlOHbsmFBPZGQk/P39ERoaiv/++w8nTpxARESE0K/C2toakZGRCAsLw8uXL/Hp0yeFz8PJySnbbenp6ejbt6/CdRMRERU1lSpVwvr166GkpARlZWWZf9evXw87O7sCO3ZCQoLMRcewsDA8efIEQ4cOxZMnTzBixAjcv38fBw8exNSpU+Hn5wclJSXo6urCx8cH48ePx5kzZ3D37l3069cPSkpKmZphZ2RtbY2goCDExcXhzZs3WZbp2bMnJBIJBgwYgPDwcAQGBmLevHkF9RJQLjGpKCX69++Pbdu2Cc83bNggDN32rS5duuDQoUN4+fIlHBwcsGLFCixfvhxOTk64fPmyzEgLWlpauH//Prp06YLKlStj4MCBGDZsmHD1pEuXLnB3d0ezZs1gbGyMHTt2KHwOISEhaNq0qcyXqpLSl4+wVCrFrl27FK6biIioKPL19cWDBw8wfvx4dOvWDePHj8eDBw/g6+tboMcNDg5GzZo1ZZbp06ejfPnyCAwMxOXLl+Hk5ITBgwejX79++PXXX4V9FyxYAFdXV7Rr1w5ubm5o2LAhHBwcZO7CfGv+/Pk4efIkLC0tUbNmzSzL6Ojo4PDhw7h9+zZq1qyJSZMmYfbs2fl+7qQYiTQ/hwQoBmJiYmBpaYknT55kak7z8eNHYeSjnD74xdGHDx9QpUoV7Nq1q1h2anr58iWqVKmCq1evIi0tDevXr0dUVBSsra2hrq6O6dOnAwAOHDiAypUrl8j3kIiIipeS/LsiN5KSklC+fHnMnz8f/fr1EzscueT03uX0W7I0Y0ftUkJTUxNbtmzJcqi54iAqKgorVqwQhroNCAiQ2Z6cnIynT5+iadOm
ePbsmRghEhEREYAbN27g/v37qFu3LhISEjBjxgwAQMeOHUWOjAoSk4pSpGnTpmKHoLDatWtnOXnfV7NmzYJEIslTnw0iIiLKH/PmzcODBw+gpqYGFxcXhISEwMjISOywqAAxqaAS4WvfCuBLp+3Zs2dj4MCBwnwXREREVDhq1qyJa9euiR0GFTJ21KYSZ968eZg2bRratWuX41jgRERERJQ/mFRQidO9e3cYGRnh+vXr6N69O1JTU8UOiYiIiKhEY1JBJU6FChWwZ88eaGho4OjRoxg5cmS2kwcREREVNP4NKn7S09PFDqHYYZ8KKpHq1auHbdu24ccff8TKlSthY2OD8ePHix0WERGVIqqqqpBIJIiPj4exsXGOk79R0SCVSpGSkoL4+HgoKSlBTU1N7JCKDSYVVGJ17twZCxYswJgxY/Dzzz/DysoK3bp1EzssIiIqJZSVlWFhYYGYmBhERUWJHQ7lgpaWFipUqCAzEAzljEkFlWijR49GZGQkVq9ezStERERU6HR0dFCpUiV8/vxZ7FBITsrKylBRUeHvhlxiUlGK/PTTT3BwcMDEiRPzve5p06bhwIEDCAsLAwD4+vri7du3OHDgQJ7rTklJQeXKlbFnz54c56rIzoIFCzBgwABUq1Ytz7EQERHllrKyMpSVlcUOg6hAMamQ04slboV6PJORp3JV3tfXF5s3bwbwpQ1nhQoV4O3tjYkTJ0JFRQU3b95EYGAgVq5cKezTtGlTnD17FgCgpqYGIyMj1KpVC3369EHnzp3z72TySE1NDePGjcOECRMQFBSU6/2VlZVlEoqYmBioq6vD2Ng4P8MkIiIiKrXYUKwEcXd3R2xsLCIiIjB27FhMmzYNc+fOBQAsXboUXbt2hY6Ojsw+AwYMQGxsLB4/foy9e/fC0dER3bt3x8CBA8U4hWz16tUL58+fx927d/NUz61bt1C/fn106NABHz58yKfoiIiIiEo3JhUliLq6OkxNTWFlZYUhQ4bAzc0Nhw4dQlpaGvbs2YP27dtn2kdLSwumpqawsLBA/fr1MXv2bKxevRpr167FqVP/u1syYcIEVK5cGVpaWrC1tcXkyZNz1T70+PHjaNSoEQwMDFC2bFm0a9cOjx8/FranpKRg+PDhMDMzg4aGBqysrBAQECBsL1OmDBo2bIidO3cq+Op8oaamhuTkZFy8eBG9evVCWlpanuojIiIiIiYVJZqmpiZSUlJw69YtJCQkyN0fwcfHB2XKlMG+ffuEdbq6uti0aRPCw8OxePFirF27FgsXLpQ7lqSkJPj5+eHq1asICgqCkpISOnXqJIwDvWTJEhw6dAi7d+/GgwcPsG3bNlhbW8vUUbduXYSEhMh9zKzY29vj4MGDUFNTw/79+zFu3Lg81UdERERE7FNRIkmlUgQFBeHvv//GiBEj8N9//0FZWRkmJiZy7a+kpITKlSvLDH/366+/Co+tra0xbtw47Ny5Ez///LNcdXbp0kXm+YYNG2BsbIzw8HBUq1YN0dHRqFSpEho1agSJRAIrK6tMdZibm+O///6T63g5ady4MTZv3owePXpg0aJFsLGxwciRI/NcLxEREVFpxTsVJciRI0ego6MDDQ0NtGnTBl5eXpg2bRo+fPgAdXX1XA2NJpVKZcrv2rULDRs2hKmpKXR0dPDrr78iOjpa7voiIiLQo0cP2NraQk9PT7gL8bUOX19fhIWFoUqVKhg5ciROnDiRqQ5NTU0kJyfLfcycdO/eHbNmzQLwZdjZ/BilioiIiKi0YlJRgjRr1gxhYWGIiIjAhw8fsHnzZmhra8PIyAjJyclISUmRq560tDRERETAxsYGABAaGopevXrBw8MDR44cwY0bNzBp0iS56wOA9u3b4/Xr11i7di0uXbqES5cuAYBQR61atRAZGYmZM2fiw4cP6NatG3788UeZOl6/fp2vIzb9/PPPGDRoEKRSKWbPni00xSIiIiKi3GHzpxJEW1sbdnZ2mdY7OzsDAMLD
w4XHOdm8eTPevHkjNFm6cOECrKysMGnSJKFMbpohvXr1Cg8ePMDatWvRuHFjAMD58+czldPT04OXlxe8vLzw448/wt3dHa9fv4ahoSEA4M6dO6hZs6bcx/0eiUSCZcuWwczMDGPGjOGsmUREREQK4q+oUsDY2Bi1atXK8od8cnIy4uLiEBMTg4sXL2LChAkYPHgwhgwZgmbNmgEAKlWqhOjoaOzcuROPHz/GkiVLsH//frmPX6ZMGZQtWxZr1qzBo0ePcPr0afj5+cmUWbBgAXbs2IH79+/j4cOH+Ouvv2BqagoDAwOhTEhICFq1aqXYi5ANFRUVTJ06FXp6esK61NTUfD0GERERlV4BAQGoU6cOdHV1YWJiAk9PTzx48CDHfTZt2gSJRCKzaGhoyJSRSqWYMmUKzMzMoKmpCTc3N0RERBTkqeSISUUp0b9/f2zbti3T+rVr18LMzAwVK1ZE586dER4ejl27dmHFihVCmQ4dOmDMmDEYPnw4nJ2dceHCBUyePFnuYyspKWHnzp24du0aqlWrhjFjxgjzZ3ylq6uLOXPmoHbt2qhTpw6ioqIQGBgo3D0IDQ1FQkJCpiZR+UkqlWLu3Llo0aIFPn78WGDHISIiotLj7NmzGDZsGC5evIiTJ0/i8+fPaNWqFZKSknLcT09PD7GxscLybSuROXPmYMmSJVi1ahUuXboEbW1ttG7dWrTfMBKpVCoV5cgiiYmJgaWlJZ48eQILCwuZbR8/fkRkZCRsbGwyZYPF3YcPH1ClShXs2rULrq6uYoeTa15eXnBycsLEiRNzLJeX9/DZs2dwcHBAYmIiunfvjm3btrFJFBEREcnI6bekPOLj42FiYoKzZ8/ihx9+yLLMpk2bMHr0aLx9+zbL7VKpFObm5hg7dqwwPH5CQgLKlSuHTZs2oXv37rmOK6/4i6mU0NTUxJYtW/Dy5UuxQ8m1lJQUVK9eHWPGjCnQ45ibm2Pfvn1QUVHBzp07ZfqQEBEREWX07t07JCYmCsunT5/k2i8hIQEAhD6j2Xn//j2srKxgaWmJjh074u7du8K2yMhIxMXFwc3NTVinr6+PevXqITQ0VIGzyTsmFaVI06ZNs5xVu6hTU1PDr7/+Ck1NzQI/VosWLbBu3ToAwKxZs7BmzZoCPyYREREVP46OjtDX1xeWgICA7+6Tnp6O0aNHo2HDhqhWrVq25apUqYINGzbg4MGD2Lp1K9LT09GgQQPExMQAAOLi4gAA5cqVk9mvXLlywrbCxtGfiL7h4+ODqKgoTJs2DUOHDoWFhQU8PDzEDouIiIiKkPDwcJQvX154rq6u/t19hg0bhjt37mQ5eE5Grq6uMs3VGzRoAAcHB6xevRozZ85UPOgCxDsVRFmYMmUKfH19kZaWhm7duuHp06dih0RERERFiK6uLvT09ITle0nF8OHDceTIEZw5cybXfTFUVVVRs2ZNPHr0CABgamoKAHj+/LlMuefPnwvbChuTCqIsSCQSrFmzBu7u7pgzZ47MlQgiIiIieUmlUgwfPhz79+/H6dOnhcmFcyMtLQ23b9+GmZkZAMDGxgampqYICgoSyiQmJuLSpUuiDcjD5k9E2VBVVcXRo0c5AhQREREpbNiwYdi+fTsOHjwIXV1doc+Dvr6+0F/U29sb5cuXF/plzJgxA/Xr14ednR3evn2LuXPn4r///kP//v0BfLn4OXr0aPz222+oVKkSbGxsMHnyZJibm8PT01OU82RSQZSDjAnF69evMXv2bMycORNqamoiRkVERETFxcqVKwF8GTAno40bN8LX1xcAEB0dLfOb482bNxgwYADi4uJQpkwZuLi44MKFC3B0dBTK/Pzzz0hKSsLAgQPx9u1bNGrUCMePHxdtWgTOU5FBSZ6norQoqPcwPT0d9erVw9WrV+Ht7S3MdElERESlS17nqSipikS7juXLl8Pa2hoaGhqoV68eLl++nG1ZeaYtJ8pvSkpKmDFjBpSVlbFlyxZMnz5d7JCI
iIiIigzRk4pdu3bBz88PU6dOxfXr1+Hk5ITWrVvjxYsX2e7zvWnLKWs//fQT/vjjD7HD+K5Vq1YVyfk02rRpgxUrVgAApk+fjk2bNokbEBEREVERIXqfigULFmDAgAHo06cPgC8/KI8ePYoNGzbgl19+yXIfiURS6MNluZ/0L9TjHW/5/QlUMvL19cXmzZsBfOlgXKFCBXh7e2PixIlQUVHBzZs3ERgYiJUrVyIqKuq7Iw9kbOdX2Pr27YuZM2ciJCQEjRs3FiWG7AwcOBBRUVEICAjAgAEDYGFhITObJREREVFpJOqdipSUFFy7dk3mR5mSkhLc3NxynGI8p2nLv/Xp0yeZKdTfvXuXr+dQlLi7uyM2NhYREREYO3Yspk2bhrlz5wIAli5diq5du0JHRweWlpYyd3rGjh2LqlWryqzz8vIS7TzU1NTQs2dPLFmyRLQYcvLbb7+hR48eSE1NRZcuXXD79m2xQyIiIiISlahJxcuXL5GWlparKca/N235twICAmSmUM/Ya76kUVdXh6mpKaysrDBkyBC4ubnh0KFDSEtLw549e4QmRcrKyjA1NRUWHR0dqKioCM9NTEywaNEi2NjYQFNTE05OTtizZw+AL2Mtu7m5oXXr1vjax//169ewsLDAlClTAHwZS7lfv37C/lWqVMHixYtlYg0ODkbdunWhra0NAwMDNGzYUKYZW/v27XHo0CF8+PChMF66XFFSUsLGjRvxww8/wNDQECoqot/wIyIiIhKV6H0qcsvV1RXe3t5wdnZGkyZNsG/fPhgbG2P16tVZlvf390dCQoKwhIeHF3LE4tHU1ERKSgpu3bqFhIQE1K5dW679AgICsGXLFqxatQp3797FmDFj0Lt3b5w9exYSiQSbN2/GlStXhDsJgwcPRvny5YWkIj09HRYWFvjrr78QHh6OKVOmYOLEidi9ezcAIDU1FZ6enmjSpAlu3bqF0NBQDBw4UGY0pdq1ayM1NRWXLl3K51clf6irq2P//v0IDQ2Fg4OD2OEQERERiUrUS6xGRkZQVlbO0xTj305b/i11dXWZadMTExMVD7iYkEqlCAoKwt9//40RI0bgv//+g7KyMkxMTL6776dPn/DHH3/g1KlTwoyMtra2OH/+PFavXo0mTZqgfPnyWL16Nby9vREXF4fAwEDcuHFDuGKvqqoqMzqSjY0NQkNDsXv3bnTr1g2JiYlISEhAu3btULFiRQDI9MNcS0sL+vr6RboTvqGhoczza9euoUaNGlBVVRUpIiIiIiJxiHqnQk1NDS4uLjJTjKenpyMoKEjuKca/nba8NDty5Ah0dHSgoaGBNm3awMvLC9OmTcOHDx+grq4u17wKjx49QnJyMlq2bAkdHR1h2bJlCx4/fiyU69q1Kzp16oRZs2Zh3rx5qFSpkkw9y5cvh4uLC4yNjaGjo4M1a9YgOjoawJcf476+vmjdujXat2+PxYsXIzY2NlMsmpqaSE5OzuOrUji2bdsGV1dXDBs2DKVs6hciIiIi8Ud/8vPzg4+PD2rXro26deti0aJFSEpKEkaDyu205aVZs2bNsHLlSqipqcHc3Fy4c2BkZITk5GSkpKR8dybo9+/fAwCOHj2K8uXLy2zLeMcnOTkZ165dg7KyMiIiImTK7dy5E+PGjcP8+fPh6uoKXV1dzJ07V6Yp08aNGzFy5EgcP34cu3btwq+//oqTJ0+ifv36QpnXr1/D2NhYsRejkOnq6iItLQ1r166FjY0N/P0Ld7QwIiIiIjGJnlR4eXkhPj4eU6ZMQVxcHJydnXH8+HGh87Yi05aXVtra2rCzs8u03tnZGQAQHh4uPM6Oo6Mj1NXVER0djSZNmmRbbuzYsVBSUsKxY8fg4eGBtm3bonnz5gCAf/75Bw0aNMDQoUOF8hnvcnxVs2ZN1KxZE/7+/nB1dcX27duFpOLx48f4+PEjatas+b3TLhI6dOiAxYsXY8SIEZg4cSKsrKzQs2dPscMiIiIiKhSiJxUA
MHz4cAwfPjzLbcHBwTLPFy5ciIULFxZCVCWHsbExatWqhfPnz383qdDV1cW4ceMwZswYpKeno1GjRkhISMA///wDPT09+Pj4CPOIhIaGolatWhg/fjx8fHxw69YtlClTBpUqVcKWLVvw999/w8bGBn/++SeuXLkizI0RGRmJNWvWoEOHDjA3N8eDBw8QEREBb29vIY6QkBDY2toKfS6Kg+HDhyMyMhILFixAnz59YGFhgR9++EHssIiIiIgKXLEb/YkU079/f2zbtk2usjNnzsTkyZMREBAABwcHuLu74+jRo7CxsUF8fDz69euHadOmoVatWgC+zC5drlw5DB48GAAwaNAgdO7cGV5eXqhXrx5evXolc9dCS0sL9+/fR5cuXVC5cmUMHDgQw4YNw6BBg4QyO3bswIABA/LxFSgcc+fORZcuXZCSkgJPT0/cv39f7JCIiIiICpxEWsp6lcbExMDS0hJPnjyBhYWFzLaPHz8iMjISNjY20NDQECnCgvHhwwdUqVIFu3btkrsTvFju3r2L5s2b4+HDh9DX18/VvkXhPfzw4QNatGiB0NBQ/Pzzz5g9e7YocRAREVH+y+m3ZGlWJJo/UcHT1NTEli1b8PLlS7FD+a7Y2Fhs2bIl1wlFUaGpqSlMzjh69GixwyEiIiIqcEwqSpGmTZuKHYJc3NzcxA4hz4yNjTFmzBjheVpaGoAvs5kTERERlTTsU0FUwJKTk9G1a1eMHj2ac1gQERFRicQ7FUQF7Pz589i/fz+AL7OL+/n5iRwRERERUf7inQqiAtaqVSvMnTsXADBu3Djs3btX5IiIiIiI8heTiiykp6eLHQIpqKi+d2PHjsWwYcMglUrRu3dvhIaGih0SERERUb5h86cM1NTUoKSkhGfPnsHY2BhqamqQSCRih0VykEqlSElJQXx8PJSUlKCmpiZ2SDIkEgkWL16M6OhoHD58GB06dEBoaGiWM6ATERERFTdMKjJQUlKCjY0NYmNj8ezZM7HDIQVoaWmhQoUKUFIqejfhlJWVsWPHDjRt2hRXr15Fly5dcOPGjSIZKxEREVFuMKn4hpqaGipUqIDU1FRhGFAqHpSVlaGiolKk7y5pa2sLdyqWLFnChIKIiIhKBCYVWZBIJFBVVYWqqqrYoVAJZGpqikuXLhXp5IeIiIgoN3iZlEgEGROKGzdu4PfffxcxGiIiIqK84Z0KIhE9f/4cTZo0wbt372BgYIBhw4aJHRIRERFRrvFOBZGIypUrhwkTJgAARo4cicOHD4scEREREVHuMakgEtnEiRPRv39/pKeno3v37rh69arYIRERERHlCpMKIpFJJBKsWLECrVu3RnJyMtq1a4eoqCixwyIiIiKSG5MKoiJAVVUVu3fvRo0aNfD8+XN4eHjgzZs3YodFREREJBcmFURFhJ6eHgIDA2FhYQFra2uoqHAcBSIiIioe+KuFqAgpX748zp8/j/LlyzOpICIiomKDdyqIihgrKyshoZBKpTh37pzIERERERHljEkFUREllUoxZMgQNGnSBOvWrRM7HCIiIqJsMakgKqIkEglMTEwAAIMHD8bff/8tckREREREWWNSQVSETZ8+HT/99BPS0tLw448/4ubNm2KHRERERJQJkwqiIkwikWDdunVo1qwZ3r9/j7Zt2yImJkbssIiIiIhkMKkgKuLU1NSwb98+ODo64unTp2jbti0SExPFDouIiIhIwKSCqBgwMDBAYGAgTE1NcefOHY4IRUREREUKB8InKiasrKxw5MgRYcZtIiIioqKCSQVRMeLi4iLz/NOnT1BXVxcpGiIiIqIv2PyJqJh6/PgxatSogT///FPsUIiIiKiUY1JBVExt3boVDx8+RL9+/XD69GmxwyEiIqJSjEkFUTE1efJkdOvWDZ8/f0bnzp1x9+5dsUMiIiKiUopJBVExpaSkhM2bN6NRo0ZISEiAh4cHYmNjxQ6LiIiISiEmFUTFmIaGBg4cOIDKlSsjOjoa7dq1
w/v378UOi4iIiP5fQEAA6tSpA11dXZiYmMDT0xMPHjzIcZ+1a9eicePGKFOmDMqUKQM3NzdcvnxZpoyvry8kEonM4u7uXpCnkiMmFUTFXNmyZREYGAhjY2Ncv34d48aNEzskIiIi+n9nz57FsGHDcPHiRZw8eRKfP39Gq1atkJSUlO0+wcHB6NGjB86cOYPQ0FBYWlqiVatWePr0qUw5d3d3xMbGCsuOHTsK+nSyJZFKpVLRji6CmJgYWFpa4smTJ7CwsBA7HKJ8c/HiRfj7+2PXrl0wMTEROxwiIqISKa+/JePj42FiYoKzZ8/ihx9+kGuftLQ0lClTBsuWLYO3tzeAL3cq3r59iwMHDuQ6hoLAOxVEJUT9+vVx+vRpJhRERESF4N27d0hMTBSWT58+ybVfQkICAMDQ0FDuYyUnJ+Pz58+Z9gkODoaJiQmqVKmCIUOG4NWrV/KfQD5jUkFUgkgkEuHxxo0bsXv3bhGjISIiKrkcHR2hr68vLAEBAd/dJz09HaNHj0bDhg1RrVo1uY81YcIEmJubw83NTVjn7u6OLVu2ICgoCLNnz8bZs2fRpk0bpKWlKXQ+ecUZtYlKoGPHjqFv375QU1ODubk5GjVqJHZIREREJUp4eDjKly8vPFdXV//uPsOGDcOdO3dw/vx5uY8za9Ys7Ny5E8HBwdDQ0BDWd+/eXXhcvXp11KhRAxUrVkRwcDBatGghd/35hXcqiEqgVq1awdPTEykpKejYseN3R5kgIiKi3NHV1YWenp6wfC+pGD58OI4cOYIzZ87I3Rdj3rx5mDVrFk6cOIEaNWrkWNbW1hZGRkZ49OiR3OeQn5hUEJVAysrK2LZtG+rWrYvXr1/Dw8MDL168EDssIiKiUkcqlWL48OHYv38/Tp8+DRsbG7n2mzNnDmbOnInjx4+jdu3a3y0fExODV69ewczMLK8hK4RJBVEJpaWlhcOHD8PW1hb//vsvOnTogOTkZLHDIiIiKlWGDRuGrVu3Yvv27dDV1UVcXBzi4uLw4cMHoYy3tzf8/f2F57Nnz8bkyZOxYcMGWFtbC/t8nYvq/fv3GD9+PC5evIioqCgEBQWhY8eOsLOzQ+vWrQv9HAEmFUQlmomJCQIDA2FoaIhLly6hd+/eonXgIiIiKo1WrlyJhIQENG3aFGZmZsKya9cuoUx0dDRiY2Nl9klJScGPP/4os8+8efMAfGmRcOvWLXTo0AGVK1dGv3794OLigpCQELn6dhQEdtQuwiIiIuDj44OXL19CX18fmzZtQtWqVbMsK5VK0aJFC1y/fh1v374F8CWL7dKlC65du4bU1FRh/bd8fX2xefNmvHnzBgYGBgVzMiSaKlWq4ODBg3Bzc4OzszOUlHgtgYiIqLDIMyVccHCwzPOoqKgcy2tqauLvv//OQ1T5j78uirBBgwZh4MCBePjwISZMmABfX99syy5cuBAVK1aUWaeqqooJEybg1KlT2e63b98+qKqq5lfIVEQ1atQIDx8+xJQpU2SGnSUiIiLKD0wqiqgXL17g6tWr6N27NwCgS5cuePLkSZY9+u/evYsDBw7gl19+kVmvrq6O5s2bZ3v34fnz5/jjjz+wYMGCfI+fip4KFSoIj5OSkvDPP/+IGA0RERGVJEwqiqgnT57AzMwMKipfWqhJJBJUqFAB0dHRMuU+f/6MAQMGYPXq1VBWVs7VMQYMGIA5c+ZAV1c33+Kmou/169do2rQp3NzccOnSJbHDISIiohKASUUxN336dHTu3BkODg652m/dunWoUKECmjdvXkCRUVGlp6cHExMTfPz4Ee3bt8fjx4/FDomIiIiKOSYVRZSlpSViY2ORmpoK4Esnn+joaJkmLABw9uxZLF26FNbW1mjUqBESExNhbW2N+Pj4HOs/c+YMDh48CGtra1hbWwMAatSogRs3bhTI+VDRoaKigl27dqFmzZqIj4+Hh4cHXr16JXZYREREVIxx9KciysTEBLVq1cLWrVvh6+uLvXv3
wsLCAnZ2djLlQkJChMdRUVFwdnb+7ogBALBt2zaZ5xKJBLdu3eLoT6WEjo4Ojhw5AldXVzx8+BCenp44efIkNDQ0xA6NiIiIiiHeqSjCVq9ejdWrV6Ny5cqYNWsWNm7cCADo378/Dh06JFcdNWrUgKurKxITE2FhYYGffvqpIEOmYsTc3ByBgYHQ19fH+fPn4evri/T0dLHDIiIiomJIIpVn8NwSJCYmBpaWlnjy5AksLCzEDodIdKdPn4a7uzsMDAxw+fJloTkcERERZcbfkllj8yeiUq558+bYtWsXnJycmFAQERGRQphUEBE6deok8zw5ORlaWloiRUNERETFDftUEJGMw4cPw8bGBtevXxc7FCIiIiommFQQkUAqlWL58uV48eIF2rZtm2myRSIiIqKssPlTIWoVcFDsEPLVCf+OYodA+UwikWDXrl1o3Lgxbt++DQ8PD5w/f55DDRMREVGOeKeCiGTo6+vj6NGjMDc3x927d9G5c2ekpKSIHRYREREVYUwqiCgTS0tLHD16FDo6Ojhz5gz69++PUjb6NBEREeUCkwoiypKzszP27NkDZWVl/Pnnn5lmYSciIiL6in0qiChbrVu3xqpVq3D9+nV0795d7HCIiIioiGJSQUQ56t+/v9ghEBERURFXJJo/LV++HNbW1tDQ0EC9evVw+fJlufbbuXMnJBIJPD09CzZAIgIAfP78GaNGjcKtW7fEDoWIiIiKENGTil27dsHPzw9Tp07F9evX4eTkhNatW+PFixc57hcVFYVx48ahcePGhRQpEU2bNg1LliyBh4cHnj59KnY4REREVESInlQsWLAAAwYMQJ8+feDo6IhVq1ZBS0sLGzZsyHaftLQ09OrVC9OnT4etrW0hRktUuo0bNw729vZ4+vQp2rZti8TERLFDIiIioiJA1KQiJSUF165dg5ubm7BOSUkJbm5uCA0NzXa/GTNmwMTEBP369SuMMIno/5UpUwaBgYEwMTHBzZs30a1bN3z+/FnssIiIiEhkoiYVL1++RFpaGsqVKyezvly5coiLi8tyn/Pnz2P9+vVYu3atXMf49OkTEhMTheXdu3d5jpuoNLOxscHRo0ehpaWFv//+G0OHDuUcFkRERKWc6M2fcuPdu3f46aefsHbtWhgZGcm1T0BAAPT19YXF0dGxgKMkKvlq166NnTt3QklJCevWrcOsWbPEDomIiIhEJGpSYWRkBGVlZTx//lxm/fPnz2Fqapqp/OPHjxEVFYX27dtDRUUFKioq2LJlCw4dOgQVFRU8fvw40z7+/v5ISEgQlvDw8AI7H6LSpH379liyZAl0dHRQq1YtscMhIiIiEYmaVKipqcHFxQVBQUHCuvT0dAQFBcHV1TVTeXt7e9y+fRthYWHC0qFDBzRr1gxhYWGwtLTMtI+6ujr09PSERVdXt0DPiag0GTZsGB48eIDWrVuLHQoRERGJSPTJ7/z8/ODj44PatWujbt26WLRoEZKSktCnTx8AgLe3N8qXL4+AgABoaGigWrVqMvsbGBgAQKb1RFQ4zM3NhccRERFITU2Fg4ODiBERERFRYRM9qfDy8kJ8fDymTJmCuLg4ODs74/jx40Ln7ejoaCgpFauuH0Sl0vXr19GqVSvo6Ojg4sWLWTZhJCIiopJJ9KQCAIYPH47hw4dnuS04ODjHfTdt2pT/ARFRrlWoUAFlypTBo0eP0K5dO5w9exba2tpih0VERESFgLcAiChfGBkZ4dixYyhbtiyuXbuGHj16IC0tTeywiIiIqBAwqSCifGNnZ4dDhw5BXV0dhw8fxujRozmHBRERUSnApIKI8lWDBg2wdetWSCQSLFu2DAsXLhQ7JCIiIipgTCqIKN/9+OOPmDt3LgBg3759SE1NFTkiIiIiKkhFoqM2EZU8fn5+MDQ0RPfu3aGiwq8aIiKikox3KoioQEgkEvTp0weamprCunfv3okYERERERUUJhVEVOCkUikmTZqEWrVq4eXLl2KHQ0RE
RPmMSQURFbi3b99i+/btePToETp06IAPHz6IHRIRERHlIyYVRFTgypQpg8DAQBgYGCA0NBQ//fQT0tPTxQ6LiIiI8gmTCiIqFA4ODjhw4ADU1NSwd+9e/Pzzz2KHRERERPmESQURFZomTZpg48aNAID58+dj+fLlIkdERERE+YFJBREVqp49e+L3338HAIwaNQqPHj0SOSIiIiLKKw4eT0SFzt/fH8+ePYOrqyvs7OzEDoeIiIjyiEkFERU6iUSCZcuWiR0GERER5RM2fyIi0cXGxqJnz554/fq12KEQERGRAphUEJGopFIpunXrhh07dqBz58749OmT2CERERHlm4CAANSpUwe6urowMTGBp6cnHjx48N39/vrrL9jb20NDQwPVq1dHYGCgzHapVIopU6bAzMwMmpqacHNzQ0REREGdxncxqSAiUUkkEixfvhy6uro4e/Ys+vbtyzksiIioxDh79iyGDRuGixcv4uTJk/j8+TNatWqFpKSkbPe5cOECevTogX79+uHGjRvw9PSEp6cn7ty5I5SZM2cOlixZglWrVuHSpUvQ1tZG69at8fHjx8I4rUwkUqlUKsqRRRITEwNLS0s8efIEFhYWhXrsVgEHC/V4Be2Ef0exQ6AS5OTJk/Dw8EBqaiomTpwojBBFRERUlOT1t2R8fDxMTExw9uxZ/PDDD1mW8fLyQlJSEo4cOSKsq1+/PpydnbFq1SpIpVKYm5tj7NixGDduHAAgISEB5cqVw6ZNm9C9e3fFTi4PeKeCiIqEli1bYu3atQCAP/74Q3hMRERUFL179w6JiYnCIm/z3YSEBACAoaFhtmVCQ0Ph5uYms65169YIDQ0FAERGRiIuLk6mjL6+PurVqyeUKWxMKoioyPD19cWUKVMAAEOGDMGJEydEjoiIiChrjo6O0NfXF5aAgIDv7pOeno7Ro0ejYcOGqFatWrbl4uLiUK5cOZl15cqVQ1xcnLD967rsyhQ2DilLREXKtGnTEBUVhatXr6JKlSpih0NERJSl8PBwlC9fXniurq7+3X2GDRuGO3fu4Pz58wUZmiiYVBBRkSKRSLB27VokJyfDwMBA7HCIiIiypKurCz09PbnLDx8+HEeOHMG5c+e+2xfD1NQUz58/l1n3/PlzmJqaCtu/rjMzM5Mp4+zsLHdMAPDx40doaGjkap+ssPkTERU5ampqMgnFiRMnhDaoRERExYlUKsXw4cOxf/9+nD59GjY2Nt/dx9XVFUFBQTLrTp48CVdXVwCAjY0NTE1NZcokJibi0qVLQpmcpKenY+bMmShfvjx0dHTw77//AgAmT56M9evX5+b0BEwqiKhIW79+Pdzd3fHjjz/i8+fPYodDRESUK8OGDcPWrVuxfft26OrqIi4uDnFxcfjw4YNQxtvbG/7+/sLzUaNG4fjx45g/fz7u37+PadOm4erVqxg+fDiAL3f1R48ejd9++w2HDh3C7du34e3tDXNzc3h6en43pt9++w2bNm3CnDlzoKamJqyvVq0a1q1bp9B5MqkgoiKtZs2a0NLSwqlTpzBw4ECUslGwiYiomFu5ciUSEhLQtGlTmJmZCcuuXbuEMtHR0YiNjRWeN2jQANu3b8eaNWvg5OSEPXv24MCBAzKdu3/++WeMGDECAwcORJ06dfD+/XscP35crqZMW7ZswZo1a9CrVy8oKysL652cnHD//n2FzpN9KoioSKtVqxZ2796N9u3bY9OmTbCxsRFGiCIiIirq5LkYFhwcnGld165d0bVr12z3kUgkmDFjBmbMmJHrmJ4+fQo7O7tM69PT0xVuFcA7FURU5Hl4eGD58uUAgKlTp2LLli0iR0RERFR8OTo6IiQkJNP6PXv2oGbNmgrVyTsVRFQsDB48GJGRkZgzZw769esHCwsLNG/eXOywiIiIip0pU6bAx8cHT58+RXp6Ovbt24cHDx5gy5YtMrN45wbvVBBRsREQEAAvLy+kpqbi5MmTYodDRERULHXs2BGHDx/G
qVOnoK2tjSlTpuDevXs4fPgwWrZsqVCdvFNBRMWGkpISNm3ahI4dO6J79+5ih0NERFRsNW7cOF8v0PFOBREVKxoaGujRowckEgkAICUlBUlJSSJHRUREVHxcuXIFly5dyrT+0qVLuHr1qkJ1MqkgomIrISEBHh4e6NatG1JTU8UOh4iIqFgYNmwYnjx5kmn906dPMWzYMIXqZFJBRMXWo0ePcOHCBQQGBmL48OGcw4KIiEgO4eHhqFWrVqb1NWvWRHh4uEJ1MqkgomLLxcUF27dvh0QiwerVqzFnzhyxQyIiIiry1NXV8fz580zrY2NjoaKiWJdrJhVEVKx5enpi4cKFAIBffvkFO3fuFDkiIiKioq1Vq1bw9/dHQkKCsO7t27eYOHGiwqM/MakgomJv1KhRGD16NADAx8cnywl9iIiI6It58+bhyZMnsLKyQrNmzdCsWTPY2NggLi4O8+fPV6hOJhVEVCLMmzcPnTp1QkpKCry9vfH582exQyIiIiqSypcvj1u3bmHOnDlwdHSEi4sLFi9ejNu3b8PS0lKhOjlPBRGVCMrKyti6dSt8fHwwefJkqKqqih0SERFRkaWtrY2BAwfmW31MKoioxNDS0sJff/0ldhhERERFXkREBM6cOYMXL14gPT1dZtuUKVNyXR+TCiIqsUJCQrBx40asXbsWysrKYodDRERUJKxduxZDhgyBkZERTE1NhQllAUAikRReUnHmzBk0a9ZMkV2JiArF27dv0a5dOyQmJkJPTw+LFi0SOyQiIqIi4bfffsPvv/+OCRMm5FudCnXUdnd3R8WKFfHbb79lORsfEZHYDAwMsGbNGgDA4sWLsXjxYpEjIiIiKhrevHmDrl275mudCiUVT58+xfDhw7Fnzx7Y2tqidevW2L17N1JSUvI1OCKivPDy8sLs2bMBAGPGjMH+/ftFjoiIiEh8Xbt2xYkTJ/K1ToWaPxkZGWHMmDEYM2YMrl+/jo0bN2Lo0KEYOnQoevbsiX79+sHJySlfAyUiUsT48eMRGRmJVatWoWfPnjhz5gzq168vdlhERESisbOzw+TJk3Hx4kVUr14904iJI0eOzHWdEqlUKs1rYM+ePcOaNWswa9YsqKio4OPHj3B1dcWqVatQtWrVvFafr2JiYmBpaYknT57AwsKiUI/dKuBgoR6voJ3w7yh2CERySU1NhaenJ44ePQojIyNcuXIF1tbWYodFRETFkJi/JfOLjY1NttskEgn+/fffXNep8OhPnz9/xsGDB7FhwwacPHkStWvXxrJly9CjRw/Ex8fj119/RdeuXREeHq7oIYiI8oWKigp27tyJJk2awM7ODqampmKHREREJJrIyMh8r1OhpGLEiBHYsWMHpFIpfvrpJ8yZMwfVqlUTtmtra2PevHkwNzfPt0CJiPJCR0cHQUFB0NPTg5KSQt3JiIiISpSUlBRERkaiYsWKUFHJ20wTCv1lDQ8Px9KlS/Hs2TMsWrRIJqH4ysjICGfOnMlTcERE+cnAwEBIKNLT07F9+/ZME/4QERGVdMnJyejXrx+0tLRQtWpVREdHA/hy42DWrFkK1alQUjF16lR07doV6urqMutTU1Nx7tw5AF+aGzRp0kShoIiIClqfPn3Qq1cv+Pv7ix0KERFRofL398fNmzcRHBwMDQ0NYb2bmxt27dqlUJ0KJRXNmjXD69evM61PSEjgpHhEVCy0aNECADBnzhysWrVK5GiIiIgKz4EDB7Bs2TI0atRIZjbtqlWr4vHjxwrVqVBSIZVKZQL46tWrV9DW1lYoECKiwuTt7Y0ZM2YAAIYNG4ajR4+KHBEREVHhiI+Ph4mJSab1SUlJWf7Gl0euemR07twZwJehpnx9fWWaP6WlpeHWrVto0KCBQoEQERW2X3/9FVFRUdiwYQO8vLxw9uxZuLi4iB0WERFRgapduzaOHj2KESNGAICQSKxbtw6urq4K1ZmrpEJfXx/AlzsVurq60NTUFLapqamhfv36GDBg
gEKBEBEVNolEglWrVuHJkyc4efIk2rVrh4sXL8LKykrs0IiIiArMH3/8gTZt2iA8PBypqalYvHgxwsPDceHCBZw9e1ahOnOVVGzcuBEAYG1tjXHjxrGpExEVe6qqqtizZw8aN26M+/fv49atW0wqiIioRGvUqBFu3ryJgIAAVK9eHSdOnECtWrUQGhqK6tWrK1SnQgPSTp06VaGDEREVRXp6ejh69CgiIyPRuHFjscMhIiIqMJ8/f8agQYMwefJkrF27Nt/qlTupqFWrFoKCglCmTBnUrFkzx04c169fz5fgiIgKi4WFBSwsLITnr169gqGhocId1oiIiIoiVVVV7N27F5MnT87XeuVOKjp27Ch0zPb09MzXIIiIipLbt2/Dw8MDffr0EUaIIiIiKik8PT1x4MABjBkzJt/qlDupyNjkic2fiKgku3LlCmJiYjBz5kxYW1ujb9++YodERESUbypVqoQZM2bgn3/+gYuLS6Z+0iNHjsx1nQr1qSAiKsn69u2Lx48f448//sCgQYNgaWmJli1bih0WERFRvli/fj0MDAxw7do1XLt2TWabRCIp2KSiTJkycrctzmq2bSKi4uS3335DVFQUtm/fji5duuD8+fOoUaOG2GERERHlWWRkZL7XKXdSsWjRonw/OBFRUSWRSLBhwwY8e/YMwcHB8PDwwKVLl1C+fHmxQyMiIsoXKSkpiIyMRMWKFaGikrcGTHLv7ePjk6cDEREVN+rq6ti3bx8aNmyIe/fuYdy4cdixY4fYYREREeVJcnIyRowYgc2bNwMAHj58CFtbW4wYMQLly5fHL7/8kus6leQtmJiYKPM4pyW3li9fDmtra2hoaKBevXq4fPlytmX37duH2rVrw8DAANra2nB2dsaff/6Z62MSEcmjTJkyCAwMRI8ePbBq1SqxwyEiIsozf39/3Lx5E8HBwdDQ0BDWu7m5YdeuXQrVmas+FbGxsTAxMYGBgUGW/SukUikkEgnS0tLkDmDXrl3w8/PDqlWrUK9ePSxatAitW7fGgwcPYGJikqm8oaEhJk2aBHt7e6ipqeHIkSPo06cPTExM0Lp1a7mPS0QkL2tra2zfvl3sMIiIiPLFgQMHsGvXLtSvX1/mN33VqlXx+PFjheqUO6k4ffo0DA0NAQBnzpxR6GBZWbBgAQYMGIA+ffoAAFatWoWjR49iw4YNWd56adq0qczzUaNGYfPmzTh//jyTCiIqFIsWLUJycjImTpwodihERES5Fh8fn+XF+6SkJIUnfZU7qWjSpEmWj/MiJSUF165dg7+/v7BOSUkJbm5uCA0N/e7+UqkUp0+fxoMHDzB79ux8iYmIKCchISHCZEFWVlbo1auXyBERERHlTu3atXH06FGMGDECAIREYt26dXB1dVWoToW7eb958wbr16/HvXv3AACOjo7o06ePcDdDHi9fvkRaWhrKlSsns75cuXK4f/9+tvslJCSgfPny+PTpE5SVlbFixYpsx5D/9OkTPn36JDx/9+6d3PEREX2rcePGGDduHObNm4c+ffqgfPnyme6gEhERFWV//PEH2rRpg/DwcKSmpmLx4sUIDw/HhQsXcPbsWYXqlLujdkbnzp2DtbU1lixZgjdv3uDNmzdYsmQJbGxscO7cOYUCyQ1dXV2EhYXhypUr+P333+Hn54fg4OAsywYEBEBfX19YHB0dCzw+IirZZs+eja5du+Lz58/o1KmTcHGFiIioOGjUqBHCwsKQmpqK6tWr48SJEzAxMUFoaChcXFwUqlMilUqlud2pevXqcHV1xcqVK6GsrAwASEtLw9ChQ3HhwgXcvn1brnpSUlKgpaWFPXv2wNPTU1jv4+ODt2/f4uDBg3LV079/fzx58gR///13pm3f3ql4+vQpHB0d8eTJE1hYWMhVf35pFSDf+RQXJ/w7ih0CkWg+fvyIFi1a4MKFC7CyssLFixdhamoqdlhERFTAYmJiYGlpKcpvybzw8/PDzJkzoa2tjXPnzqFB
gwZ5npsiI4XuVDx69Ahjx44VEgoAUFZWhp+fHx49eiR3PWpqanBxcUFQUJCwLj09HUFBQblqz5Weni6TOGSkrq4OPT09YdHV1ZW7XiKi7GhoaODgwYOoVKkS/vvvP7Rr1w4pKSlih0VERJSlpUuX4v379wCAZs2a4fXr1/lav0JJRa1atbK83X/v3j04OTnlqi4/Pz+sXbsWmzdvxr179zBkyBAkJSUJo0F5e3vLdOQOCAjAyZMn8e+//+LevXuYP38+/vzzT/Tu3VuRUyEiUpiRkRECAwNhYmICb29vqKmpiR0SEREVMefOnUP79u1hbm4OiUSCAwcO5Fje19cXEokk01K1alWhzLRp0zJtt7e3z7Her10Xzp49C6lUitDQUJw7dy7LRRFy3/O4deuW8HjkyJEYNWoUHj16hPr16wMALl68iOXLl2PWrFm5CsDLywvx8fGYMmUK4uLi4OzsjOPHjwudt6Ojo6Gk9L/cJykpCUOHDkVMTAw0NTVhb2+PrVu3wsvLK1fHJSLKD3Z2doiIiICenp7YoRARURGUlJQEJycn9O3bF507d/5u+cWLF8v8nk5NTYWTkxO6du0qU65q1ao4deqU8Px7TZnmzp2LwYMHIyAgABKJBJ06dcqyXG7nnBP2k7dPhZKSEiQSCb5XXNFACouY7eDYp4Ko5Hv79i1OnDiBbt26iR0KEREVgLz8lpRIJNi/f79MX+LvOXDgADp37ozIyEhYWVkB+HKn4sCBAwgLC8vV8QHg/fv30NPTy3aiaQDQ19fPdb1y36mIjIzMdeVERKVJYmIiGjZsiPDwcEgkkkxXlYiIqOR49+4dEhMThefq6upQV1fP9+OsX78ebm5uQkLxVUREBMzNzaGhoQFXV1cEBASgQoUK2dbztaO2jo4Ozpw5AxsbG3E6altZWcm9EBGVRrq6umjevDkA4KeffsI///wjckRERFRQHB0dZaYtCAgIyPdjPHv2DMeOHUP//v1l1terVw+bNm3C8ePHsXLlSkRGRqJx48Y5zseWsaN28+bN872jdp7Sk/DwcERHR2ca8aRDhw55CoqIqDiSSCRYtGgRoqOjcejQIXTs2BGhoaGoVKmS2KEREVE+Cw8PR/ny5YXnBXGXYvPmzTAwMMjUXKpNmzbC4xo1aqBevXqwsrLC7t270a9fvyzr+tpRu1WrVkJH7TJlymRZ9ocffsh1rAolFf/++y86deqE27dvy/Sz+DrFd1HuU0FEVJCUlZWxfft2NGvWDFeuXEGbNm0QGhoKY2NjsUMjIqJ8pKurW6CDdEilUmzYsAE//fTTd0cXNDAwQOXKlXOc2qGgO2orNKTsqFGjYGNjgxcvXkBLSwt3797FuXPnULt27WxntiYiKi20tbVx+PBh2NjY4PHjx+jQoQM+fPggdlhERFSMnD17Fo8ePcr2zkNG79+/x+PHj2FmZpZtGU9PT8TFxSExMRFSqRQPHjzAmzdvMi2KNotS6E5FaGgoTp8+DSMjIygpKUFJSQmNGjVCQEAARo4ciRs3bigUDBFRSVGuXDkEBgaiQYMGePbsGWJjY2Frayt2WEREVMjev38vcwchMjISYWFhMDQ0RIUKFeDv74+nT59iy5YtMvutX78e9erVQ7Vq1TLVOW7cOLRv3x5WVlZ49uwZpk6dCmVlZfTo0eO78RRUR22FakpLSxNmpjYyMsKzZ89QpUoVWFlZ4cGDB/kWHBFRcWZvb49jx46hQoUKOV49IiKikuvq1ato1qyZ8NzPzw8A4OPjg02bNiE2NhbR0dEy+yQkJGDv3r1YvHhxlnXGxMSgR48eePXqFYyNjdGoUSNcvHgxx6a2iYmJQnOtmjVrIjk5OduyijTrUiipqFatGm7evAkbGxvUq1cPc+bMgZqaGtasWcMrcUREGdSrV0/meVxcHExNTUWKhoiIClvTpk1znOdt06ZNmdbp6+vn+KN/586duY6jTJkyiI2NhYmJCQwMDIS+0BlJpVKF+1Qo
lFT8+uuvSEpKAgDMmDED7dq1Q+PGjVG2bFns2rVLkSqJiEq87du3o1+/fti5cyc6duTkkUREVHhOnz4NQ0NDAMCZM2fyvX6FkorWrVsLj+3s7HD//n28fv0aZcqUyTLrISKiL53uPn78iB49eiA4OBh169YVOyQiIiolmjRpkuXj/JLn3hlPnjwBAFhaWuY5GCKikmz58uWIjo7G8ePH0b59e1y8eBE2NjZih0VERKXArVu35C5bo0aNXNevUFKRmpqK6dOnY8mSJcLMfDo6OhgxYgSmTp0KVVVVRaolIirRVFRUsHv3bvzwww8ICwuDh4cHLly4kO3kQ0RERPnF2dlZmF/uey2LCm2eihEjRmDNmjWYM2cObty4gRs3bmDOnDlYv349Ro4cqUiVRESlgq6uLo4ePQoLCwvcv38fnTp1wqdPn8QOi4iISrjIyEj8+++/iIyMxN69e2FjY4MVK1YIv+VXrFiBihUrYu/evQrVr9Cdiu3bt2Pnzp2Zpgi3tLREjx49sHLlSoWCISIqDczNzREYGIhGjRrh7Nmz2LBhA4YMGSJ2WEREVIJZWVkJj7t27YolS5bAw8NDWPf1t/zkyZPh6emZ6/oVSirU1dVhbW2dab2Njc13pxEnIiKgevXq2Lt3L86ePYtBgwaJHQ4REZUit2/fzrJPn42NDcLDwxWqU6HmT8OHD8fMmTNlbtl/+vQJv//+O4YPH65QIEREpY2bmxtmzpwJJSWFvoqJiIgU4uDggICAAKSkpAjrUlJSEBAQAAcHB4XqlPtORefOnWWenzp1ChYWFnBycgIA3Lx5EykpKWjRooVCgRARlWYfPnxA37594e3tLdO0lIiIKL+tWrUK7du3h4WFhTDS061btyCRSHD48GGF6pQ7qdDX15d53qVLF5nnHFKWiEhxixYtws6dO3HkyBGcO3cONWvWFDskIiIqoerWrYt///0X27Ztw/379wEAXl5e6NmzJ7S1tRWqU+6kYuPGjQodgIiIvm/s2LE4deoUTp8+jbZt2+LixYuoUKGC2GEREVEJpa2tjYEDB+ZbfXlqyBsfH4/z58/j/PnziI+Pz6+YiIhKHTU1NezduxdVq1ZFbGws2rZti4SEBLHDIiIikotCSUVSUhL69u0LMzMz/PDDD/jhhx9gbm6Ofv36ITk5Ob9jJCIqFQwMDBAYGAgzMzPcuXMHXbp0kelER0REVFQplFT4+fnh7NmzOHz4MN6+fYu3b9/i4MGDOHv2LMaOHZvfMRIRlRoVKlTA0aNHoa2tjaCgIIwYMULskIiIiL5LoaRi7969WL9+Pdq0aQM9PT3o6enBw8MDa9euxZ49e/I7RiKiUqVmzZrYvXs3ypUrB29vb7HDISIi+i6Fkork5GSUK1cu03oTExM2fyIiygceHh54/PgxGjZsKHYoRERUwtja2uLVq1eZ1r99+xa2trYK1alQUuHq6oqpU6fi48ePwroPHz5g+vTpcHV1VSgQIiKSlXFYv5s3byI4OFi8YIiIqMSIiopCWlpapvWfPn3C06dPFapT7iFlM1q0aBHc3d0zTX6noaGBv//+W6FAiIgoa9evX0fTpk0hkUjwzz//oFq1amKHRERExdChQ4eEx3///bfMPHRpaWkICgqCtbW1QnUrlFRUr14dERERMhNm9OjRA7169YKmpqZCgRARUdYcHR3h7OyMkJAQeHh44OLFizA3Nxc7LCIiKmY8PT0BABKJBD4+PjLbVFVVYW1tjfnz5ytUd66Tis+fP8Pe3h5HjhzBgAEDFDooERHJT0NDAwcOHECDBg3w4MEDtGvXDufOnYOOjo7YoRERUTGSnp4OALCxscGVK1dgZGSUb3Xnuk+FqqqqTF8KIiIqeIaGhggMDISxsTFu3LiBbt26ITU1VeywiIioGIqMjMzXhAJQsPnTsGHDMHv2bKxbtw4qKgpVQUREuWRra4sjR46gadOmOHbsGIYNG4ZVq1ZBIpGIHRoRERVxS5YswcCB
A6GhoYElS5bkWHbkyJG5rl+hjODKlSsICgrCiRMnUL16dZkRSgBg3759ilRLRETfUbduXezYsQOdOnVCZGQkPn36BA0NDbHDIiKiIm7hwoXo1asXNDQ0sHDhwmzLSSSSwksqDAwM0KVLF0V2JSKiPOrYsSNOnDiBJk2aQFVVVexwiIioGIiMjMzycX7JVVKRnp6OuXPn4uHDh0hJSUHz5s0xbdo0jvhERFTI3NzchMdSqRRPnz6FhYWFiBEREVFplquk4vfff8e0adPg5uYGTU1NLFmyBPHx8diwYUNBxUdERDlITU3FqFGjsGPHDly4cAH29vZih0REREWcn59fluslEgk0NDRgZ2eHjh07wtDQUO46c5VUbNmyBStWrMCgQYMAAKdOnULbtm2xbt06KCkpNDk3ERHlwefPn3Ht2jW8efMGHh4eCA0NRbly5cQOi4iIirAbN27g+vXrSEtLQ5UqVQAADx8+hLKyMuzt7bFixQqMHTsW58+fh6Ojo1x15ioTiI6OhoeHh/Dczc0NEokEz549y001RESUTzQ1NXHo0CHY2toiMjISHTp0QHJysthhERFREdaxY0e4ubnh2bNnuHbtGq5du4aYmBi0bNkSPXr0wNOnT/HDDz9gzJgxcteZq6QiNTU10ygjqqqq+Pz5c26qISKifGRiYoJjx47B0NAQly9fRs+ePZGWliZ2WEREVETNnTsXM2fOhJ6enrBOX18f06ZNw5w5c6ClpYUpU6bg2rVrcteZq+ZPUqkUvr6+UFdXF9Z9/PgRgwcPlhlWlkPKEhEVrsqVK+PQoUNo0aIFDh48CD8/PyxevFjssIiIqAhKSEjAixcvMjVtio+PR2JiIoAvo72mpKTIXWeu7lT4+PjAxMQE+vr6wtK7d2+Ym5vLrCMiosLXsGFD/PnnnwCAZcuW4ebNmyJHRERERVHHjh3Rt29f7N+/HzExMYiJicH+/fvRr18/eHp6AgAuX76MypUry11nru5UbNy4MVcBExFR4eratSsWLVoEW1tbODk5iR0OEREVQatXr8aYMWPQvXt3pKamAgBUVFTg4+MjTIxnb2+PdevWyV2nQpPfERFR0TVq1CiZ51KpFBKJRKRoiIioqNHR0cHatWuxcOFC/PvvvwAAW1tb6OjoCGWcnZ1zVSfHgSUiKsEiIyPRpEkTPH78WOxQiIioiNHR0YGhoSEMDQ1lEgpFMKkgIirBhg8fjpCQELRp0wYvX74UOxwiIioC0tPTMWPGDOjr68PKygpWVlYwMDDAzJkzkZ6erlCdTCqIiEqwdevWoUKFCoiIiICnpyc+fvwodkhERCSySZMmYdmyZZg1axZu3LiBGzdu4I8//sDSpUsxefJkhepkUkFEVIKZmZkhMDAQ+vr6+Oeff+Dt7a3wVSgiIioZNm/ejHXr1mHIkCGoUaMGatSogaFDh2Lt2rXYtGmTQnUyqSAiKuGqVq2K/fv3Q1VVFX/99Rd++eUXsUMiIiIRvX79Gvb29pnW29vb4/Xr1wrVyaSCiKgUaNasGTZs2ADgy0yqil6JIiKi4s/JyQnLli3LtH7ZsmUKD0fOIWWJiEqJ3r17IyoqCgcPHoS7u7vY4RARkUjmzJmDtm3b4tSpU3B1dQUAhIaG4smTJwgMDFSoTt6pICIqRSZNmoSQkBCYmpqKHQoREYmkSZMmePjwITp16oS3b9/i7du36Ny5Mx48eIDGjRsrVCfvVBARlSISiQQaGhrC8507d6J+/fqwtrYWLygiIip05ubm+P3332XWxcTEYODAgVizZk2u6+OdCiKiUmrt2rXo0aMHPDw88ObNG7HDISIikb169Qrr169XaF8mFUREpVSbNm1Qvnx53Lt3D507d8anT5/EDomIiIopJhVERKWUhYUFjh49Cl1dXQQHB6N///54+PAh/P390aNHD/j7+yMiIkLsMImIirVz586hffv2MDc3h0QiwYEDB3IsHxwcDIlEkmmJi4uTKbd8+XJYW1tD
Q0MD9erVw+XLlwvwLL6PSQURUSnm5OSEv/76C8rKyti6dSvs7e0xd+5c7N69G3PnzoW9vT2HnyUiyoOkpCQ4OTlh+fLludrvwYMHiI2NFRYTExNh265du+Dn54epU6fi+vXrcHJyQuvWrfHixYv8Dl9u7KhNRFTKtW7dGjNmzMCkSZMglUqRlpYms71fv35o1KgR7OzsRIqQiKj4atOmDdq0aZPr/UxMTGBgYJDltgULFmDAgAHo06cPAGDVqlU4evQoNmzYkOMEp507d87xmG/fvs11nF/xTgUREeHdu3eQSCRZbpNIJAp33CMiKqnevXuHxMREYcnvfmnOzs4wMzNDy5Yt8c8//wjrU1JScO3aNbi5uQnrlJSU4ObmhtDQ0Bzr1NfXz3GxsrKCt7e3QvHyTgURESEqKgoSiQRSqTTTNqlUiqioqMIPioioCHN0dJR5PnXqVEybNi3P9ZqZmWHVqlWoXbs2Pn36hHXr1qFp06a4dOkSatWqhZcvXyItLQ3lypWT2a9cuXK4f/9+jnVv3Lgxz/Flh0kFERHB2to62zsVAGBlZVWI0RARFX3h4eEoX7688FxdXT1f6q1SpQqqVKkiPG/QoAEeP36MhQsX4s8//8yXYxQENn8iIiL07ds3y7sUAJCeno6goKDvXgEjIipNdHV1oaenJyz5lVRkpW7dunj06BEAwMjICMrKynj+/LlMmefPn8PU1LTAYvgeJhVERIRKlSph/fr1UFJSgrKysvCvRCKBmpoarl69CicnJ8ycORMpKSlih0tEVKqEhYXBzMwMAKCmpgYXFxcEBQUJ279e/HF1dRUrRDZ/IiKiL3x9fdGoUSOsX78eUVFRsLa2Rr9+/aCqqoqhQ4ciMDAQU6ZMwa5du7B27VpR/3gRERUX79+/F+4yAEBkZCTCwsJgaGiIChUqwN/fH0+fPsWWLVsAAIsWLYKNjQ2qVq2Kjx8/Yt26dTh9+jROnDgh1OHn5wcfHx/Url0bdevWxaJFi5CUlCSMBiUGJhVERCSws7NDQEBApvVHjhzBzp07MWrUKNy9exd//vknkwoiIjlcvXoVzZo1E577+fkBAHx8fLBp0ybExsYiOjpa2J6SkoKxY8fi6dOn0NLSQo0aNXDq1CmZOry8vBAfH48pU6YgLi4Ozs7OOH78eKbO24VJIs2uEW0hWr58OebOnYu4uDg4OTlh6dKlqFu3bpZl165diy1btuDOnTsAABcXF/zxxx/Zlv9WTEwMLC0t8eTJE1hYWOTbOcijVcDBQj1eQTvh31HsEIiokL169QozZ87E9OnToa+vDwBITk6GlpaWyJERERUOMX9LFmWi96nI7YyAwcHB6NGjB86cOYPQ0FBYWlqiVatWePr0aSFHTkRU+pQtWxaLFi0SEgqpVIr27duja9euiI2NFTk6IiISi+hJRcYZAR0dHbFq1SpoaWlhw4YNWZbftm0bhg4dCmdnZ9jb22PdunVC5xQiIipc169fx9mzZ7Fnzx44ODhg7dq1SE9PFzssIiIqZKImFXmZEfCr5ORkfP78GYaGhgUVJhERZcPFxQVXr16Fi4sLEhISMHDgQDRr1gwPHjwQOzQiIipEoiYVOc0IGBcXJ1cdEyZMgLm5uUxiktGnT59kplB/9+5dnuMmIqL/cXZ2xsWLFzF//nxoaWnh3LlzcHJywu+//47Pnz+LHR4RERUC0Zs/5cWsWbOwc+dO7N+/HxoaGlmWCQgIgL6+vrB8O6U6ERHlnYqKCvz8/HDnzh20bt0anz59wo4dO7KdUI+IiEoWUZOKvMwIOG/ePMyaNQsnTpxAjRo1si3n7++PhIQEYQkPD8+X2ImIKDMbGxscO3YMW7duxbp166CmpgYA+Pz5M96/fy9ydEREVFBETSoUnRFwzpw5mDlzJo4fP47atWvneAx1dXWZKdR1dXXzLX4iIspMIpGgV69eqF+/vrBu3rx5cHR0xNGjR0WMjIiICorozZ/8/Pywdu1abN68
Gffu3cOQIUNkZgT09vaGv7+/UH727NmYPHkyNmzYAGtra8TFxSEuLo5XwIiIiqjU1FRs3boVT548Qbt27dCjR49Md6iJiKh4Ez2p8PLywrx58zBlyhQ4OzsjLCxMZkbA6OhombHPV65ciZSUFPz4448wMzMTlnnz5ol1CkRElAMVFRVcvnwZ48aNg5KSEnbu3AkHBwds3LiRfS6IiEqIIjGjdmHijNr5hzNqE1FuXbt2DQMGDMCNGzcAAM2bN8e6detgY2MjcmRERPLhjNpZE/1OBRERlR4uLi64fPky5syZA01NTZw/fx6fPn0SOywiIsojJhVERFSoVFRUMH78eNy5cwebN2+Gvb29sO3p06ciRkZERIpiUkFERKKwtbVF9+7dheeXLl2CjY0N/Pz8OPgGEVExw6SCiIiKhMDAQHz+/BkLFy5EtWrVcPz4cbFDIiIiOTGpICKiImH69OkIDAyElZUV/vvvP7Rp0wa9evVCfHy82KEREdF3MKkgIqIio02bNrhz5w5Gjx4NJSUlbN++Hfb29ti1a5fYoRERUQ6YVBARUZGio6ODhQsX4uLFi6hRowZev36Nd+/eiR0WERHlgEkFEREVSXXq1MHVq1exefNm9OvXT1j/6NEjpKamihgZERF9i0kFEREVWaqqqvD29oZEIgEAvHv3Ds2aNUO9evVw/fp1kaMjIqKvmFQQEVGxcefOHSQlJeH69euoW7cufv75ZyQnJ4sdFhFRqcekgoiIig1XV1fcu3cPXl5eSEtLw9y5c1G9enWcOnVK7NCIiEo1JhVERFSslCtXDjt37sSRI0dgaWmJf//9Fy1btoSPjw8+f/4sdnhERKUSkwoiIiqW2rZti7t372LkyJGQSCT4+PEjVFVVxQ6LiKhUUhE7ACIiIkXp6upi8eLF6NmzJ6ysrIT1z58/x4cPH2BtbS1ecEREpQjvVBARUbFXr149mJqaCs9HjBiBqlWrYsGCBRx+loioEDCpICKiEiU5ORnPnz9HcnIyxo4dC1dXV9y8eVPssIiISjQmFUREVKJoaWnhzJkzWLNmDfT19XH16lW4uLjgl19+wYcPH8QOj4ioRGJSQUREJY6SkhIGDBiAe/fuoUuXLkhLS8Ps2bNRvXp1hIeHix0eEVGJw6SCiIhKLDMzM+zZswcHDhxA+fLlkZKSAktLS7HDIiIqcZhUEBFRidexY0eEh4fj8OHD0NXVBQCkp6fjxIkTkEqlIkdHRFT8MakgIqJSQU9PD05OTsLzDRs2oHXr1mjfvj2io6NFjIyIqPhjUkFERKVSYmIi1NTUcPToUTg6OmLJkiVIS0sTOywiomKJSQUREZVKfn5+CAsLQ8OGDZGUlIRRo0ahYcOGuH37ttihEREVO0wqiIio1HJwcMC5c+ewcuVK6Onp4dKlS6hVqxZWrVoldmhERMUKkwoiIirVlJSUMHjwYISHh6NTp05IS0tDzZo1xQ6LiKhYYVJBREQEoHz58ti3bx/CwsJQr149Yf3Ro0fx5s0bESMjIir6mFQQERFlUKNGDeHx/fv30blzZzg4OGDPnj0cfpaIKBtMKoiIiLKRlJQEGxsbPH/+HF27doWnpydiYmLEDouIqMhhUkFERJQNFxcXhIWFYfLkyVBVVcWhQ4fg6OiI5cuXIz09XezwiIiKDCYVREREOdDQ0MCMGTNw48YN1K9fH+/evcPw4cPRsmVLNociIvp/TCqIiIjkULVqVZw/fx5Lly6Fjo4OWrZsCYlEInZYRERFgorYARARERUXysrKGD58ODw9PVGuXDlh/dWrV/Hx40c0atRIxOiIiMTDOxVERES5ZGFhAVVVVQBASkoKfHx80LhxYwwZMgQJCQkiR0dEVPiYVBAREeVBSkoKGjZsCABYtWoVHB0dsX//fpGjIiIqXEwqiIiI8kBHRwdr1qxBcHAwKlWqhGfPnqFz587o3Lkznj17JnZ4RCSyc+fOoX379jA3N4dEIsGBAwdyLL9v3z60
bNkSxsbG0NPTg6urK/7++2+ZMtOmTYNEIpFZ7O3tC/Asvo9JBRERUT5o0qQJbt26hUmTJkFFRQX79++Hg4MDHj58KHZoRAqLiIhAgwYNULlyZdSpUwd3797NVCYqKgpNmzaFvr4+nJ2dZbadPn0adevWhaOjI6pWrYqff/5ZGI45KioKysrKcHZ2FpbHjx8XxmkVqqSkJDg5OWH58uVylT937hxatmyJwMBAXLt2Dc2aNUP79u1x48YNmXJVq1ZFbGyssJw/f74gwpcbO2oTERHlEw0NDfz222/w8vJC//79oaOjg0qVKokdFpHCBg0ahIEDB8LX1xd79uyBr68vrly5IlNGT08Pv/32GxISEjBp0iSZbWXKlMHOnTtha2uLjx8/ws3NDVu2bIGvry8AQFdXF2FhYYV0NuJo06YN2rRpI3f5RYsWyTz/448/cPDgQRw+fBg1a9YU1quoqMDU1DS/wswz3qkgIiLKZ9WrV8eFCxewe/duYdjZhIQEzJkzB58+fRI5OiL5vHjxAlevXkXv3r0BAF26dMGTJ0/w6NEjmXKGhoZo1KgRtLW1M9VRs2ZN2NraAviSdDs7OyMqKqrAYy8M7969Q2JiorAU1P/t9PR0vHv3DoaGhjLrIyIiYG5uDltbW/Tq1QvR0dEFcnx5MakgIiIqAMrKyihbtqzw3N/fHxMmTECtWrVw4cIFESMjks+TJ09gZmYGFZUvDVskEgkqVKig8I/XuLg47NmzB+3atRPWJSUloU6dOqhVqxZmzJiBtLS0fIm9MDg6OkJfX19YAgICCuQ48+bNw/v379GtWzdhXb169bBp0yYcP34cK1euRGRkJBo3box3794VSAzyYFJBRERUCJo0aQJjY2OEh4ejUaNGGDZsGBITE8UOi6hQJCYmon379vj5559Ru3ZtAICZmRmePn2KK1eu4NSpUwgJCcH8+fNFjlR+4eHhSEhIEBZ/f/98P8b27dsxffp07N69GyYmJsL6Nm3aoGvXrqhRowZat26NwMBAvH37Frt37873GOTFpIKIiKgQeHl54d69e/D19YVUKsWKFSvg6OiIQ4cOiR0aUZYsLS0RGxuL1NRUAIBUKkV0dDQqVKiQq3revXsHd3d3dOzYEX5+fsJ6dXV14YeyoaEh+vbti5CQkPw7gQKmq6sLPT09YVFXV8/X+nfu3In+/ftj9+7dcHNzy7GsgYEBKleunKlpWmFiUkFERFRIypYti40bN+LUqVOoWLEinj59io4dO2Lp0qVih0aUiYmJCWrVqoWtW7cCAPbu3QsLCwvY2dnJXcf79+/h7u4Od3d3/PrrrzLbXrx4gc+fPwMAPn36hH379sl0RC7NduzYgT59+mDHjh1o27btd8u/f/8ejx8/hpmZWSFElzUmFURERIWsRYsWuHXrFiZMmAATExN4eXmJHRJRllavXo3Vq1ejcuXKmDVrFjZu3AgA6N+/v3CXLTk5GRYWFujatSvCw8NhYWEhNAVavHgxLl++jH379gnDxv7+++8AgPPnz6NmzZpwcnJCrVq1YGpqmmn0qJLg/fv3CAsLE0a5ioyMRFhYmNA3xd/fH97e3kL57du3w9vbG/Pnz0e9evUQFxeHuLg4JCQkCGXGjRuHs2fPIioqChcuXECnTp2grKyMHj16FOq5ZSSRSqVS0Y4ugpiYGFhaWuLJkyewsLAo1GO3CjhYqMcraCf8O4odApHCIiIi4OPjg5cvX0JfXx+bNm1C1apVZcpERUXB19cXN27cgI2Njcywhzlt+0oqlaJFixa4fv063r59W7AnRMXWu3fvoKurKzyfMWMGvLy8UKVKFRGjIqLs5Pa3ZHBwMJo1a5ZpvY+PDzZt2gRfX19ERUUhODgYANC0aVOcPXs22/IA0L17d5w7dw6vXr2CsbExGjVqhN9//x0VK1bM07nlBZOKQsSkgqjoaN68Oby9vYWx12fPnp1p7PXXr18LHfEmTZokkzjktO2rBQsW4N69e/jr
r7+YVJBc9u/fj86dO0NdXR2TJ0/G+PHjoaamJnZYRJSBmL8lizI2fyKiUic/xl7PaRsA3L17FwcOHMAvv/yS/ydAJVbNmjXh7u6OT58+4ddff4WLiwsuXbokdlhERN/FpIKISp38Hnv9W58/f8aAAQOwevVqKCsr50udVDpYW1sjMDAQ27Ztg5GREe7cuQNXV1eMHDlS1PHniYi+h0kFEVE+mz59Ojp37gwHBwexQ6FiSCKRoGfPnrh37x68vb0hlUqxdOlSuUaAISISC5MKIip18mvs9eycPXsWS5cuhbW1NRo1aoTExERYW1sjPj4+X+qn0sHIyAibN2/GiRMnYGNjg4kTJ4odEhFRtlTEDoCIqLBlHHvd19dXobHXc5Jx8qaoqCg4OzsjKioqX+qm0qdly5a4f/++TIftzZs3Iz09Hb6+vpBIJCJGR8WN+8n8n/VZTMdbBogdAv0/3qkgolIpr2Ov57SNKL9lTChiY2MxcuRI9O3bF25ubqLOoEtE9BXvVBBRqVSlShWEhoZmWr9u3TrhsZaWFmJiYrLcP6dtGVlbW3M4WcpXxsbG+PXXXzF16lScPn0a1atXx9SpUzF27FioqqqKHR4RlVK8U0FERFSMqKioYPz48bhz5w7c3Nzw8eNH+Pv7o06dOrh69arY4RFRKcWkgoiIirWIiAg0aNAAlStXRp06dXD37t1MZaKiotC0aVPo6+vD2dk50/b169ejUqVKqFixIgYMGIDPnz/LtU1Mtra2OHHiBDZv3gxDQ0PcvHkTDRo0wLNnz8QOjYhKISYVRERUrA0aNAgDBw7Ew4cPMWHCBPj6+mYqo6enh99++w3bt2/PtC0yMhKTJ09GSEgIHj16hOfPn2PNmjXf3VYUSCQSeHt74/79++jVqxdGjBgBc3NzscMiolKISQURERVb+TE7+p49e9ChQweYmppCIpFg8ODB2LFjx3e3FSXGxsbYunUr5s6dK6wLDw+Hj48PhzImokLBpIKIiIqt/JgdPTo6GlZWVsJza2trYf+cthVFSkpf/qxLpVIMHjwYW7Zsgb29PTZv3gypVCpydERUknH0JyIqEVoFHBQ7hHx1wr+j2CFQMSaRSDB//nwMGDAAN2/ehK+vL7Zu3YpVq1ahYsWKYodHRCUQ71QQEVGxlR+zo1eoUAH//fef8DwqKkrYP6dtRV2dOnVw5coVBAQEQENDA6dOnUL16tUxd+5c4fUiIsovTCqIiKjYyjg7OgCFZkfv0qULDh06hLi4OEilUqxatQrdu3f/7rbiQFVVFb/88gtu376NZs2a4cOHD/j555+xefNmsUMjohKGSQURERVreZ0d3dbWFtOnT0fDhg1hZ2cHY2NjDBo06LvbihM7OzsEBQVhw4YNcHNzg4+Pj9ghEVEJI5GWsp5bMTExsLS0xJMnT2BhYVGox2abb6KCw/9fRPKRSqWQSCQAgI8fP6Jz584YM2YMWrZsKXJkVBjcT/qLHUK+Ot4yoNCPKeZvyaKMdyqIiIhKka8JBQAsXrwYx44dQ6tWreDj44OXL1+KGBkRFWeiJxXLly+HtbU1NDQ0UK9ePVy+fDnbsnfv3kWXLl1gbW0NiUSCRYsWFV6gREREJczQoUMxcuRISCQSbNmyBQ4ODti2bRuHnyWiXBM1qdi1axf8/PwwdepUXL9+HU5OTmjdujVevHiRZfnk5GTY2tpi1qxZMDU1LeRoiYiIShZdXV0sXrwYFy5cQLVq1fDy5Uv07t0bHh4eiIqKEjs8IipGRE0qFixYgAEDBqBPnz5wdHTEqlWroKWlhQ0bNmRZvk6dOpg7dy66d+8OdXX1Qo6WiIioZKpfvz6uXbuG3377Derq6jh+/DhGjx4tdlhEVIyIllSkpKTg2rVrcHNz+18wSkpwc3NDaGhovh3n06dPSExMFJZ3797lW91EREQlhZqaGiZNmoSbN2/C3d0dCxYsELaxORQRfY9oM2q/fPkSaWlpKFeunMz6cuXK4f79
+/l2nICAAEyfPj3f6iMiooLF0WnEVaVKFRw7dkxm3fDhw6Grq4upU6dCU1NTpMiIqCgTvaN2QfP390dCQoKwhIeHix0SERFRsXH37l2sWLECs2fPRvXq1REUFCR2SERUBImWVBgZGUFZWRnPnz+XWf/8+fN87YStrq4OPT09YdHV1c23uomIiEq6qlWr4uDBgyhfvjweP34MNzc39O3bF69fvxY7NCIqQkRLKtTU1ODi4iJzxSM9PR1BQUFwdXUVKywiIiL6RocOHRAeHo5hw4ZBIpFg48aNcHBwwM6dO9nfgogAiNz8yc/PD2vXrsXmzZtx7949DBkyBElJSejTpw8AwNvbG/7+/2tbm5KSgrCwMISFhSElJQVPnz5FWFgYHj16JNYpEBERlQp6enpYtmwZzp8/D0dHR7x48QIjRoxAQkKC2KERUREgWkdtAPDy8kJ8fDymTJmCuLg4ODs74/jx40Ln7ejoaCgp/S/vefbsGWrWrCk8nzdvHubNm4cmTZogODi4sMMnIiIqdRo0aIDr169j9uzZqFSpEgwMDAB8GSFKKpXK/N0motJD1KQC+DKixPD/a+/Ow6Iq+/+BvwcI0ACFQDBkHBFBXGJRDFwCe0xQU0wrMc19AJfHkhJygwxLfrnFYxoY6FMXrrmSWT2EWZSU7BngQoFDxYCmBbiwzfn+wdX5NYErM3ME36/r4qpzn8+Z87mtu/jMue/7LFrU6rl/FgoKhYKPWYmIiCRmZmaG6Ohorbb9+/djw4YNeP/99zFw4ECJMiMiqfDrBCIiImoTjUaDVatW4fvvv4e3tzdWrlyJGzduSJ0WERkQiwoiIiJqEyMjI6Snp+OZZ55BY2Mj3nzzTXh4eOCrr76SOjUiMhAWFURERNRmjo6OOHjwIA4cOIDu3bvj3LlzCAgIgFKpxJUrV6ROj4j0jEUFERER6cykSZNQVFSEsLAwAEBSUhJycnIkzoqI9E3yhdpERETUsXTt2hUJCQmYNm0a0tLSMGrUKPFcfX09TE1NJcyOiPSBTyqIiIhIL0aMGIE33nhDPC4vL0evXr2wZcsWaDQaCTMjIl1jUUFEREQGsWXLFvz2229YtGgRhg8fjsLCQqlTIiIdYVFBREREBvHWW2/h3XffhaWlJTIzM+Hl5YXo6GjU1dVJnRoRtRGLCiIiIjIIIyMjLFy4EEVFRRg/fjwaGhoQGxsLDw8PZGRkSJ0eEbUBiwoiIiIyqB49euDIkSPYt28f7O3tcfbsWRw9elTqtIioDVhUEBERkcHJZDI899xzKC4uxmuvvYaYmBjxXE1NjYSZEdG9YFFBREREkrG2tsbatWvRuXNnAEBTUxOeeuopTJo0Cb/99pvE2RHRnWJRQWQA58+fx9ChQ+Hq6gofH5+b7niSnJyMPn36oHfv3lAqlWhoaAAAnDhxAp06dYKnp6f4c/36dfG606dPIyAgAO7u7nB3d8fBgwcN0i8iIl3LyspCTk4ODh06BHd3dyQkJHD7WaJ2gEUFkQGEhYUhNDQU586dQ1RUFGbNmtUiprS0FKtWrUJGRgZKSkpQWVmJbdu2iefd3NyQn58v/nTq1AkAcO3aNQQHB2PNmjUoLi7Gjz/+iBEjRhiqa0REOuXr64vc3FwMGTIE1dXVmD9/Pvz9/VFcXCx1akR0CywqiPSsqqoK2dnZmD59OgBg8uTJKC8vR0lJiVbc/v37MWHCBDg4OEAmkyE8PBy7d+++7efv2rULvr6+GD58OADA2NgYdnZ2uu8IEZGBDBw4ECdPnkR8fDwefvhhfPPNN/D09MQbb7whPsElovsLiwoiPSsvL0f37t1hYmICoHlxolwuh0ql0opTqVTo2bOneKxQKLRifvrpJ3h7e8PHxwdbt24V24uKimBmZoann34anp6emDFjBi5evKjnXhER6ZexsTEWL16MoqIijBs3DvX19fj4449hZMRfXah9+frrrzF+/Hg8+uijkMlkOHz48G2vOXHiBLy9vWFm
ZgYXFxf897//bRGzZcsWKBQKmJub4/HHH8epU6d0n/xd4Mgkage8vb3xyy+/IDc3F4cOHUJCQgL27dsHAGhsbMQXX3yBxMRE5OXlwdHREfPnz5c4YyIi3ZDL5fj444+xZ88eJCUlwdjYGABw/fp1VFdXS5wd0e1dvXoVHh4e2LJlyx3Fl5aWYty4cRg5ciTy8/Px8ssvY968efj888/FmL179yIiIgIxMTHIzc2Fh4cHAgMDUVVVpa9u3BaLCiI9c3JyQkVFBRobGwEAgiBApVJBLpdrxcnlcly4cEE8LisrE2OsrKzQpUsXAM37u0+dOlV8UZRcLsfIkSPh6OgImUyG6dOn47vvvjNE14iIDEImk2HKlCnw8PAQ22JjY9GvXz8cOXJEwsyIbm/MmDFYs2YNnnnmmTuKT0hIQK9evbBhwwa4u7tj0aJFePbZZ7Fp0yYxZuPGjVAqlZg9ezb69euHhIQEdO7cGdu3b9dXN26LRQWRnnXr1g3e3t5ISUkBABw4cAA9evSAi4uLVtzkyZORmpoKtVoNQRCQkJCAkJAQAEBFRYW4+0lNTQ2OHj0KLy8vAMDzzz+PrKws8Ru7Y8eOaf2Pl4ioo6mvr8ehQ4fw66+/YuLEiXj22WdRUVEhdVr0gKmpqUF1dbX4U1dXp5PPzczMxKhRo7TaAgMDkZmZCaD53/+cnBytGCMjI4waNUqMkQKLCiIDSExMRGJiIlxdXREXF4cdO3YAAObNm4fU1FQAgLOzM1avXo1hw4bBxcUFdnZ2CAsLA9BciAwcOBAeHh7w9fXFU089hdmzZwNoflKxfPlyDB06FI899hiOHz+OhIQEaTpKRGQApqamyM3NRVRUFIyNjXHgwAG4u7vj/fff5/azZDD9+vVDly5dxJ+1a9fq5HPVajXs7e212uzt7VFdXY3r16/j0qVLaGpqajVGrVbrJId7YSLZnYkeIG5ubq1+e5CUlKR1rFQqoVQqW8QtWrQIixYtuunnv/jii3jxxRfbnigRUTvRqVMnxMXFISQkBEqlEtnZ2QgNDUVKSgp27NgBZ2dnqVOkDq6oqAiOjo7isZmZmYTZSI9PKoiIiKjd8vT0RGZmJjZu3IjOnTsjJydHXMxNpE+WlpawsrISf3RVVDg4OKCyslKrrbKyElZWVujUqRNsbW1hbGzcaoyDg4NOcrgXLCqIiIioXTMxMcGSJUtQWFiInTt3am3PXVpaKmFmRHfPz88P6enpWm1paWnw8/MD0Dz9b9CgQVoxGo0G6enpYowUWFQQERFRh6BQKBAcHCwef/nll3BxccHixYtRU1MjYWb0IKutrUV+fj7y8/MBNBe6+fn54ruoli1bhhkzZojx4eHh+PnnnxEZGYkzZ85g69at2LdvH5YsWSLGRERE4P3338cHH3yA4uJizJ8/H1evXhXXW0qBRQURERF1SOnp6dBoNNi8eTP69++Po0ePSp0SPYCys7Ph5eUl7toYEREBLy8vREdHA2je4fHvL7vt1asXPvnkE6SlpcHDwwMbNmxAUlISAgMDxZgpU6Zg/fr1iI6OhqenJ/Lz8/HZZ5+1WLxtSFyoTXQPgtKWSZ2CTn32lG52rCAiup+sWbMGTzzxBMLDw1FaWorx48djypQpiI+Pl/SXL3qwBAQEQBCEm55v7W3ZAQEByMvLu+Xn3m4TF0PjkwoiIiLqsEaPHo3Tp0/j1VdfhZGREfbu3Qt3d3fs2bNH6tSIOhQWFURERNShPfzww1i3bh2ysrLg5eWFK1euSJ0SUYfDooKIiIgeCN7e3jh16hT27duHKVOmiO2FhYVoaGiQMDOi9o9FBRERET0wTExM8Nxzz0EmkwEAfv/9d4wcORKDBw9GVlaWxNkRtV8sKoiIiOiBVVxcjKamJvzwww/w9fXFkiVLUFtbK3VaRO0OiwqS3Pnz5zF06FC4urrCx8cHhYWFrcYlJyejT58+6N27N5RKpfioOjMzE56envD09ET//v0RFhaGurq6
215HREQ0fPhwnDlzBtOmTYNGo8E777yDAQMG4NNPP5U6NaJ2hUUFSS4sLAyhoaE4d+4coqKiMGvWrBYxpaWlWLVqFTIyMlBSUoLKykps27YNAODh4YGsrCzk5+fj9OnTqKqqwtatW297HREREQDY2dkhJSUFx44dQ8+ePXHhwgWMHTsW06dPR1NTk9TpEbULLCpIUlVVVcjOzsb06dMBAJMnT0Z5eTlKSkq04vbv348JEybAwcEBMpkM4eHh2L17NwCgc+fOeOihhwAA9fX1uH79ujhX9lbXERER/d2YMWPw448/YsmSJTAyMoK5uTmMjY2lTouoXWBRQZIqLy9H9+7dYWLS/B5GmUwGuVyu9WZJAFCpVOjZs6d4rFAotGLKysrg4eEBW1tbdOnSBQsWLLij64g6urZOLzx+/DiGDBmCfv36oX///oiMjIRGowHQPO6MjY3F6Yeenp746aefDNY3In2wsLDAxo0b8d1332HdunVie3l5Of/9JroFFhXUISgUChQUFECtVqOurg4HDx6UOiWi+0JbpxdaW1tjz549KCoqQk5ODk6ePIkPP/xQvNbS0hL5+fniT+/evQ3VNSK98vHxgbW1NQBAEASEhYVh4MCBWLduHRobGyXOjuj+w6KCJOXk5ISKigrxP9CCIEClUkEul2vFyeVyXLhwQTwuKytrEQM0f8MUEhKCnTt33tV1RB2RLqYXenl5wdnZGQBgbm4OT09PlJWVGbQfRFKrra3FjRs3cP36dURGRmLIkCHIzc2VOi2i+wqLCpJUt27d4O3tjZSUFADAgQMH0KNHD7i4uGjFTZ48GampqVCr1RAEAQkJCQgJCQEAlJSUiFM16uvrcejQITz22GO3vY6oo9PV9MK/qNVq7N+/H08//bTYdvXqVfj4+MDb2xtvvPEGF7VSh2RpaYn09HRs374d1tbWyMvLg4+PD5YuXYpr165JnR7RfYFFBUkuMTERiYmJcHV1RVxcHHbs2AEAmDdvHlJTUwEAzs7OWL16NYYNGwYXFxfY2dkhLCwMQPOcby8vL3h4eMDLywv29vZYtWrVba8jojtXXV2N8ePHIzIyEoMHDwYAdO/eHb/++iuysrLwxRdfICMjAxs2bJA4UyL9kMlkmD17NoqLixESEgKNRoP169djwIABOHPmjNTpEUnOROoEiNzc3JCZmdmiPSkpSetYqVRCqVS2iAsNDUVoaOhNP/9m1xF1dH+fXmhiYnLL6YV/X4D6z2mCNTU1CAoKQnBwMCIiIsR2MzMzdOvWDQBgY2ODOXPmYNeuXYiMjNRzz4ikY29vj927d2P69OmYP38+TExMoFAopE6LSHJ8UkFE1EHpYnphbW0tgoKCEBQUhJUrV2pdV1VVJU49/GuDBC8vLwP0jEh648aNQ1FREVJTU2Fubg4AaGxsRGpqKgRBkDg7IsNjUUFE1IG1dXphfHw8Tp06hYMHD4rbxr755psAgG+++Uaceujt7Q0HBwesWLFCmo4SScDCwgJ9+/YVjzdv3ozg4GCMGTOGGxrQA4fTn4iIOrC2Ti9csWLFTQuFSZMmYdKkSbpJlKgDEAQBZmZm+Pzzz9G/f3/ExsZi8eLF4mYJRB0Zn1QQERER6UBERAR++OEH+Pv749q1a3jllVfg6+uL/Px8AM0vo1y2bBmmTp2KZcuW4fz589ImTKRDLJ1J56r+M0rqFHSq2+IvpE6BiIjaCVdXVxw/fhzbt2/Hq6++ipycHAwePBjTpk1DSkoKZDIZBEGATCbD22+/jeTk5FZfSknU3vBJBREREZEOGRkZYd68eSguLsazzz4LAEhJSYFGo0FTU5PWX+fOndvihZRE7RGLCiIiItKp8+fPY+jQoXB1dYWPjw8KCwtbjUtOTkafPn3Qu3dvKJVKcTexW53TaDR49dVXMWDAAPTt2xdz585FfX29Qfp1t7p3746PPvoIc+bMgUwmazVGJpMhOTnZwJkR6R6LCiIiItKpsLAwhIaG4ty5
c4iKimp1ek9paSlWrVqFjIwMlJSUoLKyEtu2bbvtueTkZOTm5iI3NxfFxcUwMjJCfHy8Ibt312pqam66zawgCNwpijoErqkgImpHuGaJ7ndVVVXIzs7G//73PwDN70FZtGgRSkpKtN6Rsn//fkyYMAEODg4AgPDwcLz11ltYuHDhLc8VFBRg1KhRMDU1BQCMGTMGr7/+OpYuXWrgnt45hUJxyycVfHkedQR8UkFEREQ6U15eju7du4vbqMpkMsjlcqhUKq04lUqFnj17iscKhUKMudW5QYMGITU1FdXV1WhoaMC+ffvu+2/658yZc8snFXPnzjVwRkS6x6KCiIiI2o1Zs2YhKCgI/v7+8Pf3h6urqyTvgbibdSNjx47FI488AplMBmNjYxgZGcHY2BgymQy2trYIDAxssaYEaC44nnzySXTt2tUAPSJqGxYVREREpDNOTk6oqKhAY2MjgOZfjFUqFeRyuVacXC7HhQsXxOOysjIx5lbnZDIZXn/9deTl5eHkyZPo168f+vfvr+9utXC360YqKysxcuRIPPnkk3j++eehVCpha2uLgoKCFutG/rJp0yb07t3bQD0iahsWFURERKQz3bp1g7e3N1JSUgAABw4cQI8ePbTWUwDNay1SU1OhVqshCAISEhIQEhJy23M3btzAlStXAACXLl1CXFwcIiMjDdjD/79uZPr06WK+5eXlLbaG/fvaEJlMhldeeQXXrl3D7t274ezsjEmTJonnwsPDsXv3bvHawsJCHD58GK+99ppB+0Z0r1hUEBERkU4lJiYiMTERrq6uiIuLw44dOwAA8+bNQ2pqKgDA2dkZq1evxrBhw+Di4gI7OzuEhYXd9tyff/6JoUOHon///hgxYgTCw8Mxfvx4g/ZP3+tGGhoaoFQqkZiYCGNjY313h0gnuPsTERER6ZSbmxsyMzNbtCclJWkdK5VKKJXKVj/jZufs7e1RXFysm0TvU6tXr8akSZPg7u5+3y9CJ/oLn1QQERER3QV9rxv56quvsHnzZigUCgwfPhzV1dVQKBS4ePGivrtGdM9YVBARERHdBX2vG8nIyMCFCxdQVlaGb775BlZWVigrK4OdnZ1hO0p0F1hUEBEREd0lfa4bIWqPuKaCiIiI6C7pc93I3ykUCvzxxx/3nCeRofBJBRERERERtQmfVBAREdEtjV57ROoUdOp/y4KlToGow+GTCiIiIiIiapP7oqjYsmULFAoFzM3N8fjjj+PUqVO3jP/oo4/Qt29fmJubY+DAgTh27JiBMiUiIiIion+SvKjYu3cvIiIiEBMTg9zcXHh4eCAwMBBVVVWtxp88eRJTp07F3LlzkZeXh4kTJ2LixIn48ccfDZw5EREREREB98Gaio0bN0KpVGL27NkAgISEBHzyySfYvn07XnvttRbx8fHxCAoKwtKlSwEAsbGxSEtLw7vvvouEhASD5k5EREQdR9V/Rkmdgs50W/yF1CnQA0bSoqK+vh45OTlYtmyZ2GZkZIRRo0a1uk0bAGRmZiIiIkKrLTAwEIcPH241vq6uDnV1deLxn3/+CQCoqKhoY/Z378aflwx+T3365ZdfWm2/9McNA2eiX/Wt9PPGxWoJMtGfm/2zbE84vtonjq/2geOr/WltbAEcX7rw1++QGo3G4Pe+n0laVFy6dAlNTU2wt7fXare3t8eZM2davUatVrcar1arW41fu3YtVq9e3aJ9yJAh95g1/cXp/0mdgYHEOEmdgd45YavUKdA/cHx1HBxf958HYnw9AGMLkHZ8VVZWQi6XS3b/+43k05/0bdmyZVpPNhobG1FcXAwnJycYGUm+pITuUU1NDfr164eioiJYWlpKnQ5Rh8LxRaQ/HF/tn0ajQWVlJby8vKRO5b4iaVFha2sLY2NjVFZWarVXVlbCwcGh1WscHBzuKt7MzAxmZmZabcOGDWtD1nQ/qK5ufnzr6OgIKysribMh6lg4voj0h+OrY+ATipYk/are1NQUgwYNQnp6
utim0WiQnp4OPz+/Vq/x8/PTigeAtLS0m8YTEREREZF+ST79KSIiAjNnzsTgwYMxZMgQvPPOO7h69aq4G9SMGTPg6OiItWvXAgBeeukl+Pv7Y8OGDRg3bhz27NmD7OxsbNu2TcpuEBERERE9sCQvKqZMmYKLFy8iOjoaarUanp6e+Oyzz8TF2CqVSmvtw9ChQ7Fr1y6sXLkSy5cvR58+fXD48GEMGDBAqi6QBMzMzBATE9NiahsRtR3HF5H+cHxRRyUTBEGQOgkiIiIiImq/uP0RERERERG1CYsKIiIiIiJqExYVRERERETUJiwqSGcCAgLw8ssvS3b/WbNmYeLEifdNPkREREQPChYV1GEdPHgQsbGxUqdBpFeGLJ63bduGgIAAWFlZQSaT4Y8//mgRc/nyZUybNg1WVlbo2rUr5s6di9raWoPkR9RW7XE8/fDDDxgxYgTMzc3h5OSEt99+2yD5E/0TiwrqsGxsbGBpaSl1GkQdxrVr1xAUFITly5ffNGbatGkoLCxEWloajh49iq+//hqhoaEGzJKofdDFeKqursbo0aPRs2dP5OTkYN26dXj99df57i6ShkCkI/7+/sLChQuFhQsXClZWVsIjjzwirFy5UtBoNIIgCMKHH34oDBo0SLCwsBDs7e2FqVOnCpWVleL1ly9fFl544QXB1tZWMDc3F1xcXITt27eL51UqlfDcc88JXbp0EaytrYUJEyYIpaWl4vmZM2cKwcHBWvm89NJL4nHPnj2FN998U5g9e7ZgYWEhODk5CYmJiVp9uN09iO4nM2fOFABo/ZSWlgonTpwQfHx8BFNTU8HBwUGIiooSGhoaxOtuN1Zv58svvxQACFeuXNFqLyoqEgAIWVlZYtunn34qyGQy4ddff9VJn4n0pT2Op61btwrW1tZCXV2dGBMVFSW4ubm14U+C6N7wSQXp1AcffAATExOcOnUK8fHx2LhxI5KSkgAADQ0NiI2NRUFBAQ4fPoyysjLMmjVLvHbVqlUoKirCp59+iuLiYrz33nuwtbUVrw0MDISlpSUyMjLw7bffwsLCAkFBQaivr7/j/DZs2IDBgwcjLy8PCxYswPz583H27Fmd3oPIUOLj4+Hn5welUomKigpUVFTgoYcewtixY+Hj44OCggK89957SE5Oxpo1a7SuvdVYvVeZmZno2rUrBg8eLLaNGjUKRkZG+P7779v02UT61h7HU2ZmJp544gmYmpqKMYGBgTh79iyuXLnSpvsT3S3J36hNHYuTkxM2bdoEmUwGNzc3nD59Gps2bYJSqcScOXPEOGdnZ/znP/+Bj48PamtrYWFhAZVKBS8vL/E/oAqFQozfu3cvNBoNkpKSIJPJAAA7duxA165dceLECYwePfqO8hs7diwWLFgAAIiKisKmTZvw5Zdfws3NTWf3IDKULl26wNTUFJ07d4aDgwMAYMWKFXBycsK7774LmUyGvn374rfffkNUVBSio6NhZNT8XdKtxuq9UqvV6Natm1abiYkJbGxsoFar772jRAbQHseTWq1Gr169tGLs7e3Fc9bW1vd8f6K7xScVpFO+vr7iL+QA4Ofnh/Pnz6OpqQk5OTkYP3485HI5LC0t4e/vDwBQqVQAgPnz52PPnj3w9PREZGQkTp48KX5OQUEBSkpKYGlpCQsLC1hYWMDGxgY3btzATz/9dMf5PfbYY+Lfy2QyODg4oKqqSqf3IJJScXEx/Pz8tMbhsGHDUFtbi19++UVsu9VYfeutt8Qx8FfBT/Qg4ngiunN8UkEGcePGDQQGBiIwMBA7d+6EnZ0dVCoVAgMDxalFY8aMwYULF3Ds2DGkpaXhX//6FxYuXIj169ejtrYWgwYNws6dO1t8tp2d3R3n8dBDD2kdy2QyaDQaANDZPYjau/DwcDz//PPi8aOPPnpH1/29SP9LY2MjLl++LH7zS/Sg0ed4cnBwQGVlpVbMX8ccc2RoLCpIp/45b/q7775Dnz59cObMGfz++++Ii4uD
k5MTACA7O7vF9XZ2dpg5cyZmzpyJESNGYOnSpVi/fj28vb2xd+9edOvWDVZWVnrJ3RD3INI1U1NTNDU1icfu7u44cOAABEEQvzn99ttvYWlpiR49eohxNxurxsbGsLGxgY2NzV3n4ufnhz/++AM5OTkYNGgQAOD48ePQaDR4/PHH76V7RAbV3saTn58fVqxYgYaGBvFLs7S0NLi5uXHqExkcpz+RTqlUKkRERODs2bPYvXs3Nm/ejJdeeglyuRympqbYvHkzfv75Z6SmprZ4h0R0dDSOHDmCkpISFBYW4ujRo3B3dwfQvK2era0tgoODkZGRgdLSUpw4cQKLFy/WegTdFoa4B5GuKRQKfP/99ygrK8OlS5ewYMEClJeX49///jfOnDmDI0eOICYmBhEREeL8b+DmY/VW1Go18vPzUVJSAgA4ffo08vPzcfnyZQDNv4AFBQVBqVTi1KlT+Pbbb7Fo0SKEhITc8bezRFJqb+PphRdegKmpKebOnYvCwkLs3bsX8fHxiIiI0NOfENEtSL39FHUc/v7+woIFC4Tw8HDByspKsLa2FpYvXy5uq7dr1y5BoVAIZmZmgp+fn5CamioAEPLy8gRBEITY2FjB3d1d6NSpk2BjYyMEBwcLP//8s/j5FRUVwowZMwRbW1vBzMxMcHZ2FpRKpfDnn38KgnBnW8pu2rRJK2cPDw8hJibmju9BdL85e/as4OvrK3Tq1OmutsC81Vi9mZiYmBZbbgIQduzYIcb8/vvvwtSpUwULCwvByspKmD17tlBTU6Ov7hPpVHscTwUFBcLw4cMFMzMzwdHRUYiLi9PpnwnRnZIJgiBIUs0QEZEkAgIC4OnpiXfeeUfqVIjaPY4nomac/kRERERERG3CooKIiIiIiNqE05+IiIiIiKhN+KSCiIiIiIjahEUFERERERG1CYsKIiIiIiJqExYVRERERETUJiwqiIiIiIioTVhUEBERERFRm7CoICIiIiKiNmFRQUREREREbcKigoiIiIiI2uT/AMHJRbcVfSP4AAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top-100 ablation — full prediction shift:\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + "
\n", + "
Input Sentence:
\n", + "
Fact: the capital of the state containing Dallas is
\n", + " \n", + "
\n", + "
Original Top 5 Tokens
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
TokenProbabilityDistribution
Austin0.414\n", + "
\n", + "
\n", + " 41.4%\n", + "
\n", + "
Texas0.056\n", + "
\n", + "
\n", + " 5.6%\n", + "
\n", + "
the0.056\n", + "
\n", + "
\n", + " 5.6%\n", + "
\n", + "
not0.056\n", + "
\n", + "
\n", + " 5.6%\n", + "
\n", + "
Fort0.044\n", + "
\n", + "
\n", + " 4.4%\n", + "
\n", + "
\n", + " \n", + "
New Top 5 Tokens
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
TokenProbabilityDistribution
Texas0.125\n", + "
\n", + "
\n", + " 12.5%\n", + "
\n", + "
the0.110\n", + "
\n", + "
\n", + " 11.0%\n", + "
\n", + "
not0.059\n", + "
\n", + "
\n", + " 5.9%\n", + "
\n", + "
called0.036\n", + "
\n", + "
\n", + " 3.6%\n", + "
\n", + "
a0.031\n", + "
\n", + "
\n", + " 3.1%\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + "
Key Tokens
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
TokenOriginalNewChange
▁Austin0.41410.0090\n", + "
\n", + "
\n", + " -97.8%\n", + "
\n", + "
▁Dallas0.03000.0038\n", + "
\n", + "
\n", + " -87.4%\n", + "
\n", + "
▁Texas0.05590.1245\n", + "
\n", + "
\n", + " +122.7%\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Progressive ablation: zero out increasing numbers of custom-target features\n", + "probs_base = torch.softmax(original_logits.squeeze(0)[-1].float(), dim=-1)\n", + "groups = {\"baseline\": {\n", + " \"P(Austin)\": probs_base[idx_x].item(),\n", + " \"P(Dallas)\": probs_base[idx_y].item(),\n", + " \"P(Texas)\": probs_base[idx_texas].item(),\n", + "}}\n", + "logit_diffs = {\"baseline\": orig_gap}\n", + "\n", + "ablation_results = {}\n", + "for n in [10, 100]:\n", + " top_n, _ = get_top_features(graph_custom, n=n)\n", + " abl_tuples = [\n", + " (layer, pos, feat_idx, 0.0 * activations[layer, pos, feat_idx])\n", + " for (layer, pos, feat_idx) in top_n\n", + " ]\n", + " abl_logits, _ = model.feature_intervention(input_ids, abl_tuples)\n", + " probs_abl = torch.softmax(abl_logits.squeeze(0)[-1].float(), dim=-1)\n", + " gap = (abl_logits.squeeze(0)[-1, idx_x] - abl_logits.squeeze(0)[-1, idx_y]).item()\n", + " label = f\"top-{n}\"\n", + " groups[label] = {\n", + " \"P(Austin)\": probs_abl[idx_x].item(),\n", + " \"P(Dallas)\": probs_abl[idx_y].item(),\n", + " \"P(Texas)\": probs_abl[idx_texas].item(),\n", + " }\n", + " logit_diffs[label] = gap\n", + " ablation_results[n] = abl_logits\n", + "\n", + "display_ablation_chart(groups, logit_diffs=logit_diffs,\n", + " title=\"Custom-target ablation: token probabilities & logit gap\")\n", + "\n", + "# Show the full top-k comparison for the strongest ablation\n", + "strongest_n = max(ablation_results.keys())\n", + "print(f\"\\nTop-{strongest_n} ablation — full prediction shift:\")\n", + "display_topk(prompt, original_logits, ablation_results[strongest_n])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Ablate the Semantic Concept Circuit\n", + "\n", + "Same progressive ablation, now zeroing out features from the **semantic concept** graph. 
Because the concept direction captures the capital-vs-state pathway, ablation should similarly collapse the Austin signal." + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAxYAAAHqCAYAAACZcdjsAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjgsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvwVt1zgAAAAlwSFlzAAAPYQAAD2EBqD+naQAApRFJREFUeJzs3XlcTfn/B/DXbd8XlFJRpJJIhMm+ZB1kGGMwiOzbEIasGTOyjX0n2yCNLNnGMsiaLRKiLCVL2RUVUef3h6/z62pRt+W0vJ6Px3m493M+53Pe597jdN73nM/nyARBEEBERERERJQHSlIHQERERERExR8TCyIiIiIiyjMmFkRERERElGdMLIiIiIiIKM+YWBARERERUZ4xsSAiIiIiojxjYkFERERERHnGxIKIiIiIiPKMiQUREREREeUZEwsq8aKjoyGTycSJiiZvb2/xO3J3d8/RMu7u7uIy3t7eBRqfpaWluK6goKACXVdRpch3VNo1a9ZM/Mw2btxYIOtQZN9Mf0yMjo7+Zlt52Y6s1lWSFMb3nJXCPA4SfYuK1AFQ7r158wZz587Fvn37cP/+fXz69AmGhoYwMTFBjRo10Lp1a/Tu3VvqMAtNaGgo9uzZA+DzH0UpTniCgoLEP8K1atVC586dCz2GgpL+D9Xo0aNhYGAgWSwF5c2bN1i0aJH4vqT8cS4N3x1JKzo6WjyRNjAwwOjRoyWNJ6dSUlKwYsUKbNy4Effv34cgCDA1NUXt2rUxceJE1KpVS+oQ86ykHteoaGNiUcy8fv0a9erVw927d+XKnz59iqdPn+LatWuIiooqdYnFjBkzAABNmzbNkFiYmpri9OnTBRpDUFCQGEPfvn1LVGLxZbuAz7+MlcST0zdv3shtZ2Z/gAMCAvD+/XsAQI0aNQortDwpDd8dKSb9MdHU1PSb9ZcuXYr4+HgAgI2NjVgeHR0t7meVKlXKNLHI7boKQ8+ePbFz5065sjt37uDOnTto27ZtsUosJk+ejAEDBgAAKlasKJbn5LhGlN+YWBQzixcvFpOKihUrYurUqahcuTKSk5Nx8+ZN7N27F0pKvMMtPXV1dTRq1EjqMPJNcnIy1NXV+T0XMmdnZ6lDIAklJiZCW1tb6jDyTW6PiXlJpova8ffly5dySYWPjw/q1q2Lx48f4/jx49DX15cwutyrWrUqqlatKnUYRADYx6LYuXjxovh67NixGDBgAFq0aIHvv/8ev/32G86cOYMDBw5kWC4pKQlz585FvXr1oKenB3V1dVStWhWenp54/vy5XN2goCDxfk1LS0tERETg+++/h46ODoyNjTFy5EgkJyfj7du3GDlyJMqXLw9NTU00adIEly9flmvr8uXL+OWXX1CjRg0YGRlBVVUVurq6qFWrFqZPn453797J1f/6Hu6zZ8+iRYsW0NbWhr6+Prp3745nz56J9WUyGfr16ye+P3nyZIb+FN/qYxEaGgp3d3dUrlwZGhoa0NPTQ40aNTB+/Phvfh9f2k7/q9CmTZvkPj/g8x+yIUOGoH79+jA1NYWGhgY0NTVhbW2NgQMH4v79+9l+B5GRkejSpQsMDQ2hpaWFhIQEAMCDBw/QrVs36OvrQ09PDx06dEB4eHi29/u+evUKU6dOhaOjI3R0dKCpqYnq1avD29tb7vv4ct9uelZWVjm+jzi33/3XLly4gJYtW0JHRweGhob4+eef8fDhw2yXUWTdzZo1g5WVldzy6feXL7e4ZXcf+/379z
F06FBYW1tDQ0MDOjo6cHR0xLRp0/DmzRu5urndx4GM+8O35Oa7y03sWTly5Ag0NTUhk8mgrKyMdevWifMuXryIHj16wMLCAmpqajA0NISrqyv27t2boZ2v99v169fD0dERGhoaqFChAry8vJCampqjmL5ua926dahZsyY0NDRgbm6OCRMmiFegvkj/HR85cgTTp09H5cqVoaKigrVr14r1Tp06ha5du6JChQriNjVu3Bjr1q1DWlpatnFt3rxZ3Kas4li2bBnatWsHKysr6OnpQVVVFcbGxmjTpg12796dbftpaWlYuHAhbG1toa6ujipVqmDOnDkZ4sptv4fMjimWlpZo3ry5WOfBgweZtpvdum7duoUBAwbIHX8bNmyIjRs3QhAEubqPHz/G4MGDUblyZairq0NTUxMWFhZo1aoVpk+f/s1t+EJbWxuqqqri+65du6Jly5bo06cPNm7ciB9++CHHbeXE06dPMX78eNjb20NLSwuampqws7PDmDFj8OTJkwz1c3tcz6yPRU6Pa9k5deoUGjZsCE1NTZQvXx5Dhw7F69evs/w+c7vfpv//duzYMcydOxdVq1bNdr+lYkCgYuXnn38WAAgABFtbW2H79u1CXFxctss8f/5ccHBwEJf7ejIzMxPu378v1j9x4oQ4z8DAQDA2Ns6wTJcuXYT69etnKC9XrpyQkJAgtrVy5cos1wtAqFOnjvDx40ex/vTp08V5VlZWgoqKSoZl2rRpI9bPru0vu3dUVFSGsi/WrFmT6ToACPr6+t/8Pr5u++upUqVKgiAIwq1bt7KtZ2hoKNy7dy/T70BfX18wMjKSq//69WshNjZWqFChQqZtWVlZie83bNggtnvnzh3B3Nw8yzgcHByEly9fCoIgCH379s025vTtZiYv3729vb2grq6eYRlzc3Ph6dOn4jLpY5w+fbpC627atGm2dU+cOCEIgiBUqlQpQ5kgCEJQUJCgo6OT5fJWVlbCo0ePMt3OnOzjX+8PX/ap7OT0u8tL7H379hUEQRCOHj0qaGhoCAAEFRUVYdu2bWL95cuXC0pKSlm27+XlJRd3+u+iatWqmS7j4+Pzze3/ui17e/tM22rbtq2QlpYmLpP+O/56/QsXLhQEQRDmzZsnyGSyLLepffv2cvt1+jicnJxyFEdmx9bMYsks7qzWMWjQILll0s+LiorKtK30+3n67fiy/6Svm9n0pd2s1rV7925x38ls6tWrl/i5pKSkCFWqVMmyrrq6eo72iy+6desmLlu7dm0hPj4+V8t/LbPPRxAEITw8PNO/oV+mcuXKCdeuXRPrK3Jcz+w4mNPjWlb+++8/QVVVNcNytWvXzvL7zMt+m9X/0a/3Wyr6mFgUM3///Xem//nMzMyEn3/+WQgMDJT7AyUIgvDTTz+J9WrVqiX4+fkJ//77r9C1a1exvHHjxmL99CcxX5bZvXu34O3tLVeuoaEhLFq0SNi5c6fcgXPVqlViW2fPnhX++usvYffu3cJ///0nnDhxQtixY4dQt25dsf4///wj1k9/4gJAaNWqlbB3794M5bdv3xYEQRBOnz4tTJo0SS7W06dPi5MgZJ1Y3Lx5U1BWVpZbdtOmTcKhQ4eERYsWCS4uLt/8Pt6/fy+cPn1a6Nevn9hOu3btxPVfunRJEARBiIuLE37//XfB399fOHTokBAUFCTs27dP+OWXX8Tlhg0bluV3YGBgICxatEg4cuSIsHjxYiEpKUno37+/OF9PT09Yvny5EBgYKDRu3Fhu2fR/gNIf+Js3by7s3r1b2Ldvn9wfod69ewuCIAiRkZHC6dOn5drasWOHuG3pT/Azk9fvvlOnTsL+/fuFpUuXyp38DhgwQFwmq8QiN+sOCwsTduzYIbfu9PvQmzdvBEHI/IQrOTlZ7iSgXr16wq5du4TNmzcLZmZmYnn79u2z3M5v7eNf7w85SSxy8t3lNfa+ffsKx44dEzQ1NQUAgpqamrB7926x7o0bN8SkQklJSZ
g8ebJw5MgRYfXq1YKhoaHYzrFjx8Rlvj4ZGjlypHDgwAHhxx9/FMtMTEy+uf1ftyWTyYTffvtNOHjwoODp6Sm3ji1btojLfH2i3K9fP2H//v3CP//8I5w6dUoIDQ2VSyp69+4tHDhwQJg9e7agpqYmls+dOzdPcaxYsULw9fUV9u/fLwQFBQlHjx4Vli1bJibb+vr6cslL+rjV1NSEWbNmCQcPHsyQYJ45c0ZcJn25oonFpUuXhCVLlsh9N+n/77x//z7LdT179kzu//WQIUOEQ4cOCX///bdcDL6+vuK6vpTVrFlT2L17t3D06FFh06ZNwq+//ipUr149R/uFIAjC2LFj5WICIHz33XdyyUX6v5uvX7/+ZptZJRbpT8SrVq0q+Pn5CTt27JA7kXZwcBBSU1MFQRAUOq5ndhzM6XEtM6mpqYK1tbW4nJ2dnbBjxw7h77//ljs2fL3vFMZ+S0UfE4tiaPjw4dn+Yubm5iYmF69fv5Y7ed62bZt4YDlx4oTcLxJfTmS+PqkNDw8XBEEQ0tLSBG1tbbH8t99+k4vpS7mnp6dY/vHjR2Hp0qVCw4YNBUNDw0x/vUxfP/2JS7ly5YSkpCRxnp2dnThv7969YvmGDRvE8qZNm2b4vLJKLMaNGyeWmZubC+/evcvyM7906ZLcQfn06dNCZGRkpnF/+SX3a/v37xe+//57wcTEJNNfqWvXri3W/fo7SL+9gvD5wK+npyfOX7BggTjv+fPncr8CfvkDdP36dbFMVVVVOHz4sLgtAQEBcvPevn0rtpfVH5Fvyct3X6FCBeHDhw/ivPnz54vzDAwMxD/CWSUWuV13dle1vsjshCswMFDuD+OTJ0/E+vv37xfnyWQyMRFTZB9XVHbfXV5jr1mzpqClpSUAEDQ1NYVDhw7JtZ/+5M3V1VXu/076k6eff/5ZXCb9yVn6hCYuLk5uW9JfFc1K+ra6desmN69Dhw7ivE6dOonl6b/jLl26ZGhzzJgx4vwaNWrIzUt/PLG3t89THDExMcKwYcMEW1tbMXH7egoLC8s07vHjx8uto0aNGuK8UaNGieVZ7Ru5SSwEIWdJb2brWrp0qVjm4OAgt39MnjxZnPfdd98JgvA5Wf5S1rJlS+HmzZtCSkpKpuvLzsKFC8V2GjduLLdd6ZOLL/8XzczMctRuZp/PtWvX5LY9JCRErH/jxg25eRcvXlTouC4IWR8Hc3Jcy8zly5fllrty5Yo4L/2x4et9pzD2Wyr62Hm7GFq2bBlGjhyJHTt24PTp07hw4YI4WgcABAYGwt/fHz///DMiIyPl7knu2bNnlu3euHEDtra2cmUGBgaoVq0agM/3Z5YpUwaJiYkAABcXF7FeuXLlxNevXr0SX/fv3x9///13ttvz+vXrTMtdXFygqakpvi9btmym61BUeHi4+LpNmzbZdsz88ccf8eDBA7myvn375ni88vXr18PDwyPbOll9Durq6ujQoYNc2bNnz8R+FgDQsGFD8XW5cuVgZ2eH0NBQuWXSb+/Hjx/Rpk2bTNf38eNHREREoE6dOtnG+y15+e7r168PNTU18X36zp9v3rzBixcvYGxsXCDrzo3bt2+Lr6tUqSI34k36mAVBQERERIaYC3ofz05eYw8LCxNfr127NsP+lH5/+++///Dff/9lGseNGzcyLW/ZsqX4Ov3nAnz+bHR1dTNdLjNfdx5u1KgR9u/fD+DzSECZ6dq1a4ay9J9ZZm3Onz8fABAZGQlBEDL0c8lJHHFxcXB2ds7Qz+ZrWe2/X6+jYcOGuH79utw6ioL0+8eNGzfQuHHjTOt92T+sra3h6uqK//77D8eOHUP16tWhrKyMKlWqwMXFBUOGDMF33333zfXOnj0bAKCiooJ//vkHWlpaaN68Oa5cuYLz58+jbdu2GDZsmPhdt2/fXuFtTL+/aGpqonbt2uL76tWrw8DAQOzHdPv2bVhYWOT6uF4Q0u8nWlpacHJyEt9n1RG/tOy39G3svF1M2d
raYsqUKTh8+DBevnyJQ4cOwdDQUJx/4cKFXLeZWWfar0fHSD8SUVZDVwr/63D3+PFjuZO70aNH48iRIzh9+jT69OkjlmfVOatMmTJy71VU/j8P/rKO4uLLHzMAaNu2Lfbu3YvTp09j4cKFYnlWn0P58uUznKB8631efatj9bfk9bsvruvOreK8jysrK4uvvb29ERsbq1A7We1r6T+b9J8LUDifjVTDoq5fv148OStfvjx8fX1x8uRJnD59Wu4HnKKw/xaGL/uHTCbDvn37sGrVKvzwww+wtbWFkpISIiMjsWnTpkwHD/nas2fP8PTpUwCf/7aZmJhAT08Phw4dEofQDQ4OFodrl8lkGDVqVAFunbyCPq4rGkdOcL+lL5hYFDMnTpzIMFKLsrIy2rRpg/r164tlX/7z2tjYyJ0AREREQPh8C5zc9O7dO/Tt2zdfY00/gk/ZsmWxcOFCtGrVCo0aNcLjx4/zbT3pk53cHLTs7e3F10eOHBGvxHyR/uQlOjo6w2eW/mrFt2KIiYkRX8+bNw8dO3ZEo0aNcnQCn9lB3sjISC7pO3/+vPj6xYsXcr+UffHlyhPw+dezN2/eZLkvNG3aNNP15/Tzzet3f/HiRXz8+FF8f/bsWfG1vr6+3B+q/Fj310P35nQ77ezsxNf37t1DXFxcpjHLZLIMVwMLQ3bfXV5j/+mnn+Dq6goAuHv3Llq2bCk3wlz6/a1Hjx6Z7muCIGR5xSI/pd+er99bW1tnukxm/+/Sf2bZtWljY5Pp8jmJI/2x4pdffkH//v3RpEkTVKxYES9fvsw01uzWce7cuQzryE+KHn/T7x8NGjTIcv/4cowUBAEaGhoYPHgwdu3ahdu3byMxMVE88f/48SMCAgKyXaeWlpYY78uXL8XPxsjICEePHoW5ublc/cmTJ8PBwSHH2/S19PtLcnIyrl69Kr4PDw+X+1tuZ2en0HE9O4oe19IPXZuUlCReOQCQ5TOhitt+SwWHt0IVM76+vti1axc6dOiA5s2bo0qVKpDJZDhz5gyOHj0q1vtym5KBgQG6dOmCHTt2APh8WXf8+PGwtrbGmzdv8ODBA5w6dQq3b9/O9UHrWypXriy+fvnyJf788084OzsjICAAx44dy7f1pL9NIiwsDLt27YKxsTEMDAyy/aPg7u6OhQsXIjU1FQ8fPkTTpk3x66+/onz58oiMjMT27dtx5syZXMdw+vRpHDhwQPxFzNraGpUrV8atW7cAAH/88Qc8PDwQEhKCP//8U6FtVlJSwo8//ghfX18AwLRp06CmpoYKFSpg3rx5GYavBD6PQ1+3bl1cunQJycnJaNGiBUaNGgULCws8f/4cUVFROH78ONLS0uRuWylbtixevHgBAFi1ahU6dOgAJSUl1KtXT+52pfTy+t0/fvwYP/30EwYMGCD3AC7g821p2T3DQ5F1lylTBjKZTEwmFy5ciHr16kFJSUnudoSvtW7dGhUqVMCTJ0+QkpKCH374ARMmTMC7d+/g5eUl1mvXrl22t259S1BQkDisZ6VKlXI0PCiQ/XeX19jV1NSwe/dutGzZEhcvXsStW7fQqlUrnDhxAoaGhnB3d8eiRYuQlpYGPz8/6OrqokOHDlBXV8ejR48QHh6OvXv3YtKkSRkeapnfAgIC4OXlhaZNm+L48ePi7UfA5wQpp/r06YNFixZBEASEhYWhX79++Omnn3Djxg0sWbJErJfV9uQkjvT7b0BAAFxcXJCWloYZM2bk6ErN4sWLUaZMGdSsWRM7duyQu2UtN9uaU+mPfU+ePMHmzZtRuXJlaGpqZns7Zffu3TFp0iS8e/cO586dw48//oiePXtCX18fjx8/RkREBA4ePIjOnTtj+vTpePr0KRo2bIiuXbuiRo0aMDU1RVJSktxVisyOe+np6OigTZs2+PfffwEAnTp1wuTJk1G9enWEh4dnOPHObCjY3KhZsyZq166NK1euAPicYM+YMQPKyspyxzQHBwfUqVNHoeN6dh
Q9rjk5OaFq1ariLUg9e/aEt7c33r9/jwkTJmS6THHbb6kAFVz3DSoIvXr1yrRDVPqpSZMmwqdPn8Rlnj17lu1ws/iq0112nfGy6tiXVefl9MPjfpmUlZXlRrhIXz+7TtBZdR589eqV2Ik0/dSyZUtBELLvwLZy5Uq5zu3pp5wMN/tFeHh4ph2EPTw8BEEQhFWrVmW6jmbNmuX6O/giq2EJ9fX1BUtLy0w/q8jIyGyHmwUydoDv0aNHpvUePnyY7WeSl+++SpUqmXZwNzMzkxteOatOi7ldtyAIgouLS6bLfJGX4WbTf1aK7OO5HRXqi299d/kR+4sXL4Rq1aqJ5fXq1RM7Vy9btizb4Wa/3s6stl8Qcj+IQE6GeW3VqpU4EIAgZP0dp5eT4WbTdypOH0f6zvlZxREbGys3ataXyd7eXm70vfTxpY87q3V8ORZ96/PMbeftT58+ZXpMqVKlyjfXtWvXrmyHm03//zo2NjbbeioqKsKFCxe+sVcIQnR0tNzxMbMp/fc7Z86cb7aZ3edz8+bNbIebLVu2bI6Gm83uuJ7VcVAQvn1cy0pWw83WqlUr0+8zr/ttVv9Hv95vqejjrVDFjLe3NxYvXozOnTujWrVqKFOmDJSVlWFgYAAXFxf89ddfOHLkiNztT0ZGRrh48SLmz5+P7777Dvr6+lBVVUWFChXw3XffYfLkyXJPIc1P69atw+jRo2Fubg5NTU3Ur18fBw8eRIsWLfJtHYaGhti1axecnZ2hrq6eq2WHDBmCCxcuoHfv3rC0tISamhp0dHTg4OCAgQMH5ridatWqYfPmzahevbrcg5e+GDx4MFauXAk7OztoaGigatWqWLRoEaZNm5areNMzMTHB2bNn0bVrV+jq6oq/xp05c0au/0v6TulVq1ZFWFgYpk2bBicnJ+jo6EBdXR0VK1ZEkyZN8Oeff2LVqlVy61m8eDG6d+8u/vqVU3n57hs1aoSjR4+icePG0NLSgr6+Pn766SecPXsW5cuXL5B1//3332jfvn2uOgUDQNOmTREaGio+uEtNTQ2ampqoUaMGpkyZgitXrmS4xaKwfOu7y4/Yy5YtiyNHjqBixYoAPt/G9v333yMpKQnDhw/H+fPn0atXL1SsWBFqamrQ09ODra0tunXrhs2bN6NLly4Fsu3pjRo1Cps2bULNmjWhrq6OChUqYNy4cQgMDMz1E+zHjRuHEydOoEuXLjAxMYGKigr09fXRsGFDrF69Gvv27cv0GAAAEyZMwMqVK2Fvby/+Ev11HCYmJggKCoKrqyv09PRQtmxZ/PLLLzhx4oRcR/+sLF26FH/++SeqVKkCNTU1WFlZYdasWVi9enWutjOnlJWVsXv3bjRp0gRaWlq5WvaHH37A1atXMWjQIPEBjdra2rC2tkaHDh2watUqDBs2DMDnq+8zZ85E69atUbFiRWhqakJFRQUVKlRAly5dcPr0adSrV++b66xUqRJCQ0MxZcoUODg4QENDA2pqaqhcuTL69OmDoKAgrFixQqw/ceLEbz6UMDv29vYICwvD2LFjxeO/hoYGbGxs8OuvvyIsLAw1a9YU6ytyXM+Oose1li1b4ujRo2jQoAHU1dVhZGSEAQMGZDhX+BJHXvfb+fPnY8GCBahatWqh7LdUcGSCUMR7CBJRloRMRp559uwZLC0tkZycDODzk8UdHR2lCI9IMs2aNcPJkycBABs2bCjw262I8ktROK5nFgMA7Nu3D506dQLw+Var58+f5zo5/8LS0lIcbfHEiRNo1qyZwvFS0cErFkTFWMuWLbFmzRpcvXoVDx8+xH///Qc3Nzfxj4+jo6Pcr2FERFS0FYXjemRkJNq2bYudO3ciIiIC9+7dw7Zt2zBkyBCxzi+//KJwUkElFztvExVj4eHhGDx4cKbzjI2NsWXLFsmGLCQiotwrCsd1QRBw+PBhHD58ONP59evXxx9//FGgMVDxxFSTqBgbOnQo6tevj3LlykFFRQW6urqoXbs2pk
yZgps3b+ZpqEQiIip8ReG4bmxsjIEDB6J69erQ19eHiooKypYti2bNmmHlypU4ffp0rvttUOnAPhZERERERJRnvGJBRERERER5xsSCiIiIiIjyrNR13v706ROuXr2K8uXLczQDIiIiIsp3aWlpePr0KZycnKCiUnpOt0vPlv7P1atXc/QQHSIiIiKivLh48SLq1q0rdRiFptQlFl+e2nvx4kWYmppKHA0RERERlTSxsbGoV6+eeN5ZWpS6xOLL7U+mpqYwNzeXOBoiIiIiKqlK2233pWtriYiIiIioQDCxICIiIiKiPGNiQUREREREeVbq+ljkVGpqKj5+/Ch1GJQLqqqqUFZWljoMIiKiXON5R/HCc47MMbH4iiAIiIuLw5s3b6QOhRRgYGAAExMTyGQyqUMhIiL6Jp53FF8858iIicVXvvznNjY2hpaWFneWYkIQBCQlJeHZs2cAwKGEiYioWOB5R/HDc46sMbFIJzU1VfzPXbZsWanDoVzS1NQEADx79gzGxsa8RElEREUazzuKL55zZI6dt9P5cm+jlpaWxJGQor58d7xPlYiIijqedxRvPOfIiIlFJngZsvjid0dERMUN/3YVT/zeMmJiQUREREREecbEohTp3bs3Zs2aJXUYAD5n+Xv27Mlx/UOHDqFWrVpIS0sruKCIiIioyLC0tMSiRYvy3E50dDRkMhlCQ0MBAEFBQZDJZHIjce3ZswfW1tZQVlbG6NGjsyyj7LHzdg619gks1PUd8XLLVX13d3ds2rQJwOexlStWrIg+ffpg0qRJUFFRwbVr13Dw4EGsXLkyw7J+fn745ZdfMGTIECxfvjxf4v/C29sbe/bsEf8zfxEbGwtDQ8Mct9O2bVtMnToVW7duRe/evfM1RiIiIsodd3d3vHnzJlc/EubWpUuXoK2tLb6XyWTYvXs3OnfunKd2GzRogNjYWOjr64tlgwcPRr9+/TBq1Cjo6upmWUbZ4xWLEqRt27aIjY3FnTt3MHbsWHh7e2PevHkAgKVLl6Jbt27Q0dHJsJyvry9+++03+Pn54f3794USq4mJCdTV1XO1jLu7O5YsWVJAERERERVfd+7cgZeXF3r06AEvLy/cuXNH6pDyzMjIqEA6tqupqck9f+Ldu3d49uwZ2rRpgwoVKkBXVzfTMvo2JhYliLq6OkxMTFCpUiUMHToUrq6u2Lt3L1JTUxEQEICOHTtmWCYqKgrnzp3DxIkTYWNjg127dsnN9/b2Rq1ateTKFi1aBEtLS/F9UFAQ6tWrB21tbRgYGKBhw4Z48OABNm7ciBkzZuDatWuQyWSQyWTYuHEjAPlbob5coty1axeaN28OLS0tODo6Ijg4WG69HTt2xOXLl3Hv3r0SeQAlIiJSxIYNG2BnZ4d58+bhn3/+wbx582BnZyf+zZXCyZMnUa9ePairq8PU1BQTJ07Ep0+fxPlv375Fr169oK2tDVNTUyxcuBDNmjWTu+Uo/a1QX847fvjhB8hkMrnzkK9dvHgRTk5O0NDQgLOzM65evSo3P/2tUEFBQWLS0KJFC8hksizL6NuYWJRgmpqaSElJQVhYGOLj4+Hs7JyhzoYNG/D9999DX18fv/zyC3x9fXO1jk+fPqFz585o2rQpwsLCEBwcjEGDBkEmk6F79+4YO3YsqlevjtjYWMTGxqJ79+5ZtjV58mSMGzcOoaGhsLGxQY8ePeQOQhUrVkT58uUxa9asIncAJSIiyk+JiYlZTunvLrhz5w4GDBiAtLQ0pKamyv3r4eGBGzdu5Kjd/PT48WO0b98edevWxbVr17By5Ur4+vrijz/+EOt4enri7Nmz2Lt3L44ePYrTp0/jypUrWbZ56dIlAJ/PW2JjY8X3X3v37h06dOgAe3t7hISEwNvbG+PGjcuy3QYNGiAiIgIAsHPnTsTGxmZZRt/GPhYlkCAIOHbsGA4fPoyRI0fiwYMHUFZWhrGxsVy9tLQ0bNy4EUuXLgUA/Pzzzxg7diyioqJgZW
WVo3UlJCQgPj4eHTp0QJUqVQAA1apVE+fr6OhARUUFJiYm32xr3Lhx+P777wEAM2bMQPXq1XH37l3Y2dmJdcqUKYMNGzZAEIQMy3t4eKBu3bpQUmK+TERExVtmty5/0b59exw4cAAAsH79+iwHNklLS0P79u0RExMjlllaWuLFixcZ6mb2d1VRK1asgIWFBZYtWwaZTAY7Ozs8efIEEyZMwLRp05CYmIhNmzZh27ZtaNmyJYDPCUOFChWybNPIyAgAYGBgkO05xbZt25CWlgZfX19oaGigevXqePToEYYOHZppfTU1NfH8qEyZMmLbmZXRt/EMrATZv38/dHR0oKGhgXbt2qF79+7w9vZGcnIy1NXVM4y3fPToUSQmJqJ9+/YAgHLlyqFVq1ZYv359jtdZpkwZuLu7o02bNujYsSMWL16M2NhYheKvWbOm+NrU1BTA5ydappeQkJDl8ulvtSIiIioNoqOjs51fWH0n07t16xZcXFzkzjsaNmyId+/e4dGjR7h//z4+fvyIevXqifP19fVha2ubL+uuWbMmNDQ0xDIXF5c8t0s5w8SiBGnevDlCQ0Nx584dJCcnY9OmTdDW1ka5cuWQlJSElJQUufq+vr549eoVNDU1oaKiAhUVFRw8eBCbNm0Sf/1QUlLK8CvG10+Y3LBhA4KDg9GgQQP4+/vDxsYG58+fz3X8qqqq4usvB6Ovf4XJLrFITU3F5cuXc71eIiKioubdu3dZTjt37hTrWVpaQllZOdM2lJWV0bdvX7my6OjoTNskyg9MLEoQbW1tWFtbo2LFilBR+f+73L50vg4PDxfLXr58icDAQGzfvh2hoaHidPXqVbx+/RpHjhwB8PnSY1xcnFxy8fXQsQDg5OQELy8vnDt3Dg4ODti2bRuAz5cYU1NT82X73r9/j8TExGyfdBkUFIShQ4dmuKeUiIioONHW1s5ySv9rfP/+/bO8jUkQBAwePDhH7eanatWqITg4WC6us2fPQldXF+bm5qhcuTJUVVXl+knEx8cjMjIy23ZVVVW/eU5RrVo1hIWFyV2pUeTHzsLk4+ODunXrQldXF8bGxujcubPYxyM7b968wfDhw2Fqagp1dXXY2Njg4MGDhRBx1phYlAJGRkaoXbs2zpw5I5b9/fffKFu2LH766Sc4ODiIk6OjI9q3by924m7WrBmeP3+OuXPn4t69e1i+fDn+/fdfsZ2oqCh4eXkhODgYDx48wJEjR3Dnzh2xn4WlpSWioqIQGhqKFy9e4MOHDwpvx/nz5+UOpl+TyWRQVlYWR6I4efKkwusiIiIqDqpWrQpfX18oKSlBWVlZ7l9fX19YW1sX2Lrj4+PlfpwMDQ3Fw4cPMWzYMDx8+BAjR47E7du3ERgYiOnTp8PT0xNKSkrQ1dVF3759MX78eJw4cQI3b96Eh4cHlJSUsv3x0NLSEseOHUNcXBxev36daZ2ePXtCJpNh4MCBCA8Px8GDBzF//vyC+gjyxcmTJzF8+HCcP38eR48excePH9G6detsO9WnpKSgVatWiI6ORkBAACIiIrB27VqYmZkVYuQZMbEoJQYMGICtW7eK79evXy8O2fa1rl27Yu/evXjx4gWqVauGFStWYPny5XB0dMTFixflRlfQ0tLC7du30bVrV9jY2GDQoEEYPny4+AtJ165d0bZtWzRv3hxGRkbw8/NTeBv8/PzQu3fvLA+g69evR2hoKNq0aQM7Ozs0bNhQXDY/O6UREREVJe7u7oiIiMD48ePx008/Yfz48YiIiIC7u3uBrjcoKAhOTk5y04wZM2BmZoaDBw/i4sWLcHR0xJAhQ+Dh4YEpU6aIyy5YsAAuLi7o0KEDXF1d0bBhQ1SrVi3bHxD/+usvHD16FBYWFnBycsq0jo6ODvbt24fr16/DyckJkydPxpw5c/J92/PToUOH4O7ujurVq8PR0REbN25ETEwMQkJCslxm/fr1ePXqFfbs2YOGDRvC0tISTZs2haOjYyFGnpFMKG
VnXI8ePYKFhQUePnwIc3NzuXnv378XR0TKbscujpKTk2Frawt/f/9i2YnpxYsXsLW1xeXLl2FlZYW7d+/C19cX0dHRsLS0hIeHB6ytrcXv0MjICOXKlQPw+Xt1cXFBr169MGLEiBL33RIRUfFUks87cisxMRFmZmb466+/4OHhIXU4OZLd95fd+ea33L17F1WrVsX169fh4OCQaZ327dujTJky0NLSQmBgIIyMjNCzZ09MmDAhyz43hYHDzZYSmpqa2Lx5c6ZDzBUH0dHRWLFihTgMrrW1NXx8fLKsn36Yvi1btoiXaJcuXYqZM2eiV69ekv7HIyIiKs2uXr2K27dvo169eoiPj8fvv/8OAHBzc5M4svz19u1buYFn1NXVoa6unmX9tLQ0jB49Gg0bNswyqQCA+/fv4/jx4+jVqxcOHjyIu3fvYtiwYfj48SOmT5+er9uQG7wVqhRp1qxZpk/fLg6cnZ2zfbhedvr164cNGzbA3NwcMTEx6Nu3L2rXro1///2Xt0gRERFJZP78+XB0dISrqysSExNx+vRp8W6DksLe3h76+vrilN2PogAwfPhw3LhxA9u3b8+2XlpaGoyNjbFmzRrUqVMH3bt3x+TJk7Fq1ar8DD/XeMWCSjxlZWW4u7uje/fuWLp0KWbNmoWwsDC0b98eLVq0wL59+6ClpSV1mERERKWGk5NTtn0ISorw8HC5DtXZXa0YMWIE9u/fj1OnTn3z9ilTU1OoqqrK3X1RrVo1xMXFISUlBWpqankPXgG8YkGlhqamJn777Tfcv38fY8eOhZqaGrS0tJhUEBERUYHQ1dWFnp6eOGWWWAiCgBEjRmD37t04fvy4eNt3dho2bIi7d+/KPe8rMjISpqamkiUVABMLKoXKlCmD+fPnIzIyEosXLxbLnzx5gtGjR2d42jcRERFRQRk+fDi2bNmCbdu2QVdXF3FxcYiLi0NycrJYp0+fPvDy8hLfDx06FK9evcKvv/6KyMhIHDhwALNmzcLw4cOl2AQREwsqtSpVqoTKlSuL7729vbF48WJUqVIFv//+O59ESkREhSL9r85UfOTX97Zy5UrEx8ejWbNmMDU1FSd/f3+xTkxMDGJjY8X3FhYWOHz4MC5duoSaNWti1KhR+PXXXzFx4sR8iUlR7GNB9D89evTA1atXcfnyZUyfPh0rVqyAt7c3PDw8oKqqKnV4RERUwqipqUFJSQlPnjyBkZER1NTUsn1AHBUNgiAgJSUFz58/h5KSUp5vPcrJQDJBQUEZylxcXIrcU8WZWBD9T/PmzXHhwgXs2LEDkyZNwv379zF06FAsXLgQc+fOLXFD4BERkbSUlJRgZWWF2NhYPHnyROpwKJe0tLRQsWJFKCnxBqAvmFgQpaOkpITu3bvjhx9+wOrVq/H7778jMjISp0+fZmJBRET5Tk1NDRUrVsSnT5+QmpoqdTiUQ8rKylBRUeEVpq8wsShFevfujWrVqmHSpEn53ra3tzf27NmD0NBQAIC7uzvevHmDPXv25LntlJQU2NjYICAgAM7OznluLyfU1NQwcuRI9O3bFwsXLsTIkSPFeTdv3oRMJoO9vX2hxEJERCWbTCaDqqoqb7ulYo+JRQ49W+JaqOszHvVfruq7u7tj06ZNAABVVVVUrFgRffr0waRJk6CiooJr167h4MGDWLlypbhMs2bNcPLkSQCfT6TLlSuH2rVro1+/fujSpUv+bUweqampYdy4cZgwYQKOHTtWqOvW09OTe4KlIAgYPnw4Tp8+jX79+mHGjBly41MTERERlVa8KawEadu2LWJjY3Hnzh2MHTsW3t7emDdvHgBg6dKl6NatG3R0dOSWGThwIGJjY3Hv3j3s3LkT9vb2+PnnnzFo0CApNiFLvXr1wpkzZ3Dz5k1J40hKSkLZsmWRlpYGX19fVK1aFZMmTcKbN28kjYuIiIhIakwsShB1dXWYmJigUqVKGDp0KFxdXbF3716kpqYiICAAHTt2zLCMlpYWTE
xMYG5uju+++w5z5szB6tWrsXbtWvz33/9fNZkwYQJsbGygpaWFypUrY+rUqfj48WOOYzt06BAaNWoEAwMDlC1bFh06dMC9e/fE+SkpKRgxYgRMTU2hoaGBSpUqyT323tDQEA0bNvzmI+4Lmra2Nnbu3Ilz586hUaNGSE5Oho+PD6pUqYKFCxfiw4cPksZHREREJBUmFiWYpqYmUlJSEBYWhvj4+Bz3T+jbty8MDQ2xa9cusUxXVxcbN25EeHg4Fi9ejLVr12LhwoU5jiUxMRGenp64fPkyjh07BiUlJfzwww/iGNBLlizB3r178c8//yAiIgJbt26FpaWlXBv16tXD6dOnc7zOguTi4oJTp05h7969sLe3x6tXr+Dp6Sl54kNEREQkFfaxKIEEQcCxY8dw+PBhjBw5Eg8ePICysjKMjY1ztLySkhJsbGwQHR0tlk2ZMkV8bWlpiXHjxmH79u347bffctRm165d5d6vX78eRkZGCA8Ph4ODA2JiYlC1alU0atQIMpkMlSpVytBGhQoV8ODBgxytrzDIZDJ07NgR7dq1w6ZNm7B9+3b06tVLnP/8+XMYGRlJGCERERFR4eEVixJk//790NHRgYaGBtq1a4fu3bvD29sbycnJUFdXz9WQaIIgyNX39/dHw4YNYWJiAh0dHUyZMgUxMTE5bu/OnTvo0aMHKleuDD09PfFqxJc23N3dERoaCltbW4waNQpHjhzJ0IampiaSkpJyvM7CoqKiAg8PDxw9ehQqKp9z9Q8fPqBu3bpo1aoVrly5InGERERERAWPiUUJ0rx5c4SGhuLOnTtITk7Gpk2boK2tjXLlyiEpKQkpKSk5aic1NRV37tyBlZUVACA4OBi9evVC+/btsX//fly9ehWTJ0/OcXsA0LFjR7x69Qpr167FhQsXcOHCBQAQ26hduzaioqIwc+ZMJCcn46effsKPP/4o18arV6+KzRWA8+fP48mTJ/jvv/9Qp04d9OzZE1FRUVKHRURERFRgmFiUINra2rC2tkbFihXFX84BoFatWgCA8PDwHLWzadMmvH79Wrx96dy5c6hUqRImT54MZ2dnVK1aNVe3JL18+RIRERGYMmUKWrZsiWrVquH169cZ6unp6aF79+5Yu3Yt/P39sXPnTrx69Uqcf+PGDTg5OeV4vVJq2rQpIiIixFuj/Pz8YGtri9GjR+P58+cSR0dERESU/5hYlAJGRkaoXbs2zpw5k2FeUlIS4uLi8OjRI5w/fx4TJkzAkCFDMHToUDRv3hwAULVqVcTExGD79u24d+8elixZgt27d+d4/YaGhihbtizWrFmDu3fv4vjx4/D09JSrs2DBAvj5+eH27duIjIzEjh07YGJiAgMDA7HO6dOn0bp1a8U+BAlYWVlhy5YtuHLlClq3bo2PHz9i8eLFsLa2xuPHj6UOj4iIiChfMbEoJQYMGICtW7dmKF+7di1MTU1RpUoVdOnSBeHh4fD398eKFSvEOp06dcKYMWMwYsQI1KpVC+fOncPUqVNzvG4lJSVs374dISEhcHBwwJgxY8Tna3yhq6uLuXPnwtnZGXXr1kV0dDQOHjwIJaXPu2hwcDDi4+Mz3B5VHDg5OeHw4cM4evQonJyc0LhxYz5Uj4iIiEocmSAIgtRBFKZHjx7BwsICDx8+hLm5udy89+/fIyoqClZWVtDQ0JAowoKRnJwMW1tb+Pv7w8XFRepwcq179+5wdHTEpEmTsq1X1L/DtLQ0xMfHw9DQEAAQGxuLTp06YfLkyXBzc8tVB3siIiIqmrI73yzJeMWilNDU1MTmzZvx4sULqUPJtZSUFNSoUQNjxoyROpQ8U1JSEpMKAJg7dy4uX76MH374AY0bN8bZs2cljI6IiIhIcUwsSpFmzZpl+vTtok5NTQ1TpkyBpqam1KHkO29vb0yaNAmampo4e/YsGjVqhM6dO+PWrVtSh0ZERESUK0wsiCSkr6+PP//8E3fu3MGAAQOgpKSEwMBAODg4YN
SoUVKHR0RERJRjTCyIigAzMzOsXbsWN27cgJubG9LS0qCqqip1WEREREQ5pvLtKkRUWKpVq4Y9e/bg7NmzsLOzE8tDQkJw5swZDB06FGpqahJGSERERJQ5XrEgKoIaNmyIsmXLAgAEQcC4ceMwevRo2NnZwc/PD2lpaRJHSERERCSPiQVREScIAnr06AETExNERUWhZ8+eqFevHo4dOyZ1aEREREQiJhZERZySkhIGDRqEu3fvYubMmdDV1UVISAhcXV3Rtm1bXLt2TeoQiYiIiIpGYrF8+XJYWlpCQ0MD9evXx8WLF7Osu3HjRshkMrmpKD4IjSi/aWtrY8qUKbh37x5GjRoFVVVVHD58GOfPn5c6NCIiIiLpEwt/f394enpi+vTpuHLlChwdHdGmTRs8e/Ysy2X09PQQGxsrTg8ePCjEiIuv3r17Y9asWVKH8U2rVq0qls/bKCxGRkZYvHgxbt++jTFjxsDDw0Ocd/nyZbx8+VLC6IiIiKi0kgmCIEgZQP369VG3bl0sW7YMAJCWlgYLCwuMHDkSEydOzFB/48aNGD16NN68eaPQ+rJ7xPr79+8RFRUFKyurDFdB2h71Umh9ijrUyidX9d3d3bFp0yYAgKqqKipWrIg+ffpg0qRJUFFRwbVr19CiRQs8ePAAL168gJWVVbbtbdiwAe7u7oqGnycpKSmwsrLC9u3b0bhx41wtm913WNJ9+PABdnZ2eP36NSZOnIhff/21RD5UkIiIqKjL7nyzJJP0ikVKSop4r/gXSkpKcHV1RXBwcJbLvXv3DpUqVYKFhQXc3Nxw8+bNLOt++PABCQkJ4vT27dt83YaipG3btoiNjcWdO3cwduxYeHt7Y968eQCApUuXolu3btDR0YGFhYXcFZ+xY8eievXqcmXdu3eXbDvU1NTQs2dPLFmyRLIYiqMnT55AX18f8fHx8PLyQtWqVbF+/XqkpqZKHRoRERGVApImFi9evEBqairKly8vV16+fHnExcVluoytrS3Wr1+PwMBAbNmyBWlpaWjQoAEePXqUaX0fHx/o6+uLk729fb5vR1Ghrq4OExMTVKpUCUOHDoWrqyv27t2L1NRUBAQEiLcXKSsrw8TERJx0dHSgoqIivjc2NsaiRYtgZWUFTU1NODo6IiAgAMDnEYpcXV3Rpk0bfLnY9erVK5ibm2PatGkAgNTUVHh4eIjL29raYvHixXKxBgUFoV69etDW1oaBgQEaNmwod0tbx44dsXfvXiQnJxfGR1ciWFlZ4cqVK9i8eTMqVqyIx48fw8PDA46Ojti/fz8kvjhJREREJZzkfSxyy8XFBX369EGtWrXQtGlT7Nq1C0ZGRli9enWm9b28vBAfHy9O4eHhhRyxdDQ1NZGSkoKwsDDEx8fD2dk5R8v5+Phg8+bNWLVqFW7evIkxY8bgl19+wcmTJyGTybBp0yZcunRJvKIwZMgQmJmZiYlFWloazM3NsWPHDoSHh2PatGmYNGkS/vnnHwDAp0+f0LlzZzRt2hRhYWEIDg7GoEGDIJPJxBicnZ3x6dMnXLhwIZ8/lZJNSUkJvXv3RkREBP766y8YGhri5s2b6NixI0JCQqQOj4iIiEowSZ+8Xa5cOSgrK+Pp06dy5U+fPoWJiUmO2lBVVYWTkxPu3r2b6Xx1dXWoq6uL7xMSEhQPuJgQBAHHjh3D4cOHMXLkSDx48ADKysowNjb+5rIfPnzArFmz8N9//8HFxQUAULlyZZw5cwarV69G06ZNYWZmhtWrV6NPnz6Ii4vDwYMHcfXqVaiofN6dVFVVMWPGDLFNKysrBAcH459//sFPP/2EhIQExMfHo0OHDqhSpQqAz0+cTk9LSwv6+vrsmK8gDQ0NeHp6on///pg9ezbu3r0rl1jGx8dDX19fwgiJiIiopJH0ioWamhrq1Kkj96CvtLQ0HDt2TDyp/ZbU1FRcv34dpqamBRVmsbF//37o6OhAQ0MD7dq1Q/fu3eHt7Y3k5GSoq6vLXR
HIyt27d5GUlIRWrVpBR0dHnDZv3ox79+6J9bp164YffvgBs2fPxvz581G1alW5dpYvX446derAyMgIOjo6WLNmDWJiYgAAZcqUgbu7O9q0aYOOHTti8eLFiI2NzRCLpqYmkpKS8viplG4GBgaYPXs2duzYIZbFxcWJt8tldcshERERUW5JfiuUp6cn1q5di02bNuHWrVsYOnQoEhMT0a9fPwBAnz594OX1/yMy/f777zhy5Aju37+PK1eu4JdffsGDBw8wYMAAqTahyGjevDlCQ0Nx584dJCcnY9OmTdDW1ka5cuWQlJSElJSUb7bx7t07AMCBAwcQGhoqTuHh4WI/CwBISkpCSEgIlJWVcefOHbk2tm/fjnHjxsHDwwNHjhxBaGgo+vXrJ7f+DRs2IDg4GA0aNIC/vz9sbGwyPI/h1atXMDIyystHQv+TPqnctWsX4uPjsWrVKlhbW2P69OklelADIiIiKhySJxbdu3fH/PnzMW3aNNSqVQuhoaE4dOiQ2KE7JiZG7tfs169fY+DAgahWrRrat2+PhIQEnDt3rkR3ys4pbW1tWFtbo2LFiuJtSQBQq1YtAMhR/xJ7e3uoq6sjJiYG1tbWcpOFhYVYb+zYsVBSUsK///6LJUuW4Pjx4+K8s2fPokGDBhg2bBicnJxgbW0td7XjCycnJ3h5eeHcuXNwcHDAtm3bxHn37t3D+/fv4eTkpMhHQdkYNmwYTp48ifr16yMxMRG///47qlSpgmXLluUo+SQiIiLKjKR9LL4YMWIERowYkem8oKAgufcLFy7EwoULCyGqksPIyAi1a9fGmTNnxCQjK7q6uhg3bhzGjBmDtLQ0NGrUCPHx8Th79iz09PTQt29fHDhwAOvXr0dwcDBq166N8ePHo2/fvggLC4OhoSGqVq2KzZs34/Dhw7CyssLff/+NS5cuic/OiIqKwpo1a9CpUydUqFABERERuHPnDvr06SPGcfr0aVSuXFnsg0H5q0mTJggODsauXbvg5eWFO3fuYOTIkVi5ciVCQ0OhqqoqdYhERERUzEh+xYIKx4ABA7B169Yc1Z05cyamTp0KHx8fVKtWDW3btsWBAwdgZWWF58+fw8PDA97e3qhduzYAYMaMGShfvjyGDBkCABg8eDC6dOmC7t27o379+nj58iWGDRsmtq+lpYXbt2+ja9eusLGxwaBBgzB8+HAMHjxYrOPn54eBAwfm4ydAX5PJZOjatStu3ryJFStWoHz58mjdujWTCiIiIlKI5E/eLmyKPnm7uEtOToatrS38/f1z3DFeKjdv3kSLFi0QGRmZ65GLSvJ3WNDevXuH1NRU8TO/fPkypk+fjtmzZ6NGjRoSR0dERFR88MnbVKJpampi8+bNePHihdShfFNsbCw2b97M4VALmY6OjtxnPmXKFBw8eBCOjo5wd3cXR/UiIiIiygwTi1KkWbNm4tO3i7IvT/YmaS1btgw//fQTBEHApk2bYGNjg/Hjx+P169dSh0ZERFRi+Pj4oG7dutDV1YWxsTE6d+6MiIiIHC+/fft2yGQydO7cueCCzCEmFkSUKWtra/j7++PixYto1qwZPnz4gPnz56Ny5cpYs2aN1OERERGVCCdPnsTw4cNx/vx5HD16FB8/fkTr1q2RmJj4zWWjo6Mxbtw4NG7cuBAi/TYmFkSUrbp16+L48eM4cOAAHBwc8ObNG6SlpUkdFhERUYlw6NAhuLu7o3r16nB0dMTGjRsRExODkJCQbJdLTU1Fr169MGPGDFSuXLmQos0eEwsi+iaZTIb27dsjNDQUfn5+8PDwEOcdOnQIBw8eRCkbB4KIiKhAxMfHAwDKlCmTbb3ff/8dxsbGcn+TpVYknmNR1PDX2OKL313BUlZWxs8//yy+T0lJwbBhwxAVFYVmzZph7ty5qFu3roQREhERFR1v375FQkKC+F5dXR3q6upZ1k9LS8Po0aPRsGFDODg4ZFnvzJkz8PX1RWhoaH6Gm2dMLNJRU1ODkp
ISnjx5AiMjI6ipqUEmk0kdFuWAIAhISUnB8+fPoaSkBDU1NalDKhU+fvyIrl27YunSpQgKCkK9evXQrVs3zJo1C9bW1lKHR0REJCl7e3u599OnT4e3t3eW9YcPH44bN27gzJkzWdZ5+/YtevfujbVr16JcuXL5FWq+4HMsvpKSkoLY2FgkJSVJEB3llZaWFkxNTZlYFLKYmBhMmzYNmzdvhiAIUFFRweDBgzF16lSUL19e6vCIiIgK1ZfzzfDwcJiZmYnl2V2xGDFiBAIDA3Hq1ClYWVll2XZoaCicnJygrKwsln25Y0NJSQkRERGoUqVKPm1J7jCxyIQgCPj06RNSU1MLOTrKC2VlZaioqPAqk4TCwsIwceJE/PvvvwCA48ePo3nz5hJHRUREVLhy84A8QRAwcuRI7N69G0FBQahatWq29d+/f4+7d+/KlU2ZMgVv377F4sWLYWNjI9kPrLwVKhMymQyqqqpQVVWVOhSiYqVmzZo4ePAgTpw4gYMHD8olFZcvX4ajoyP/XxEREaUzfPhwbNu2DYGBgdDV1UVcXBwAQF9fH5qamgCAPn36wMzMDD4+PtDQ0MjQ/8LAwAAAsu2XURg4KhQR5bvmzZtj3rx54vunT5+iWbNmcHBwwM6dOzmCFBER0f+sXLkS8fHxaNasGUxNTcXJ399frBMTE4PY2FgJo8wZXrEgogJ3+/ZtaGlpITIyEj/++CPq16+PuXPnokmTJlKHRkREJKmc/NgWFBSU7fyNGzfmTzB5xCsWRFTgmjZtirt372LatGnQ1tbGhQsX0LRpU3Ts2BE3btyQOjwiIiLKB0wsiKhQ6OnpYcaMGbh79y6GDBkCZWVl7N+/H3Xr1sWrV6+kDo+IiIjyiIkFERUqExMTrFy5EuHh4ejatSuGDBki93TR5ORkCaMjIiIiRTGxICJJ2NjYICAgAH/99ZdYFhISAgsLCyxYsADv37+XMDoiIiLKLSYWRCQpJaX/PwytXr0aL1++xNixY2Fra4u///5bfOgPERERFW1MLIioyFixYgV8fX1hZmaGmJgY9OnTB7Vr18ahQ4c4RC0REVERx8SCiIoMFRUV9O/fH5GRkfDx8YG+vj6uXbuGdu3aoVevXlKHR0RERNlgYkFERY6WlhYmTpyIe/fuwdPTE2pqamjcuLHUYREREVE2mFgQUZFVtmxZ/PXXX4iMjMSAAQPE8oCAAIwaNQrPnz+XMDoiIiJKj4kFERV5lSpVgqqqKgDg06dPmDBhApYuXYoqVargzz//RGJiosQREhERERMLIipWVFRUsGbNGtSuXRtv377FlClTULVqVaxZswafPn2SOjwiIqJSi4kFERU7LVu2xKVLl7Bt2zZYWVkhNjYWgwcPRo0aNXDixAmpwyMiIiqVmFgQUbGkpKSEHj164NatW1i0aBHKli2L27dv87kXREREEmFiQUTFmrq6On799Vfcu3cPvr6+aNmypThvx44duHXrloTRERERlR5MLIioRNDX10f//v3F98+ePUP//v3h4OCAgQMH4smTJxJGR0REVPIxsSCiEunDhw9wdXVFWloa1q1bB2tra0yePBnx8fFSh0ZERFQiMbEgohLJwsICu3fvxpkzZ9CgQQMkJydj1qxZqFKlChYtWoQPHz5IHSIREVGJwsSCiEq0hg0b4syZM9i9ezfs7Ozw8uVL/Pbbb3j06JHUoREREZUoKlIHQERU0GQyGTp37owOHTpgw4YNiIuLQ5UqVcT5N27cgIODg4QREhERFX9MLIio1FBRUcHAgQPlyq5cuYI6deqgVatWmDNnDpycnCSKjoiIqHjjrVBEVKqFhIRAVVUVR48eRe3atfHLL78gOjpa6rCIiIiKHSYWRFSqDRw4ELdv30bPnj0BAFu3boWtrS08PT3x8uVLiaMjIiIqPphYEFGpV7lyZWzduhUhISFwdXVFSkoKFi5cCBcXFz7Jm4iIKIeYWBAR/U/t2rVx9OhRHD58GLVq1cKvv/
4KJaXPh0lBEPDp0yeJIyQiIiq6mFgQEX2ldevWCAkJweDBg8WyHTt2wNHREXv37oUgCBJGR0REVDQxsSAiyoSSkhJUVP5/4LwFCxYgPDwcbm5uaNKkCYKDgyWMjoiIqOhhYkFElAOHDh3CxIkToaGhIT7Nu2vXroiIiJA6NCIioiKBiQURUQ4YGBjAx8cHd+7cQf/+/aGkpIRdu3ahevXqmDdvntThERERSY6JBRFRLpibm8PX1xdhYWHo2LEjUlNT+VA9IiIiMLEo8u7cuYMGDRrAxsYGdevWxc2bN7OsKwgCWrRoAQMDA7Hs3bt3aNOmDcqVKydX/jV3d3fIZDK8efMm/4InKsGqV6+OvXv3IjQ0FK6urmL50qVLsXTpUqSkpEgYHRERUeFjYlHEDR48GIMGDUJkZCQmTJgAd3f3LOsuXLgQVapUkStTVVXFhAkT8N9//2W53K5du6CqqppfIROVKo6OjuLr58+fY9KkSRg1ahTs7e3h7+/P52AQEVGpwcSiCHv27BkuX76MX375BQDQtWtXPHz4EHfv3s1Q9+bNm9izZw8mTpwoV66urp7hKkZ6T58+xaxZs7BgwYJ8j5+otDEwMMC8efNQvnx53Lt3Dz///DPq16+P48ePSx0aERFRgWNiUYQ9fPgQpqam4pCXMpkMFStWRExMjFy9jx8/YuDAgVi9ejWUlZVztY6BAwdi7ty50NXVzbe4iUorVVVVDBkyBHfv3sXvv/8OHR0dXL58GS1btkS7du1w584dqUMkIiIqMEwsSoAZM2agS5cuqFatWq6WW7duHSpWrIgWLVoUUGREpZOOjg6mTp2Ke/fuYcSIEVBRUcF///0HmUwmdWhEREQFholFEWZhYYHY2Fh8+vQJwOfO2TExMahYsaJcvZMnT2Lp0qWwtLREo0aNkJCQAEtLSzx//jzb9k+cOIHAwEBYWlrC0tISAFCzZk1cvXq1QLaHqLQxNjbG0qVLcevWLaxbtw7W1tbiPD8/P7x69UrC6IiIiPKXyrerkFSMjY1Ru3ZtbNmyBe7u7ti5cyfMzc3lTk4A4PTp0+Lr6Oho1KpVC9HR0d9sf+vWrXLvZTIZwsLCsh09iohyz9raWu7/bWhoKHr16gV9fX14eXlh5MiR0NTUlDBCIiKivOMViyJu9erVWL16NWxsbDB79mxs2LABADBgwADs3bs3R23UrFkTLi4uSEhIgLm5OXr37l2QIRPRN3z48AEODg548+YNJkyYABsbG2zYsAGpqalSh0ZERKQwmSAIgtRBFKZHjx7BwsICDx8+hLm5udThEFEplZqaiq1bt2LKlCl4+PAhAMDBwQGzZ89G+/bt2R+DiKgYy835po+PD3bt2oXbt29DU1MTDRo0wJw5c2Bra5vlMmvXrsXmzZtx48YNAECdOnUwa9Ys1KtXL1+3I7d4xYKISALKysro06cPIiMjMW/ePBgaGuLGjRvo168fkpKSpA6PiIgKycmTJzF8+HCcP38eR48excePH9G6dWskJiZmuUxQUBB69OiBEydOIDg4GBYWFmjdujUeP35ciJFnxCsWRERFwOvXrzF79mxUqVIFgwYNAvB5wIbo6GhYWVlJHB0REeVGXs43nz9/DmNjY5w8eRJNmjTJ0TKpqakwNDTEsmXL0KdPH0VCzhe8YkFEVAQYGhpizpw5YlIBAAEBAbCxscHw4cPx9OlTCaMjIiJFvH37FgkJCeL04cOHby4THx8PAChTpkyO15OUlISPHz/mapmCwMSCiKiIOnnyJD59+oQVK1bA2toaM2bMwLt376QOi4iIcsje3h76+vri5OPjk239tLQ0jB49Gg0bNoSDg0OO1zNhwgRUqFABrq6ueQ05T3grVCFr7RNY6OssSEe83KQOgahECwoKwm+//YZLly4B+DwM9fTp0zFw4ECoqqpKHB0REWXmy/lmeHg4zMzMxHJ1dXWoq6tnudzQoUPx77//4syZMzk+T509ezbmzp2LoKAg1K
xZM8+x5wWvWBARFWHNmjXDhQsX8M8//8Da2hrPnj3D8OHD0aNHD6lDIyKib9DV1YWenp44ZZdUjBgxAvv378eJEydynFTMnz8fs2fPxpEjRyRPKgAmFkRERZ5MJkO3bt0QHh6O5cuXw9jYWK4vRlpamoTRERFRXgiCgBEjRmD37t04fvx4jgfsmDt3LmbOnIlDhw7B2dm5gKPMGSYWRETFhKqqKoYNG4aoqCi0bt1aLPfx8UGHDh1w/fp1CaMjIiJFDB8+HFu2bMG2bdugq6uLuLg4xMXFITk5WazTp08feHl5ie/nzJmDqVOnYv369bC0tBSXkbofHhMLIqJiRktLS3ydnJyMBQsW4MCBA3B0dES/fv3EB+4REVHRt3LlSsTHx6NZs2YwNTUVJ39/f7FOTEwMYmNj5ZZJSUnBjz/+KLfM/PnzpdgEkYqkayciojzR1NREcHAwJk+ejICAAGzcuBHbt2/HqFGjMHHiRBgaGkodIhERZSMn4ygFBQXJvY+Oji6YYPKoSFyxWL58OSwtLaGhoYH69evj4sWLOVpu+/btkMlk6Ny5c8EGSERUhNnY2GDHjh04f/48mjRpgvfv32Pu3LmoUqUK9u/fL3V4RERUSkieWPj7+8PT0xPTp0/HlStX4OjoiDZt2uDZs2fZLhcdHY1x48ahcePGhRQpEVHRVr9+fQQFBWHfvn2oXr063r59C1tbW6nDIiKiUkLyxGLBggUYOHAg+vXrB3t7e6xatQpaWlpYv359lsukpqaiV69emDFjBipXrlyI0RIRFW0ymQwdOnTAtWvXcOrUKVStWlWc9/vvv+Pff//N0WV3IiKi3JI0sUhJSUFISIjcUwKVlJTg6uqK4ODgLJf7/fffYWxsDA8Pj8IIk4io2FFWVoaLi4v4/tq1a/D29kb79u3RsmVLXL58WcLoiIioJJI0sXjx4gVSU1NRvnx5ufLy5csjLi4u02XOnDkDX19frF27Nkfr+PDhAxISEsTp7du3eY6biKi4sbCwgKenJ9TU1HDixAnUrVsXP//8M+7duyd1aEREVEJIfitUbrx9+xa9e/fG2rVrUa5cuRwt4+PjA319fXGyt7cv4CiJiIqeMmXKYP78+YiMjETv3r0hk8ng7++PatWqYdSoUXj9+rXUIRIRUTEnaWJRrlw5KCsr4+nTp3LlT58+hYmJSYb69+7dQ3R0NDp27AgVFRWoqKhg8+bN2Lt3L1RUVDL95c3Lywvx8fHiFB4eXmDbQ0RU1FWqVAmbN2/G1atX0bZtW3z8+BF+fn5QVlaWOjQiIirmJH2OhZqaGurUqYNjx46JQ8ampaXh2LFjGDFiRIb6dnZ2GZ4sO2XKFLx9+xaLFy+GhYVFhmXU1dWhrq4uvk9ISMjfjSAiKoYcHR3x77//4vjx43j58iX09PQAfB5PPSAgAD/88ANUVFRw584drF+/HtHR0bC0tET//v3lOoQTERF9IfkD8jw9PdG3b184OzujXr16WLRoERITE9GvXz8Anx9hbmZmBh8fH2hoaMDBwUFueQMDAwDIUE5ERN/WokULufe7du3CTz/9BFtbW7i6umLlypWQyWQQBAEymQxz586Fr68v3N3dpQmYiIiKLMkTi+7du+P58+eYNm0a4uLiUKtWLRw6dEjs0B0TEwMlpWLVFYSIqNhKSUlBuXLlEBERgYiIiEzreHh4oFGjRrC2ti7k6IiIqCiTCaVsQPNHjx7BwsICDx8+hLm5eaGvv7VPYKGvsyAd8XKTOgQiymcJCQlo164dzp07l+l8ZWVljB8/Hj4+PoUcGRFR8SD1+aZUeCmAiIjk6OnpoWLFilleLRYEAdHR0YUbFBERFXlMLIiIKANLS0vIZLJM58lkMlhaWhZuQEREVOQxsSAiogz69++PrO6UFQQBHh4ehRwREREVdUwsiIgog6pVq8LX1xdKSkpQVlaW+/ePP/6AlpaW1CESEVERI/moUEREVDS5u7ujUaNG8PX1FZ9jUb
16dQwfPhy7d+/GyZMnoampKXWYRERURDCxICKiLFlbW8uN/nTv3j2oqKjg0qVLGDRoEDZv3pxlXwwiIipdeCsUERHlWJUqVbBjxw4oKytjy5YtmD9/vtQhERFREcHEgoiIcqVFixZYvHgxAGDChAk4ePCgxBEREVFRwMSCiIhybdiwYRg0aBAEQUCPHj1w69YtqUMiIiKJMbEgIqJck8lkWLp0KRo3boyEhATMmTNH6pCIiEhiTCyIiEghampq2LlzJ7y8vLBmzRqpwyEiIolxVCgiIlKYkZERZs2aJXUYRERUBPCKBRER5YtPnz7B09MTGzZskDoUIiKSAK9YEBFRvvDz88PChQuhpqYGW1tbNGjQQOqQiIioEPGKBRER5YtevXqha9euSElJQZcuXfDw4UOpQyIiokLExIKIiPKFkpISNm3aBEdHRzx9+hSdO3dGUlKS1GEREVEhYWJBRET5RltbG4GBgTAyMsKVK1fQv39/CIIgdVhERFQImFgQEVG+qlSpEnbu3AkVFRX4+/vDx8dH6pCIiKgQMLEgIqJ817hxYyxfvhxaWlqwtbWVOhwiIioEHBWKiIgKxKBBg/D999/DzMxM6lCIiKgQ8IoFEREVmPRJxaNHj/Dy5UsJoyEiooLExIKIiArchQsX4OzsjG7duuHjx49Sh0NERP/z/v37fGuLiQURERU4bW1tJCYm4sSJE/D09JQ6HCKiUi0tLQ0zZ86EmZkZdHR0cP/+fQDA1KlT4evrq3C7TCyIiKjAOTg4YOvWrZDJZFi2bBnWrFkjdUhERKXWH3/8gY0bN2Lu3LlQU1MTyx0cHLBu3TqF22ViQUREhaJTp06YOXMmAGD48OE4deqUxBEREZVOmzdvxpo1a9CrVy8oKyuL5Y6Ojrh9+7bC7TKxICKiQjNp0iR0794dnz59QteuXfHgwQOpQyIikpSPjw/q1q0LXV1dGBsbo3PnzoiIiPjmcjt27ICdnR00NDRQo0YNHDx4MMfrfPz4MaytrTOUp6Wl5akfHBMLIiIqNDKZDOvXr4eTkxNevHiByZMnSx0SEZGkTp48ieHDh+P8+fM4evQoPn78iNatWyMxMTHLZc6dO4cePXrAw8MDV69eRefOndG5c2fcuHEjR+u0t7fH6dOnM5QHBATAyclJ4W3hcyyIiKhQaWlpITAwEDNnzsSCBQukDoeISFKHDh2Se79x40YYGxsjJCQETZo0yXSZxYsXo23bthg/fjwAYObMmTh69CiWLVuGVatWfXOd06ZNQ9++ffH48WOkpaVh165diIiIwObNm7F//36Ft4VXLIiIqNBZWFhgzZo10NHRkToUIqIiJT4+HgBQpkyZLOsEBwfD1dVVrqxNmzYIDg7O0Trc3Nywb98+/Pfff9DW1sa0adNw69Yt7Nu3D61atVI4dl6xICIiSQmCAB8fH9jZ2aFLly5Sh0NElG/evn2LhIQE8b26ujrU1dWzrJ+WlobRo0ejYcOGcHBwyLJeXFwcypcvL1dWvnx5xMXF5Ti2xo0b4+jRozmunxO8YkFERJL6+++/MXnyZPTu3RvXrl2TOhwionxjb28PfX19cfLx8cm2/vDhw3Hjxg1s3769QOO6dOkSLly4kKH8woULuHz5ssLtMrEgIiJJ9ezZE61atUJSUhLc3Nzw/PlzqUMiIsoX4eHhiI+PFycvL68s644YMQL79+/HiRMnYG5unm27JiYmePr0qVzZ06dPYWJikqO4hg8fjocPH2Yof/z4MYYPH56jNjLDxIKIiCSloqKC7du3w9raGg8ePMCPP/6IlJQUqcMiIsozXV1d6OnpiVNmt0EJgoARI0Zg9+7dOH78OKysrL7ZrouLC44dOyZXdvToUbi4uOQorvDwcNSuXTtDuZOTE8LDw3PURmaYWBARkeTKlCmDvXv3QldXF6dOncKvv/4qdUhERIVi+PDh2LJlC7Zt2wZdXV3ExcUhLi4OycnJYp0+ffrIXe349ddfcejQIf
z111+4ffs2vL29cfnyZYwYMSJH61RXV89wxQMAYmNjoaKieBdsJhZERFQkVKtWDX5+fpDJZFi1ahVWrlwpdUhERAVu5cqViI+PR7NmzWBqaipO/v7+Yp2YmBjExsaK7xs0aIBt27ZhzZo1cHR0REBAAPbs2ZNth+/0WrduDS8vL3EEKgB48+YNJk2axFGhiIioZPj+++8xe/ZsTJ48GaqqqlKHQ0RU4ARB+GadoKCgDGXdunVDt27dFFrn/Pnz0aRJE1SqVEl8IF5oaCjKly+Pv//+W6E2ASYWRERUxIwfPx4dO3ZEtWrVpA6FiKhEMjMzQ1hYGLZu3Ypr165BU1MT/fr1Q48ePfL0ow4TCyIiKlJkMplcUvHy5UuoqalBV1dXwqiIiEoWbW1tDBo0KF/bZGJBRERF1s2bN9GxY0fUrFkTu3btgpISuwYSEeWHO3fu4MSJE3j27BnS0tLk5k2bNk2hNplYEBFRkfXu3Ts8efIEUVFRmD59OmbOnCl1SERExd7atWsxdOhQlCtXDiYmJpDJZOI8mUymcGKh0E8/J06cUGhlREREuVG/fn2sWbMGAPDHH3/gn3/+kTgiIqLi748//sCff/6JuLg4hIaG4urVq+J05coVhdtVKLFo27YtqlSpgj/++CPTp/YRERHllz59+mDcuHEAAHd3d1y9elXiiIiIirfXr18rPKJUdhRKLB4/fowRI0YgICAAlStXRps2bfDPP//wSalERFQgZs+ejbZt2yI5ORlubm6ZPtiJiIhyplu3bjhy5Ei+t6tQH4ty5cphzJgxGDNmDK5cuYINGzZg2LBhGDZsGHr27AkPDw84Ojrmd6xERFRKKSsrw8/PD9999x0iIiIwduxYbNmyReqwiIiKJWtra0ydOhXnz59HjRo1MgwxO2rUKIXalQk5eSrHNzx58gRr1qzB7NmzoaKigvfv38PFxQWrVq1C9erV89p8vnr06BEsLCzw8OFDmJubF/r6W/sEFvo6C9IRLzepQyCiUiQyMhJeXl5Ys2YNypYtK3U4RESZkvp881usrKyynCeTyXD//n2F2lV4VKiPHz8iMDAQ69evx9GjR+Hs7Ixly5ahR48eeP78OaZMmYJu3bohPDxc0VUQERHJsbGxwc6dO6UOg4ioWIuKiiqQdhVKLEaOHAk/Pz8IgoDevXtj7ty5cHBwEOdra2tj/vz5qFChQr4FSkRE9LU1a9agcuXKcHV1lToUIqJiJyUlBVFRUahSpQpUVPL+FAqFOm+Hh4dj6dKlePLkCRYtWiSXVHxRrlw5DktLREQFZuvWrRg8eDB++ukn3LlzR+pwiIiKjaSkJHh4eEBLSwvVq1dHTEwMgM8XD2bPnq1wuwolFtOnT0e3bt2grq4uV/7p0yecOnUKAKCiooKmTZsqHBgREVF2unbtiu+++w6vX7+Gm5sb4uPjpQ6JiKhY8PLywrVr1xAUFAQNDQ2x3NXVFf7+/gq3q1Bi0bx5c7x69SpDeXx8PJo3b65wMERERDmloaGBXbt2wczMDLdu3UKvXr2QmpoqdVhEREXenj17sGzZMjRq1EjuqdvVq1fHvXv3FG5XocRCEAS5IL54+fIltLW1FQ6GiIgoN0xNTbFnzx5oaGjgwIEDmDx5stQhEREVec+fP4exsXGG8sTExEzP8XMqV700unTpAuDzMFTu7u5yt0KlpqYiLCwMDRo0UDgYIiKi3HJ2dsb69evRs2dPzJkzBzVq1ECvXr2kDouIqMhydnbGgQMHMHLkSAAQk4l169bBxcVF4XZzlVjo6+sD+HzFQldXF5qamuI8NTU1fPfddxg4cKDCwRARESmiR48eCAsLw5w5cxAbGyt1OERERdqsWbPQrl07hIeH49OnT1i8eDHCw8Nx7tw5nDx5UuF2c5VYbNiwAQBgaWmJcePG8bYnIiIqMv744w907NiRV86JiL6hUaNGuHbtGnx8fFCjRg0cOXIEtWvXRnBwMGrUqKFwuwoNWDt9+n
SFV0hERFQQlJWV5ZKKd+/eQUVFRW7EEyKi0u7jx48YPHgwpk6dirVr1+Zr2zlOLGrXro1jx47B0NAQTk5O2XbsuHLlSr4ER0REpIjo6Gi4ubnB0dERmzZtylNnRCKikkRVVRU7d+7E1KlT873tHCcWbm5uYmftzp0753sgRERE+eX+/fu4efMmwsLC4OjoiLFjx0odEhFRkdG5c2fs2bMHY8aMydd2c5xYpL/9ibdCERFRUdaiRQssXLgQo0aNwm+//QZ7e3u0a9dO6rCIiIqEqlWr4vfff8fZs2dRp06dDP2mR40apVC7CvWxICIiKupGjBiBsLAwrFu3Dj///DMuXLgAOzs7qcMiIpKcr68vDAwMEBISgpCQELl5Mpms4BMLQ0PDHN+jmtlTuYmIiAqTTCbD8uXLcevWLZw9exZubm44f/48DA0NpQ6NiEhSUVFRBdJujhOLRYsWFUgAREREBUVNTQ07d+5E3bp1ERkZiVGjRuHvv/+WOiwioiIhJSUFUVFRqFKlClRU8n4jU45b6Nu3b55XRkREVNjKly+PwMBAjB49Gj4+PlKHQ0QkuaSkJIwcORKbNm0CAERGRqJy5coYOXIkzMzMMHHiRIXaVcppxYSEBLnX2U25tXz5clhaWkJDQwP169fHxYsXs6y7a9cuODs7w8DAANra2qhVqxZ/fSIiomw5OTkhKCgI5ubmUodCRCQ5Ly8vXLt2DUFBQXLP+nF1dYW/v7/C7eaqj0VsbCyMjY1hYGCQaX8LQRAgk8mQmpqa4wD8/f3h6emJVatWoX79+li0aBHatGmDiIgIGBsbZ6hfpkwZTJ48GXZ2dlBTU8P+/fvRr18/GBsbo02bNjleLxERlS7p/27t3LkTZmZm+O677ySMiIhIGnv27IG/vz++++47uWNj9erVce/ePYXbzXFicfz4cZQpUwYAcOLECYVX+LUFCxZg4MCB6NevHwBg1apVOHDgANavX5/pZZhmzZrJvf/111+xadMmnDlzhokFERF90z///IPu3bvDxMQEly5d4lUMIip1nj9/nukP+ImJiXl6oGiOE4umTZtm+jovUlJSEBISAi8vL7FMSUkJrq6uCA4O/ubygiDg+PHjiIiIwJw5c/IlJiIiKtnat2+PGjVq4Pr16+jcuTNOnz4NTU1NqcMiIio0zs7OOHDgAEaOHAng/6/orlu3Di4uLgq3q3D379evX8PX1xe3bt0CANjb26Nfv37iVY2cePHiBVJTU1G+fHm58vLly+P27dtZLhcfHw8zMzN8+PABysrKWLFiBVq1apVp3Q8fPuDDhw/i+7dv3+Y4PiIiKnl0dHQQGBiIunXrIiQkBB4eHti6dWuefqUjIipOZs2ahXbt2iE8PByfPn3C4sWLER4ejnPnzuHkyZMKt5vjztvpnTp1CpaWlliyZAlev36N169fY8mSJbCyssKpU6cUDiandHV1ERoaikuXLuHPP/+Ep6cngoKCMq3r4+MDfX19cbK3ty/w+IiIqGizsrJCQEAAVFRU4Ofnx6veRFSqNGrUCKGhofj06RNq1KiBI0eOwNjYGMHBwahTp47C7coEQRByu1CNGjXg4uKClStXQllZGQCQmpqKYcOG4dy5c7h+/XqO2klJSYGWlhYCAgLQuXNnsbxv37548+YNAgMDc9TOgAED8PDhQxw+fDjDvK+vWDx+/Bj29vZ4+PChJPfVtvbJ2TYVF0e83KQOgYhIYStXrsSwYcMgk8mwd+9edOjQQeqQiKgEePToESwsLCQ738yMp6cnZs6cCW1tbZw6dQoNGjTIl2dXpKfQFYu7d+9i7NixYlIBAMrKyvD09MTdu3dz3I6amhrq1KmDY8eOiWVpaWk4duxYru7vSktLk0se0lNXV4eenp446erq5rhdIiIq2YYOHYohQ4ZAEARcuHBB6nCIiArM0qVL8e7dOwBA8+bN8erVq3xfh0JpSu3atXHr1i3Y2trKld+6dQuOjo65asvT0xN9+/
aFs7Mz6tWrh0WLFiExMVEcJapPnz4wMzMTH2rk4+MDZ2dnVKlSBR8+fMDBgwfx999/Y+XKlYpsChERlXJLlizB999/z6sVRFSifenG0Lp1awiCgODgYBgaGmZat0mTJgqtI8eJRVhYmPh61KhR+PXXX3H37l1xDPDz589j+fLlmD17dq4C6N69O54/f45p06YhLi4OtWrVwqFDh8QO3TExMVBS+v8LK4mJiRg2bBgePXoETU1N2NnZYcuWLejevXuu1ktERAQAqqqqcklFSkoKlJSU8v0WASIiKc2bNw9DhgyBj48PZDIZfvjhh0zr5faZdHLL5rSPhZKSEmQyGb5VPS/BFAap73ljHwsioqLr2bNn+PHHH+Hk5ITFixdLHQ4RFVNSn29m5927d9DT08vyYdQAoK+vr1DbOe5jERUVhfv37yMqKirb6f79+woFQkREJLULFy7g9OnTWLJkCdatWyd1OERUSpw6dQodO3ZEhQoVIJPJsGfPnm8us3XrVjg6OkJLSwumpqbo378/Xr58mWV9T09PJCYmQkdHBydOnICVlZXcyKnpJ0XlOLGoVKlSjiciIqLiqGPHjpgxYwYAYNiwYThz5ozEERFRaZCYmAhHR0csX748R/XPnj2LPn36wMPDAzdv3sSOHTtw8eJFDBw4MMtl0nfebtGiRdHpvP1FeHg4YmJikJKSIlfeqVOnPAVFREQklSlTpuD69esICAhA165dcenSJVSsWFHqsIioBGvXrh3atWuX4/rBwcGwtLTEqFGjAHx+Ns/gwYOzfSZPkeq8nd79+/fxww8/4Pr163L9Lr48tbQo97EgIiLKjpKSEjZu3Ig7d+7g2rVrcHNzw5kzZ6CtrS11aERUzLx9+xYJCQnie3V1dairq+e5XRcXF0yaNAkHDx5Eu3bt8OzZMwQEBKB9+/ZZLlMYnbcVeo7Fr7/+CisrKzx79gxaWlq4efMmTp06BWdn5yyfgE1ERFRcaGtrIzAwEEZGRggNDRV/FSQiyg17e3u5vgtfHp+QVw0bNsTWrVvRvXt3qKmpwcTEBPr6+tneStW5c2fExcUhISEBgiAgIiICr1+/zjDl5RYpha5YBAcH4/jx4yhXrhyUlJSgpKSERo0awcfHB6NGjcLVq1cVDoiIiKgoqFSpEnbt2oWhQ4di/PjxUodDRMVQeHg4zMzMxPf5cbXiS7u//vorpk2bhjZt2iA2Nhbjx4/HkCFD4Ovrm+2y6Ttv5/ew2gq1lpqaKj7Buly5cnjy5AlsbW1RqVIlRERE5GuAREREUmnUqBFCQ0OhrKwsdShEVAzp6upCT08v39v18fFBw4YNxR89atasCW1tbTRu3Bh//PEHTE1NMyyTkJAgxuLk5ISkpKQs21c0ZoUSCwcHB1y7dg1WVlaoX78+5s6dCzU1NaxZswaVK1dWKBAiIqKiKH1Scfz4cRgZGaFGjRoSRkREpV1SUlKGqw1fjlVZPXPO0NAQsbGxMDY2hoGBgdg3Oj1BEPLUx0KhxGLKlClITEwEAPz+++/o0KEDGjdujLJly8Lf31+hQIiIiIqyPXv24Mcff4SFhQUuXbqEcuXKSR0SEZUQ7969w927d8X3UVFRCA0NRZkyZVCxYkV4eXnh8ePH2Lx5M4DPQ2MPHDgQK1euFG+FGj16NOrVq4cKFSpkuo7jx4+jTJkyAIATJ04UyHYolFi0adNGfG1tbY3bt2/j1atXMDQ0zDT7ISIiKu4aN26MSpUq4f79++jWrRuOHDkCVVVVqcMiohLg8uXLaN68ufje09MTANC3b19s3LgRsbGxiImJEee7u7vj7du3WLZsGcaOHQsDAwO0aNEi2+FmmzZtmunr/CQTsrpekkMPHz4EAFhYWORLQAVN6kest/YJLPR1FqQjXm5Sh0BEVGhu3ryJ7777Du/evcOwYcNy/DArIipdpD7fzExYWFiO69asWVOhdSh0xeLTp0+YMWMGlixZIj7BT0dHByNHjsT06dP5Cw
4REZVI1atXx7Zt2+Dm5oYVK1agRo0aGDJkiNRhERF9U61atcTnz33rDqNCfY7FyJEjsWbNGsydOxdXr17F1atXMXfuXPj6+nKsbyIiKtE6duyIWbNmAfj89/DkyZMSR0RE9G1RUVG4f/8+oqKisHPnTlhZWWHFihXiufyKFStQpUoV7Ny5U+F1KHTFYtu2bdi+fbvco8dr1qwJCwsL9OjRAytXrlQ4ICIioqJuwoQJCAsLg5+fH3bv3l1g9ysTEeWXSpUqia+7deuGJUuWyD2p+8u5/NSpU9G5c2eF1qFQYqGurg5LS8sM5VZWVlBTU1MoECIiouJCJpPB19cXbdq0QZ8+faQOh4goV65fvw4rK6sM5VZWVggPD1e4XYVuhRoxYgRmzpyJDx8+iGUfPnzAn3/+iREjRigcDBERUXGhqamJvn37ivcqp6WlIS0tTeKoiIi+rVq1avDx8UFKSopYlpKSAh8fH1SrVk3hdnN8xaJLly5y7//77z+Ym5vD0dERAHDt2jWkpKSgZcuWCgdDRERUHCUkJOCXX35B7dq14e3tLXU4RETZWrVqFTp27Ahzc3NxBKiwsDDIZDLs27dP4XZznFjo6+vLve/atavc++Iy3CwREVF+O3LkCPbt24d9+/bBwcEBP/74o9QhERFlqV69erh//z62bt2K27dvAwC6d++Onj17QltbW+F2c5xYbNiwQeGVEBERFaY7d+6gb9++ePHiBfT19bFx40ZUr15drs7x48cxceJEvHv3DjKZDN9//z1mz54NJSX5u4Td3d2xadMmvH79GgYGBgCAv//+G/Pnz0dqairKly+PDRs2wNPTEwsWLEDfvn1hbW2NWrVqFdLWEhHlnra2NgYNGpSvbSrUx+KL58+f48yZMzhz5gyeP3+eXzERERHlyeDBgzFo0CBERkZiwoQJcHd3z1DH0NAQ27dvR3h4OEJCQnDu3Dls3rxZrs6uXbsyPJvp9u3bGD9+PA4dOoQbN26gX79+GDp0KObMmYM2bdogKSkJbm5uePbsWUFuIhFRkaNQYpGYmIj+/fvD1NQUTZo0QZMmTVChQgV4eHggKSkpv2MkIiLKsWfPnuHy5cv45ZdfAHy+dffhw4e4e/euXD0nJydUrlwZAKChoYFatWohOjpanP/06VPMmjULCxYskFvuxo0bqFmzJkxNTQEA7du3x7///ov4+Hhs374dNjY2iImJQdeuXeU6RhIRlXQKJRaenp44efIk9u3bhzdv3uDNmzcIDAzEyZMnMXbs2PyOkYiIKMcePnwIU1NTqKh8vttXJpOhYsWKiImJyXKZuLg4BAQEoEOHDmLZwIEDMXfuXOjq6srVdXR0xJUrVxAZGQkA2LJlCwRBwIMHD2BgYIC9e/dCX18fZ86cwZgxYwpgC4mIiiaFEoudO3fC19cX7dq1g56eHvT09NC+fXusXbsWAQEB+R0jERFRgUlISEDHjh3x22+/wdnZGQCwbt06VKxYES1atMhQv2rVqli1ahX69OkDZ2dnvHz5EgYGBmIiY2trCz8/P1hYWGR6CxYRUUmlUGKRlJSE8uXLZyg3NjbmrVBERCQpCwsLxMbG4tOnTwAAQRAQExODihUrZqj79u1btG3bFm5ubvD09BTLT5w4gcDAQFhaWooPhK1ZsyauXr0KAPjxxx9x/vx5XL58GUOHDkVycjKsra3F5du1a4fIyEjUrVu3ALeUiEgxlStXxsuXLzOUv3nzRrxFVBEKJRYuLi6YPn063r9/L5YlJydjxowZcHFxUTgYIiKivDI2Nkbt2rWxZcsWAJ+vspubm8ud+APAu3fv0LZtW7Rt2xZTpkyRm7d161Y8fPgQ0dHRYr+LsLAwODk5AQBiY2MBAKmpqZgwYQKGDx8OLS0tuTY0NDTE1yEhIbh3716+bicRkaKio6ORmpqaofzDhw94/Pixwu3meLjZ9BYtWoS2bdtmeECehoYGDh8+rHAwRERE+WH16tVwd3fHrFmzoK
enJw6ZPmDAAHTq1AmdOnXC4sWLcfHiRSQmJmLXrl0AgG7dumHy5MnfbL9///548OABPnz4gO+//x6zZs3Ksu7hw4fRuXNnVK5cGcHBwdDT08ufjSQiyqW9e/eKrw8fPiz3nLrU1FQcO3ZMvEqrCJkgCIIiCyYlJck9VKNatWro1asXNDU1FQ6mMDx69AgWFhZ4+PAhzM3NC339rX0CC32dBemIl5vUIRARFWlPnjxB3bp18eTJE3Ts2BF79uzJ8KwMIipZpD7fzMqXY49MJsPXKYCqqiosLS3x119/yQ1kkRu5vmLx8eNH2NnZYf/+/Rg4cKBCKyUiIiotKlSogD179qBx48bYt28fpk6dij///FPqsIioFEpLSwMAWFlZ4dKlSyhXrly+tp/rn0xUVVXl+lYQERFR9urWrQtfX18AwKxZs+Dn5ydxRERUmkVFReV7UgEo2Mdi+PDhmDNnDtatWycOr0dERERZ69WrF8LCwjB37lz0798fNjY2qFOnjtRhEVEpsWTJEgwaNAgaGhpYsmRJtnVHjRql0DoUygouXbqEY8eO4ciRI6hRowa0tbXl5n/pBEdERET/b9asWbhx4wYOHjyI1atXY82aNVKHRESlxMKFC9GrVy9oaGhg4cKFWdaTyWSFm1gYGBiga9euCq2QiIgot9oe9ZI6hHzzcaANbM2TsXLFSqlDIaJSJCoqKtPX+SlXiUVaWhrmzZuHyMhIpKSkoEWLFvD29i7yI0EREREVFaraGrD8sT6UlZUBQByZRSaTSRkWEVGe5Sqx+PPPP+Ht7Q1XV1doampiyZIleP78OdavX19Q8REREZVYHz58wJAhQ+Do6IjRo0dLHQ4RlRKenp6ZlstkMmhoaMDa2hpubm4oU6ZMrtrNVWKxefNmrFixAoMHDwYA/Pfff/j++++xbt06jslNRESUSzt37sTGjRuhpKQEe3t7tG7dWuqQiKgUuHr1Kq5cuYLU1FTY2toCACIjI6GsrAw7OzusWLECY8eOxZkzZ2Bvb5/jdnOVDcTExKB9+/bie1dXV8hkMjx58iQ3zRARERGAHj16oH///khLS0P37t0RGRkpdUhEVAq4ubnB1dUVT548QUhICEJCQvDo0SO0atUKPXr0wOPHj9GkSROMGTMmV+3mKrH49OkTNDQ05MpUVVXx8ePHXK2UiIiIPt92sGLFCjRo0ABv3rxBp06dEB8fL3VYRFTCzZs3DzNnzoSenp5Ypq+vD29vb8ydOxdaWlqYNm0aQkJCctVurm6FEgQB7u7uUFdXF8vev3+PIUOGyA05y+FmiYiIckZdXR07d+5E3bp1ERERgR49emDfvn1i524iovwWHx+PZ8+eZbjN6fnz50hISADweRTYlJSUXLWbqysWffv2hbGxMfT19cXpl19+QYUKFeTKiIiIKOdMTEwQGBgITU1N/Pvvv5g0aZLUIRFRCebm5ob+/ftj9+7dePToER49eoTdu3fDw8MDnTt3BgBcvHgRNjY2uWo3V1csNmzYkKvGiYiIKGdq166NDRs2YOjQoWjVqpXU4RBRCbZ69WqMGTMGP//8Mz59+gQAUFFRQd++fcWH59nZ2WHdunW5alehB+QRERFR/uvevTtat24NQ0NDqUMhohJMR0cHa9euxcKFC3H//n0AQOXKlaGjoyPWqVWrVq7bZWJBRERUhKRPKiIjI6Gjo4MKFSpIGBERlVQ6OjrisyrSJxWK4sMniIiIiqCTJ0+iXr166Ny5M5KTk6UOh4hKkLS0NPz+++/Q19dHpUqVUKlSJRgYGGDmzJlIS0tTuF0mFkREREWQhYUFlJWVcenSJQwaNAiCIEgdEhGVEJMnT8ayZcswe/ZsXL16FVevXsWsWbOwdOlSTJ06VeF2eSsUERFREVS5cmXs2LEDrVu3xpYtW1CzZk2MHz9e6rCIqATYtGkT1q1bh06dOollNWvWhJmZGYYNG4Y///xToXZ5xYKIiKiIatGiBRYvXgwAmDBhAg
4ePChxRERUErx69Qp2dnYZyu3s7PDq1SuF22ViQUREVIQNGzZMvBWqR48euH37ttQhEVEx5+joiGXLlmUoX7ZsGRwdHRVul4kFERFRESaTybB06VI0btwYCQkJ8PHxkTokIspnp06dQseOHVGhQgXIZDLs2bPnm8t8+PABkydPRqVKlaCurg5LS0usX78+R+ubO3cu1q9fD3t7e3h4eMDDwwP29vbYuHEj5s2bp/B2MLEgIiIq4tTU1LBz505MnDgRa9askTocIspniYmJcHR0xPLly3O8zE8//YRjx47B19cXERER8PPzg62tbY6Wbdq0KSIjI/HDDz/gzZs3ePPmDbp06YKIiAg0btxY0c1g520iIqLiwMjIiFcriEqodu3aoV27djmuf+jQIZw8eRL3798Xn0NhaWmZq3VWqFAhQyftR48eYdCgQQr/gMErFkRERMVMamoqxo8fjw0bNkgdChFJYO/evXB2dsbcuXNhZmYGGxsbjBs3Ls/PvHn58iV8fX0VXp5XLIiIiIqZbdu2Yf78+VBTU4OtrS0aNGggdUhElIm3b98iISFBfK+urg51dfU8t3v//n2cOXMGGhoa2L17N168eIFhw4bh5cuXkv7gwCsWRERExUyvXr3QpUsXpKSkoEuXLnj48KHUIRFRJuzt7aGvry9O+XU7Y1paGmQyGbZu3Yp69eqhffv2WLBgATZt2pTnqxZ5wSsWRERExYySkhI2bdqEu3fvIiwsDJ07d8bp06ehpaUldWhElE54eDjMzMzE9/lxtQIATE1NYWZmBn19fbGsWrVqEAQBjx49QtWqVfNlPbnFxIKIiKgY0tHRQWBgIOrWrYsrV66gf//+8PPzg0wmkzo0IvofXV1d6Onp5Xu7DRs2xI4dO/Du3Tvo6OgAACIjI6GkpARzc/Msl+vSpUu27b558yZPcfFWKCIiomLK0tISO3fuhIqKCvz9/TlqFFEx9e7dO4SGhiI0NBQAEBUVhdDQUMTExAAAvLy80KdPH7F+z549UbZsWfTr1w/h4eE4deoUxo8fj/79+0NTUzPL9aS/LSuzqVKlSnLryS1esSAiIirGmjRpguXLl2P06NGwtraWOhwiUsDly5fRvHlz8b2npycAoG/fvti4cSNiY2PFJAP4fMXy6NGjGDlyJJydnVG2bFn89NNP+OOPP7JdT0F37GZiQUREVMwNGjQI7dq1g4WFhdShEJECmjVrBkEQspy/cePGDGV2dnY4evRoAUaVe7wVioiIqARIn1Q8fvwYL1++lDAaIiqNmFgQERGVIJcuXULdunXRrVs3fPz4UepwiKgUYWJBRERUgmhqauLt27c4ceKEeJ82EVFhYGJBRERUgjg4OGDLli0AgGXLlmHNmjUSR0REpUWRSCyWL18OS0tLaGhooH79+rh48WKWddeuXYvGjRvD0NAQhoaGcHV1zbY+ERFRaePm5iaODjN8+HCcOnVK4oiIqDSQPLHw9/eHp6cnpk+fjitXrsDR0RFt2rTBs2fPMq0fFBSEHj164MSJEwgODoaFhQVat26Nx48fF3LkRERERdekSZPQvXt3fPr0CV27dsWDBw+kDomISjjJE4sFCxZg4MCB6NevH+zt7bFq1SpoaWlh/fr1mdbfunUrhg0bhlq1asHOzg7r1q1DWloajh07VsiRExERFV0ymQzr16+Hk5MTXrx4AS8vL6lDIqISTtLEIiUlBSEhIXB1dRXLlJSU4OrqiuDg4By1kZSUhI8fP6JMmTIFFSYREVGxpKWlhcDAQHh4eGD16tVSh0NEJZykD8h78eIFUlNTUb58ebny8uXL4/bt2zlqY8KECahQoYJccpLehw8f8OHDB/H927dvFQ+YiIiomLGwsMC6deukDoOISgHJb4XKi9mzZ2P79u3YvXs3NDQ0Mq3j4+MDfX19cbK3ty/kKImIiIoGQRAwZ84c7Nq1S+pQiKgEkjSxKFeuHJSVlfH06VO58qdPn8LExCTbZefPn4
/Zs2fjyJEjqFmzZpb1vLy8EB8fL07h4eH5EjsREVFxs3XrVkycOBG9e/fGtWvXpA6HiEoYSRMLNTU11KlTR67j9ZeO2C4uLlkuN3fuXMycOROHDh2Cs7NztutQV1eHnp6eOOnq6uZb/ERERMXJzz//DFdXVyQlJcHNzQ3Pnz+XOiQiKkEkvxXK09MTa9euxaZNm3Dr1i0MHToUiYmJ6NevHwCgT58+ciNZzJkzB1OnTsX69ethaWmJuLg4xMXF4d27d1JtAhERUbGgoqICf39/WFtb48GDB/jxxx+RkpIidVhEVEJInlh0794d8+fPx7Rp01CrVi2Ehobi0KFDYofumJgYxMbGivVXrlyJlJQU/PjjjzA1NRWn+fPnS7UJRERExUaZMmWwd+9e6Orq4tSpUxg5ciQEQZA6LCIqASQdFeqLESNGYMSIEZnOCwoKknsfHR1d8AERERGVYNWqVYOfnx86duyINWvWwNHREcOGDZM6LCIq5iS/YkFERESF7/vvv4ePjw9UVFSgrKwsdThEVAIUiSsWREREVPh+++03dOjQAdWrV5c6FCIqAXjFgoiIqJSSyWRyScWrV6/4IFkiUhgTCyIiIkJ4eDjq1auH3r17Iy0tTepwiKgYYmJBREREePv2LR49eoTAwEBMnz5d6nCIqBhiYkFERESoX78+1qxZAwD4448/4O/vL3FERFTcMLEgIiIiAJ8fSjt27FgAQL9+/XDlyhWJIyKi4oSJBREREYnmzJmDtm3bIjk5GZ07d8bTp0+lDomIigkmFkRERCRSVlaGn58fbGxs8PDhQ4wZM0bqkIiomGBiQURERHIMDAywd+9euLm5YcmSJVKHQ0TFBB+QR0RERBnY2tpiz549UodBRMUIr1gQERHRN/n6+uLo0aNSh0FERRivWBAREVG2/Pz8MGDAABgaGuLChQuoWrWq1CERURHEKxZERESUrR9++AHfffcdXr9+DTc3NyQkJEgdEhEVQUwsiIiIKFsaGhrYtWsXzMzMcOvWLfTs2ROpqalSh0VERQwTCyIiIvomU1NT7NmzBxoaGjhw4ACmTJkidUhEVMQwsSAiIqIccXZ2xvr16wEAs2fPxrZt2ySOiIiKEiYWRERElGM9evSAl5cXAODhw4cSR0NERQlHhSIiIqJc+eOPP9C+fXs0atRI6lCIqAjhFQsiIiLKFSUlJbmkIjExEe/fv5cwIiIqCphYEBERkcJiYmLQqFEjDBo0CIIgSB0OEUmIiQUREREp7O7du7h+/Tr+/vtvLFiwQOpwiEhCTCyIiIhIYS1atMDChQsBAL/99hv+/fdfiSMiKn5OnTqFjh07okKFCpDJZNizZ0+Olz179ixUVFRQq1atAosvp5hYEBERUZ6MGDECAwYMQFpaGnr06IGIiAipQyIqVhITE+Ho6Ijly5fnark3b96gT58+aNmyZQFFljscFYqIiIjyRCaTYfny5bh9+zbOnDmDTp064cKFCzAwMJA6NKJioV27dmjXrl2ulxsyZAh69uwJZWXlXF3lKCi8YkFERER5pqamhp07d8LCwgKRkZEYMWKE1CERSe7t27dISEgQpw8fPuRb2xs2bMD9+/cxffr0fGszr5hYEBERUb4wNjbG3r170bBhQ8yaNUvqcIgkZ29vD319fXHy8fHJl3bv3LmDiRMnYsuWLVBRKTo3IBWdSIiIiKjYq1WrFk6fPg2ZTCZ1KESSCw8Ph5mZmfheXV09z22mpqaiZ8+emDFjBmxsbPLcXn5iYkFERET5Kn1SsXv3bpiYmMDFxUXCiIikoaurCz09vXxt8+3bt7h8+TKuXr0q3nKYlpYGQRCgoqKCI0eOoEWLFvm6zpxiYkFEREQFIiAgAN26dYOJiQkuXboEc3NzqUMiKvb09PRw/fp1ubIVK1bg+PHjCAgIgJWVlUSRMbEgIiKiAtK2bVvUqFED169fR+fOnXH69GloampKHRZRkfPu3TvcvXtXfB8VFYXQ0FCUKV
MGFStWhJeXFx4/fozNmzdDSUkJDg4OcssbGxtDQ0MjQ3lhY+dtIiIiKhA6OjoIDAxE2bJlERISAg8PDwiCIHVYREXO5cuX4eTkBCcnJwCAp6cnnJycMG3aNABAbGwsYmJipAwxR5hYEBERUYGxsrJCQEAAVFRU4Ofnhzlz5kgdElGR06xZMwiCkGHauHEjAGDjxo0ICgrKcnlvb2+EhoYWSqzZYWJBREREBapZs2ZYunQpAGDSpEnYt2+fxBERUUFgYkFEREQFbsiQIRgyZAgEQcD58+elDoeICgA7bxMREVGhWLJkCdq1a4dOnTpJHQoRFQBesSAiIqJCoaqqKpdUpKSk4NOnTxJGRET5iYkFERERFboXL16gdevWGDt2rNShEFE+4a1QREREVOjOnz+PkydP4uTJk6hRowYGDBggdUhElEe8YkFERESFrkOHDvj9998BAMOGDcOZM2ckjoiI8oqJBREREUliypQp6NatGz5+/IguXboUiweAEVHWmFgQERGRJGQyGTZs2IBatWrh+fPncHNzQ2JiotRhEZGCmFgQERGRZLS1tbFnzx4YGRkhNDQUI0eOlDokIlIQEwsiIiKSVKVKlbBr1y7Y29tj/PjxUodDRAriqFBEREQkuUaNGiEsLAzKyspSh0JECuIVCyIiIioS0icVJ06cwPXr1yWMhohyi4kFERERFSl79+5F69at0alTJ7x48ULqcIgoh5hYEBERUZHSqFEjVKpUCdHR0eJwtERU9DGxICIioiKlTJkyCAwMhK6uLoKCgjB69GipQyKiHGBiQUREREVO9erVsXXrVshkMqxYsQKrVq2SOiQi+gYmFkRERFQkdezYEbNmzQIAjBw5EidPnpQ4IiLKDhMLIiIiKrImTJiAHj164NOnT9i5c6fU4RBRNvgcCyIiIiqyZDIZfH190apVK7i7u0sdDhFlg4kFERERFWmampro16+f+D4tLQ0AoKTEGy+IihL+jyQiIqJi4+3bt+jSpQtmzJghdShE9BVesSAiIqJi48iRIwgMDERgYCAcHBzQrVs3qUMiov/hFQsiIiIqNrp27QpPT08AQN++fXH16lWJIyKiL5hYEBERUbEyZ84ctGnTBsnJyXBzc8PTp0+lDomIwMSCiIiIihkVFRVs374dNjY2ePjwIbp27YqUlBSpwyIq9ZhYEBERUbFjYGCAvXv3Ql9fH2fPnsXo0aOlDomo1GNiQURERMWSra0t/Pz8YG5uzmdcEBUBHBWKiIiIiq127drhzp070NDQkDoUolKPVyyIiIioWEufVFy9ehV3796VMBqi0kvyxGL58uWwtLSEhoYG6tevj4sXL2ZZ9+bNm+jatSssLS0hk8mwaNGiwguUiIiIirQjR46gYcOGcHNzQ0JCgtThEJU6kiYW/v7+8PT0xPTp03HlyhU4OjqiTZs2ePbsWab1k5KSULlyZcyePRsmJiaFHC0REREVZQ4ODjA0NER4eDh69eqF1NRUqUMiKlUkTSwWLFiAgQMHol+/frC3t8eqVaugpaWF9evXZ1q/bt26mDdvHn7++Weoq6sXcrRERERUlFWoUAF79uyBuro69u/fj6lTp0odElGpIllikZKSgpCQELi6uv5/MEpKcHV1RXBwcL6t58OHD0hISBCnt2/f5lvbREREVLTUrVsXvr6+AAAfHx/4+flJHBFR6SFZYvHixQukpqaifPnycuXly5dHXFxcvq3Hx8cH+vr64mRvb59vbRMREVHR06tXL0yYMAEA0L9/f1y+fFniiIhKB8k7bxc0Ly8vxMfHi1N4eLjUIREREVEB+/PPP/H999/j/fv3WLlypdThEJUKkiUW5cqVg7KyMp4+fSpX/vTp03ztmK2urg49PT1x0tXVzbe2iYiIqGhSVlbG1q1bMXfuXKxZs0bqcIiyderUKXTs2BEVKlSATCbDnj17sq2/a9cutGrVCkZGRtDT04OLiwsOHz5cOMFmQ7LEQk1NDXXq1MGxY8
fEsrS0NBw7dgwuLi5ShUVEREQlhL6+PsaPHw9lZWUAgCAIEARB4qiIMkpMTISjoyOWL1+eo/qnTp1Cq1atcPDgQYSEhKB58+bo2LEjrl69WsCRZk/SJ297enqib9++cHZ2Rr169bBo0SIkJiaiX79+AIA+ffrAzMwMPj4+AD53+P5yK1NKSgoeP36M0NBQ6OjowNraWrLtICIioqLtw4cPGDZsGBwcHDBmzBipwyGS065dO7Rr1y7H9b9+ltusWbMQGBiIffv2wcnJKZ+jyzlJE4vu3bvj+fPnmDZtGuLi4lCrVi0c+r/27j0qqnLvA/h3AzJhMApHBAPGCbkczOSiqCAeqWNiWmB5UikvoA4gWp7FOkpYai49aV4y02PgAalOgJQSsix7X7wdyahEhRS8ADEOFgOZKZCFIPv9g+W8TaCYzMwG5vtZa1buvZ+99++hHuM3z+2zz3QTujUaDSws/r9T5fvvv9f7YW3atAmbNm3C+PHjcfToUVOHT0RERD1ETk4Odu3aBQsLCwwdOhRhYWFSh0RkMK2trWhoaICDg4OkcUiaWADA4sWLsXjx4g6v/T5ZUCqV7MIkIiKiP2zmzJk4ePAgdu3ahRkzZuDrr7+Gl5eX1GFRL9fQ0KC3C7xMJjPKXmybNm1CY2Mjpk+fbvBn/xG9flUoIiIiIkEQsGPHDgQHB+P69esIDw/HtWvXpA6LermhQ4fqbXtwe3i/IWVmZmL16tX48MMPMXDgQIM//4+QvMeCiIiIyBRkMhlycnIwcuRIXLhwAZGRkdi/f79ucjeRoZWVlcHFxUV3bOjeit27d2PBggX46KOP9Dadlgp7LIiIiMhsODk5Yd++fbCxscFnn32GpKQkqUOiXszOzk5v2wNDJhZZWVmIjo5GVlYWpkyZYrDndgUTCyIiIjIrAQEBSE9Ph729fbf4lpeosbERxcXFKC4uBgBUVVWhuLgYGo0GQNuGz3PmzNGVz8zMxJw5c7B582aMHj0aWq0WWq0W169flyJ8HSYWREREZHZmzJiByspKTJw4UepQiFBUVAR/f3/d6qcJCQnw9/fHypUrAQA1NTW6JAMAdu7ciZaWFixatAiDBg3SfZYsWSJJ/LdxjgURERGZJXt7e92fy8vL0bdvX73x8ESmEhoaeteVT99991294+66zQJ7LIiIiMisHTt2DKNGjcIzzzyDX375RepwiHosJhZERERk1lxdXWFhYYETJ05ApVJxzyyi+8TEgoiIiMyau7s7PvroI1haWiIjIwMbN26UOiSiHomJBREREZm9xx9/HFu3bgUAvPzyy/jkk08kjoio52FiQURERAQgPj4eMTExEEURzz//PM6dOyd1SEQ9ChMLIiIiIgCCIGDbtm0YN24c6uvr8frrr0sdElGPwsSCyITKy8sRHBwMLy8vBAYGorS0tMNyaWlp8PT0xJAhQ6BSqdDc3AygbXk5Gxsb+Pn56T6/XcHkzJkzCA0NhY+PD3x8fJCTk2OSehER9RbW1tbYu3cvli1bhtTUVKnDIepRmFgQmVBsbCxiYmJw8eJFJCYmIioqql2ZqqoqrFixAgUFBaioqEBtbS127typu+7t7a3bnbO4uBg2NjYAgBs3biAiIgJr167FuXPncPbsWYwbN85UVSMi6jUcHR3xxhtvQCaTSR0KUY/CxILIROrq6lBUVIRZs2YBAKZNm4bq6mpUVFTolduzZw/Cw8Ph7OwMQRAQFxeHrKysTp+fmZmJMWPGICQkBABgaWkJR0dHw1eEiMiM3Lp1C4mJidi1a5fUoRB1e0wsiEykuroagwYNgpVV24b3giBAoVBAo9HoldNoNBg8eLDuWKlU6pWprKxEQEAAAgMDsWPHDt35srIyyGQyPPXUU/Dz88OcOXPwww8/GLlWRES92+7du7FhwwbExcXh+PHjUodD1K0xsSDqQQICAnD58mWcOnUKH3/8MZKTk/Hhhx8CAFpaWnDw4EGkpKTg9OnTcHFxwcKFCy
WOmIioZ4uMjMS0adPQ3NyMZ599FtXV1VKHRNRtMbEgMhE3NzfU1NSgpaUFACCKIjQaDRQKhV45hUKBS5cu6Y7VarWujFwuR79+/QC07RQbGRmJgoIC3X2PPfYYXFxcIAgCZs2ahS+//NIUVevxujqp/jZRFPH444+jf//+euc3btyIYcOGYejQoXjmmWdw7do1I9WEiAzNwsIC7733Hnx9fVFXV4epU6fixo0bUodF1C0xsSAykYEDByIgIAAffPABAGDv3r1wdXWFh4eHXrlp06YhLy8PWq0WoigiOTkZM2fOBADU1NSgtbUVANDQ0ID9+/fD398fADB9+nScOHEC9fX1AIBPP/0Uvr6+pqpej2aISfUAsGXLFgwZMkTvXH5+PtLT01FYWIiysjKMGDECr7zyijGrQ0QG9uCDD2Lfvn1wdHTEqVOnMG/ePIiiKHVYRN0OEwsiE0pJSUFKSgq8vLywfv16pKenAwAWLFiAvLw8AIC7uztWr16NsWPHwsPDA46OjoiNjQXQlow8+uij8PX1xZgxY/DEE08gOjoaQFuPxfLlyxEcHIzhw4fj8OHDSE5OlqaiPYihJtWXlpYiNzcXL7/8st59JSUlCAkJgZ2dHQBg8uTJ+M9//mPkWhGRoQ0ePBh79+6FlZUVsrOzsW7dOqlDIup2mFgQmZC3tzcKCwtx8eJFFBUV4dFHHwUApKamIjw8XFdOpVKhsrISlZWVSEtLQ58+fQAAixcvRmlpKUpKSlBaWorXXnsNgiDo7ps9ezbOnj2Lb775BgcOHICbm5tpK9gDGWJSfXNzM1QqFVJSUmBpaal334gRI3Dw4EFdD1RGRgYaGhpw9epVI9eMiAxt3Lhx2LFjB2xsbODu7i51ON2CsYeS3hYVFQVBEDiUtJtjYkFE1EWrV6/Gs88+Cx8fn3bXHnvsMfzjH//AU089hTFjxuiWAL6dyBBRz6JSqXDhwgXdEFVzZ8yhpLfl5OTovmCj7o2JBRGZNUNMqv/vf/+Lbdu2QalUIiQkBPX19VAqlbrlfuPj41FUVISvvvoKoaGhcHV1hVwuN1ENicjQftsb/P333+PHH3+UMBrpGHsoKQDU1tbi9ddfx5tvvmncypBBMLEgIrNmiEn1BQUFuHTpEtRqNT7//HPI5XKo1Wpd70RNTQ2Att3RV65ciWXLlpmwhkRkLCdPnkRgYCCee+65dkN7zIGxh5ICbT1EGzZs0M1To+6NiQURmb2uTqrvzMSJE/HII4/A19cXISEhWLx4sdHqQkSmI5PJUF9fjyNHjiAhIUHqcHqkuw0lTU1NhUKhwOOPPy5BZHQ/OMiXqAsm5SdJHYLBffaE+a10cntS/e+lpqbqHatUKqhUqrs+S6lUtptceObMmS7HSETdz7Bhw5CRkYGpU6di+/btePTRRxETEyN1WCbz26GkVlZWdx1KWllZqTv+/VBSjUaD7du3o6WlRTeU9MSJEzhy5AiOHTuG/fv36+4dPnw49u3bp1tqnboX9lgQERER3afw8HCsXbsWALBo0SIcO3ZM4ohMx9hDSTMyMlBdXQ21Wg21Wg0A+Oabb5hUdGNMLIiIiIi6ICkpCTNmzEBLSwumTZumt9BDb2fsoaTUs3AoFBEREVEXCIKAXbt2oby8HKdOncLLL7+st+pRb2bsoaS/xd3Ouz/2WBARERF1Ud++fZGbm4t58+a126OByFywx4KIep2J6/ZJHYLB/W9ShNQhEFEn3NzckJaWJnUYRJJhjwV1C+Xl5QgODoaXlxcCAwNRWlraYbm0tDR4enpiyJAhUKlUunXDCwsL4efnBz8/PzzyyCOIjY1FU1NTp/cREREZgyiK2LRpE3JycqQOhchkmFhQtxAbG4uYmBhcvHgRiYmJiIqKalemqqoKK1asQEFBASoqKlBbW6vrbvb19cWJEydQXFyMM2fOoK6uDjt27Oj0PiIiImPIysrC0qVLMXv2bJSUlEgdDpFJMLEgydXV1aGoqAizZs
0C0LYsXXV1NSoqKvTK7dmzB+Hh4XB2doYgCIiLi9NNjuvbty/69OkDALh58yZ++eUXCILQ6X1ERETGMH36dDzxxBO4ceMGIiIi8MMPP0gdEpHRcY4FSa66uhqDBg2ClVXbf46CIEChUECj0eitha3RaDB48GDdsVKphEaj0R2r1WpERESgsrISU6ZMQXx8/D3dR0REZGhWVlbIzs7GqFGjUFFRgb/97W/Iz8+HtbW11KHdVW/b+NUcN32VEnssqNdQKpUoKSmBVqtFU1MTx7USEZGk7O3tkZeXB7lcjmPHjuHFF1/kkqnUqzGxIMm5ubmhpqYGLS0tANomvGk0GigUCr1yCoVCb9MhtVrdrgwA2NraYubMmcjIyPhD9xERERmaj48PsrKyIAgCdu7ciXfeeUfqkIiMhokFSW7gwIEICAjABx98AADYu3cvXF1d9YZBAW1zL/Ly8qDVaiGKIpKTkzFz5kwAQEVFhW6lp5s3b+Ljjz/G8OHDO72PiIjI2CZPnoz169fD0tJSN/+PqDdiYkHdQkpKClJSUuDl5YX169cjPT0dALBgwQLk5eUBANzd3bF69WqMHTsWHh4ecHR0RGxsLADg8OHD8Pf3h6+vL/z9/eHk5IQVK1Z0eh8REZEpLF26FMXFxVi4cKHUoRAZDSdvU7fg7e2NwsLCdudTU1P1jlUqFVQqVbtyMTExiImJuePz73QfERGRKQiCgGHDhumOf/rpJ1hZWcHOzk7CqIgMiz0WRERERCZ0/vx5jB49GrNnz0Zra6vU4RAZDBMLIiIiIhO6fv06NBoN9u3bh1WrVkkdDpHBMLEgIiIiMqHRo0fj3//+NwBg7dq1yM7OljgiIsNgYkFERERkYrNnz8bSpUsBANHR0cjNzUVSUhIiIyORlJSE8vJyiSMkUzp27BiefvppPPTQQxAEAbm5uZ3ec/ToUQQEBEAmk8HDwwPvvvuu0ePsDCdvk1HUvT1B6hAMauBLB6UOgYiIepl169bh7NmzOHDgAJ555hlYWlpCFEUIgoANGzYgLS0NUVFRUodJJvDzzz/D19cX8+bNw7PPPttp+aqqKkyZMgVxcXHIyMjAoUOHsGDBAgwaNAhhYWEmiLhjTCyIiIiIJGBpaYk1a9bgwIEDAIBbt27pXZ8/fz5CQkLa7etEvc+TTz6JJ5988p7LJycn4+GHH8bmzZsBtG3E+Pnnn2PLli2SJhYcCkVEREQkkT179sDS0rLDa4IgIC0tzcQRkSE1NDSgvr5e92lqajLIcwsLCzFhgv7okLCwsA6X7jclJhZEREREElGr1RBFscNroihCrVabNiAyqKFDh6Jfv366z7p16wzyXK1WCycnJ71zTk5OqK+vxy+//GKQd9wPJhZERGakvLwcwcHB8PLyQmBgIEpLSzssl5aWBk9PTwwZMgQqlQrNzc33dO3MmTMIDQ2Fj48PfHx8kJOTY/Q6EfVkSqUSgiB0eE0QBCiVStMGRAZVVlaG69ev6z5JSUlSh2RUTCyIiMxIbGwsYmJicPHiRSQmJnY4MbSqqgorVqxAQUEBKioqUFtbi507d3Z67caNG4iIiMDatWtx7tw5nD17FuPGjTNl9Yh6nHnz5t21x2L+/PkmjogMyc7ODnK5XPeRyWQGea6zszNqa2v1ztXW1kIul8PGxsYg77gfTCyIiMxEXV0dioqKMGvWLADAtGnTUF1djYqKCr1ye/bsQXh4OJydnSEIAuLi4pCVldXptczMTIwZMwYhISEA2iamOjo6mrCGRD2Pp6cn0tLSYGFhAUtLS71/pqWlceI2dSgoKAiHDh3SO5efn4+goCCJImrDVaGIiMxEdXU1Bg0aBCurtr/6BUGAQqGARqPR++VFo9Fg8ODBumOlUgmNRtPptbKyMshkMjz11FO4fPkyhg8fjs2bNzO5IOpEVFQUQkJCkJaWBrVaDaVSifnz5zOpMCONjY16X/JUVVWhuLgYDg4OUCgUSEpKwnfffYf3338fAB
AXF4ft27dj2bJlmDdvHg4fPowPP/wQn3zyiVRVAMAeCyIiMpCWlhYcPHgQKSkpOH36NFxcXLBw4UKpw6JuxJhzfNRqNUJDQ9GvXz/4+fmZojp39UfrGhYWhitXruD999/HunXr4OHhcd9znajnKSoqgr+/P/z9/QEACQkJ8Pf3x8qVKwEANTU1ui9xAODhhx/GJ598gvz8fPj6+mLz5s1ITU2VdKlZgIkFEZHZcHNzQ01NDVpaWgC0jd/WaDRQKBR65RQKBS5duqQ7VqvVujKdXXvsscfg4uICQRAwa9YsfPnll8auFvUgxpzjI5fLsXbtWmRmZpqySndkzLre7Rr1TKGhoRBFsd3n9m7a7777Lo4ePdruntOnT6OpqQmVlZXdYjNFJhZERGZi4MCBCAgIwAcffAAA2Lt3L1xdXdsNt5g2bRry8vKg1WohiiKSk5Mxc+bMTq9Nnz4dJ06cQH19PQDg008/ha+vrwlrSN2Zsef4ODg4ICQkBA8++KBpK9YBY9f1bteIpMTEgojIjKSkpCAlJQVeXl5Yv3490tPTAQALFixAXl4eAMDd3R2rV6/G2LFj4eHhAUdHR8TGxnZ6TaFQYPny5QgODsbw4cNx+PBhJCcnS1NR6nbuNsfnt+53jk93Yuy69pSfA5kfTt4mIjIj3t7eHe7MmpqaqnesUqmgUqk6fMbdrs2ePRuzZ8/ueqBERNTjsMeCiIiIjM7Yc3y6E1PMZ+oJPwcyP0wsiIiIyOiMPcenOzF2XXvKz4HMDxMLIiIiMgljzvG5ceMGXF1d8dxzz6GsrAyurq5ISkqSpqIwbl3vdo1ISoJ4p33ke6nLly/Dzc0N1dXVcHV1Nfn7J67bZ/J3GtP/JkV0eL7u7QkmjsS4Br50sMPzk/Kl+5+WsXz2xDqpQ+iy3tbOgDu3NXPBtkZkGr2trUnVzqT+fVMq7LEgIiIiIqIu6xarQv3rX//Cxo0bodVq4evri23btmHUqFF3LP/RRx9hxYoVUKvV8PT0xBtvvIHJkyebMGIiou7BXHoHiYio+5O8xyI7OxsJCQlYtWoVTp06BV9fX4SFhaGurq7D8l988QUiIyMxf/58nD59GlOnTsXUqVNx9uxZE0dORERERES3SZ5YvPnmm1CpVIiOjsbQoUORnJyMvn37YteuXR2W37p1KyZNmoSlS5fCx8cHa9asQUBAALZv327iyImIiIiI6DZJh0LdvHkTJ0+e1Fu1wcLCAhMmTOhwAycAKCwsREJCgt65sLAw5Obmdli+qakJTU1NuuPr168DAGpqaroY/f359foVSd5rLJcvX+7w/JVrv5o4EuO6eYd6/vpDvYkjMb47/TvtSXpbOwPY1tjWuqfnt/+P1CEYXObiMKlDkFRva2tStbPbv2e2trZK8n6pSJpYXLlyBbdu3YKTk5PeeScnJ5w/f77De7RabYfltVpth+XXrVuH1atXtzt/tzkcdO/c3pA6AhNZ5SZ1BCbjhh1Sh0AdYFvrfdjWuiezaWtmQup2Vltba1abF3aLydvGlJSUpNfD0dLSgnPnzsHNzQ0WFpKPBKMuaGhowNChQ1FWVgY7OzupwyHqtdjWiEyDba33aG1tRW1tLfz9/aUOxaQkTSwGDBgAS0tL1NbW6p2vra2Fs7Nzh/c4Ozv/ofIymQwymUzv3NixY7sQNXUX9fVt3bUuLi6Qy+USR0PUe7GtEZkG21rvYk49FbdJ+pW9tbU1RowYgUOHDunOtba24tChQwgKCurwnqCgIL3yAJCfn3/H8kREREREZHySD4VKSEjA3LlzMXLkSIwaNQpvvfUWfv75Z0RHRwMA5syZAxcXF6xb17Zz4pIlSzB+/Hhs3rwZU6ZMwe7du1FUVISdO3dKWQ0iIiIiIrMmeWIxY8YM/PDDD1i5ciW0Wi38/Pzw2Wef6SZoazQavbkQwcHByMzMxKuvvorly5fD09MTubm5GDZsmFRVIInIZDKsWrWq3VA3IjIstjUi02Bbo5
5OEEVRlDoIIiIiIiLq2bgsEhERERERdRkTCyIiIiIi6jImFkRERERE1GVMLMigQkND8fe//12y90dFRWHq1KndJh4iIiIic8HEgnq1nJwcrFmzRuowiEzKlAn1zp07ERoaCrlcDkEQcO3atXZlrl69ihdeeAFyuRz9+/fH/Pnz0djYaJL4iAylJ7arb775BuPGjcMDDzwANzc3bNiwwSTxk/liYkG9moODA+zs7KQOg6jXunHjBiZNmoTly5ffscwLL7yA0tJS5OfnY//+/Th27BhiYmJMGCVRz2KIdlVfX4+JEydi8ODBOHnyJDZu3IjXXnuN+36RcYlEBjR+/Hhx0aJF4qJFi0S5XC7+6U9/El999VWxtbVVFEVRfP/998URI0aItra2opOTkxgZGSnW1tbq7r969ar4/PPPiwMGDBAfeOAB0cPDQ9y1a5fuukajEZ977jmxX79+or29vRgeHi5WVVXprs+dO1eMiIjQi2fJkiW648GDB4v//Oc/xejoaNHW1lZ0c3MTU1JS9OrQ2TuIurO5c+eKAPQ+VVVV4tGjR8XAwEDR2tpadHZ2FhMTE8Xm5mbdfZ213c4cOXJEBCD+9NNPeufLyspEAOKJEyd05w4cOCAKgiB+9913BqkzkbH1xHa1Y8cO0d7eXmxqatKVSUxMFL29vbvwkyC6O/ZYkMG99957sLKywtdff42tW7fizTffRGpqKgCgubkZa9asQUlJCXJzc6FWqxEVFaW7d8WKFSgrK8OBAwdw7tw5vPPOOxgwYIDu3rCwMNjZ2aGgoADHjx+Hra0tJk2ahJs3b95zfJs3b8bIkSNx+vRpxMfHY+HChbhw4YJB30Ekla1btyIoKAgqlQo1NTWoqalBnz59MHnyZAQGBqKkpATvvPMO0tLSsHbtWr1779Z271dhYSH69++PkSNH6s5NmDABFhYW+Oqrr7r0bCJT6YntqrCwEH/5y19gbW2tKxMWFoYLFy7gp59+6tL7ie5E8p23qfdxc3PDli1bIAgCvL29cebMGWzZsgUqlQrz5s3TlXN3d8fbb7+NwMBANDY2wtbWFhqNBv7+/rq/LJVKpa58dnY2WltbkZqaCkEQAADp6eno378/jh49iokTJ95TfJMnT0Z8fDwAIDExEVu2bMGRI0fg7e1tsHcQSaVfv36wtrZG37594ezsDAB45ZVX4Obmhu3bt0MQBPz5z3/G999/j8TERKxcuRIWFm3fMd2t7d4vrVaLgQMH6p2zsrKCg4MDtFrt/VeUyIR6YrvSarV4+OGH9co4OTnprtnb29/3+4nuhD0WZHBjxozR/VIOAEFBQSgvL8etW7dw8uRJPP3001AoFLCzs8P48eMBABqNBgCwcOFC7N69G35+fli2bBm++OIL3XNKSkpQUVEBOzs72NrawtbWFg4ODvj1119RWVl5z/ENHz5c92dBEODs7Iy6ujqDvoOoOzl37hyCgoL02uXYsWPR2NiIy5cv687dre2+/vrrujZx+0sAInPGdkXUHnssyGR+/fVXhIWFISwsDBkZGXB0dIRGo0FYWJhumNGTTz6JS5cu4dNPP0V+fj7++te/YtGiRdi0aRMaGxsxYsQIZGRktHu2o6PjPcfRp08fvWNBENDa2goABnsHUW8TFxeH6dOn644feuihe7rvt4n7bS0tLbh69arum18ic2XMduXs7Iza2lq9MreP2fbIWJhYkMH9ftz0l19+CU9PT5w/fx4//vgj1q9fDzc3NwBAUVFRu/sdHR0xd+5czJ07F+PGjcPSpUuxadMmBAQEIDs7GwMHDoRcLjdK7KZ4B5GxWVtb49atW7pjHx8f7N27F6Io6r45PX78OOzs7ODq6qord6e2a2lpCQcHBzg4OPzhWIKCgnDt2jWcPHkSI0aMAAAcPnwYra2tGD169P1Uj0gSPa1dBQUF4ZVXXkFzc7PuC7X8/Hx4e3tzGBQZDYdCkcFpNBokJCTgwoULyMrKwrZt27BkyRIoFA
pYW1tj27Zt+Pbbb5GXl9duj4mVK1di3759qKioQGlpKfbv3w8fHx8AbUvrDRgwABERESgoKEBVVRWOHj2Kl156Sa/buStM8Q4iY1Mqlfjqq6+gVqtx5coVxMfHo7q6Gi+++CLOnz+Pffv2YdWqVUhISNCNAwfu3HbvRqvVori4GBUVFQCAM2fOoLi4GFevXgXQ9svXpEmToFKp8PXXX+P48eNYvHgxZs6cec/fzhJ1Bz2tXT3//POwtrbG/PnzUVpaiuzsbGzduhUJCQlG+gkRgcvNkmGNHz9ejI+PF+Pi4kS5XC7a29uLy5cv1y2tl5mZKSqVSlEmk4lBQUFiXl6eCEA8ffq0KIqiuGbNGtHHx0e0sbERHRwcxIiICPHbb7/VPb+mpkacM2eOOGDAAFEmk4nu7u6iSqUSr1+/LorivS03u2XLFr2YfX19xVWrVt3zO4i6uwsXLohjxowRbWxs/tCymHdru3eyatWqdstwAhDT09N1ZX788UcxMjJStLW1FeVyuRgdHS02NDQYq/pERtET21VJSYkYEhIiymQy0cXFRVy/fr1BfyZEvyeIoihKktEQEVG3ERoaCj8/P7z11ltSh0LUa7BdkbnhUCgiIiIiIuoyJhZERERERNRlHApFRERERERdxh4LIiIiIiLqMiYWRERERETUZUwsiIiIiIioy5hYEBERERFRlzGxICIiIiKiLmNiQUREREREXcbEgoiIiIiIuoyJBRERERERdRkTCyIiIiIi6rL/A9TUDgVpX/SCAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top-100 semantic ablation — full prediction shift:\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + "
\n", + "
Input Sentence:
\n", + "
Fact: the capital of the state containing Dallas is
\n", + " \n", + "
\n", + "
Original Top 5 Tokens
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
TokenProbabilityDistribution
Austin0.414\n", + "
\n", + "
\n", + " 41.4%\n", + "
\n", + "
Texas0.056\n", + "
\n", + "
\n", + " 5.6%\n", + "
\n", + "
the0.056\n", + "
\n", + "
\n", + " 5.6%\n", + "
\n", + "
not0.056\n", + "
\n", + "
\n", + " 5.6%\n", + "
\n", + "
Fort0.044\n", + "
\n", + "
\n", + " 4.4%\n", + "
\n", + "
\n", + " \n", + "
New Top 5 Tokens
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
TokenProbabilityDistribution
the0.154\n", + "
\n", + "
\n", + " 15.4%\n", + "
\n", + "
not0.106\n", + "
\n", + "
\n", + " 10.6%\n", + "
\n", + "
called0.064\n", + "
\n", + "
\n", + " 6.4%\n", + "
\n", + "
a0.057\n", + "
\n", + "
\n", + " 5.7%\n", + "
\n", + "
Texas0.044\n", + "
\n", + "
\n", + " 4.4%\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + "
Key Tokens
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
TokenOriginalNewChange
▁Austin0.41410.0012\n", + "
\n", + "
\n", + " -99.7%\n", + "
\n", + "
▁Dallas0.03000.0004\n", + "
\n", + "
\n", + " -98.6%\n", + "
\n", + "
▁Texas0.05590.0442\n", + "
\n", + "
\n", + " -21.0%\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Progressive ablation of semantic-target features\n", + "sem_groups = {\"baseline\": {\n", + " \"P(Austin)\": probs_base[idx_x].item(),\n", + " \"P(Dallas)\": probs_base[idx_y].item(),\n", + " \"P(Texas)\": probs_base[idx_texas].item(),\n", + "}}\n", + "sem_logit_diffs = {\"baseline\": orig_gap}\n", + "\n", + "sem_ablation_results = {}\n", + "for n in [10, 100]:\n", + " top_n, _ = get_top_features(graph_semantic, n=n)\n", + " abl_tuples = [\n", + " (layer, pos, feat_idx, 0.0 * activations[layer, pos, feat_idx])\n", + " for (layer, pos, feat_idx) in top_n\n", + " ]\n", + " abl_logits, _ = model.feature_intervention(input_ids, abl_tuples)\n", + " probs_abl = torch.softmax(abl_logits.squeeze(0)[-1].float(), dim=-1)\n", + " gap = (abl_logits.squeeze(0)[-1, idx_x] - abl_logits.squeeze(0)[-1, idx_y]).item()\n", + " label = f\"top-{n}\"\n", + " sem_groups[label] = {\n", + " \"P(Austin)\": probs_abl[idx_x].item(),\n", + " \"P(Dallas)\": probs_abl[idx_y].item(),\n", + " \"P(Texas)\": probs_abl[idx_texas].item(),\n", + " }\n", + " sem_logit_diffs[label] = gap\n", + " sem_ablation_results[n] = abl_logits\n", + "\n", + "display_ablation_chart(sem_groups, logit_diffs=sem_logit_diffs,\n", + " title=\"Semantic-target ablation: token probabilities & logit gap\")\n", + "\n", + "# Show the full top-k comparison for the strongest ablation\n", + "strongest_n = max(sem_ablation_results.keys())\n", + "print(f\"\\nTop-{strongest_n} semantic ablation — full prediction shift:\")\n", + "display_topk(prompt, original_logits, sem_ablation_results[strongest_n])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IGnU9l1zmS8m" + }, + "source": [ + "## Visualize the Semantic Concept Graph\n", + "\n", + "Save the **semantic concept** graph and serve it locally. 
The interactive visualization shows the circuit driving the abstract `Capitals − States` direction — the multi-hop reasoning path.\n", + "\n", + "**If running on a remote server, set up port forwarding so that port 8046 is accessible on your local machine.**" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "from pathlib import Path\n", + "\n", + "graph_dir = Path(\"attribution_targets_demo/graphs\")\n", + "graph_dir.mkdir(parents=True, exist_ok=True)\n", + "graph_path = graph_dir / \"dallas_austin_semantic_concept_graph.pt\"\n", + "graph_semantic.to_pt(graph_path)\n", + "\n", + "slug = \"dallas-austin-semantic-concept\"\n", + "graph_file_dir = \"attribution_targets_demo/graph_files\"\n", + "node_threshold, edge_threshold = 0.8, 0.98\n", + "\n", + "create_graph_files(\n", + " graph_or_path=graph_path,\n", + " slug=slug,\n", + " output_path=graph_file_dir,\n", + " node_threshold=node_threshold,\n", + " edge_threshold=edge_threshold,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "id": "GmKhWpuUmS8n" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Open your graph at: http://localhost:8046/index.html\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from circuit_tracer.frontend.local_server import serve\n", + "\n", + "port = 8046\n", + "server = serve(data_dir=\"attribution_targets_demo/graph_files/\", port=port)\n", + "\n", + "if IN_COLAB:\n", + " from google.colab import output as colab_output # noqa\n", + " colab_output.serve_kernel_port_as_iframe(\n", + " port, path=\"/index.html\", height=\"800px\", cache_in_notebook=True\n", + " )\n", + "else:\n", + " from IPython.display import IFrame\n", + " print(f\"Open your graph at: http://localhost:{port}/index.html\")\n", + " 
display(IFrame(src=f\"http://localhost:{port}/index.html\", width=\"100%\", height=\"800px\"))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "uCo4FSQwqcBl" + }, + "outputs": [ + { + "ename": "", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[1;31mThe Kernel crashed while executing code in the current cell or a previous cell. \n", + "\u001b[1;31mPlease review the code in the cell(s) to identify a possible cause of the failure. \n", + "\u001b[1;31mClick here for more info. \n", + "\u001b[1;31mView Jupyter log for further details." + ] + } + ], + "source": [ + "# server.stop()" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "ct_dev (3.13.11)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.11" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/tests/test_tutorial_notebook_backends.py b/tests/test_tutorial_notebook_backends.py index 7b7c7f96..3786ef3d 100644 --- a/tests/test_tutorial_notebook_backends.py +++ b/tests/test_tutorial_notebook_backends.py @@ -1,4 +1,5 @@ import gc +from contextlib import contextmanager import pytest import torch @@ -8,12 +9,71 @@ from circuit_tracer.attribution.attribute_transformerlens import ( attribute as attribute_transformerlens, ) +from circuit_tracer.attribution.targets import CustomTarget +from circuit_tracer.graph import compute_node_influence +from circuit_tracer.utils.demo_utils import get_unembed_vecs from tests.conftest import has_32gb # Mark all tests in this module as requiring 32GB+ VRAM pytestmark = [pytest.mark.skipif(not has_32gb, reason="Requires >=32GB VRAM")] +def _move_replacement_model(model, 
device): + """Move a ReplacementModel (and its transcoders) to *device*, updating internal refs. + + Works for both NNSight and TransformerLens backends. + """ + device = torch.device(device) if isinstance(device, str) else device + + # Move model parameters + model.to(device) + + # Move transcoders — NNSight wraps them in an Envoy so .to() only takes device + try: + model.transcoders.to(device, torch.float32) + except TypeError: + model.transcoders.to(device) + + # Update stale tensor references left on the NNSight model instance. + # `.to()` replaces Parameter tensors inside the module but external refs + # (e.g. embed_weight, unembed_weight) still point at the old device. + for attr in ("embed_weight", "unembed_weight"): + t = getattr(model, attr, None) + if t is not None and t.device != device: + setattr(model, attr, t.to(device)) + + # Update backend-specific device tracking + if hasattr(model, "cfg") and hasattr(model.cfg, "device"): + # TransformerLens backend + model.cfg.device = device + + +def _gpu_cleanup(): + """Run garbage collection and free CUDA memory.""" + gc.collect() + torch.cuda.empty_cache() + + +@contextmanager +def _swap_backend(model_off, model_on): + """Context manager: move *model_off* to CPU, move *model_on* to CUDA. + + On exit (whether or not an exception occurred) restores *model_on* → CPU + and *model_off* → CUDA so the fixture is left in its original state. + """ + _move_replacement_model(model_off, "cpu") + gc.collect() + torch.cuda.empty_cache() + _move_replacement_model(model_on, "cuda") + try: + yield + finally: + _move_replacement_model(model_on, "cpu") + gc.collect() + torch.cuda.empty_cache() + _move_replacement_model(model_off, "cuda") + + @pytest.fixture(autouse=True) def cleanup_cuda(): yield @@ -31,6 +91,22 @@ def models(): return model_nnsight, model_tl +@pytest.fixture(scope="module") +def models_sequential(): + """Load models for memory-constrained tests: NNSight on CUDA, TL on CPU. 
+ + Tests using this fixture should call ``_move_replacement_model`` to swap + which model lives on CUDA before each backend phase. + """ + model_nnsight = ReplacementModel.from_pretrained( + "google/gemma-2-2b", "gemma", backend="nnsight", dtype=torch.float32 + ) + model_tl = ReplacementModel.from_pretrained( + "google/gemma-2-2b", "gemma", dtype=torch.float32, device=torch.device("cpu") + ) + return model_nnsight, model_tl + + @pytest.fixture def dallas_supernode_features(): """Features from Dallas-Austin circuit supernodes.""" @@ -747,6 +823,441 @@ def test_setup_attribution_consistency(models, dallas_austin_prompt): ) +def _build_demo_custom_target(model, prompt, token_x, token_y, backend): + """Build a CustomTarget for logit(token_x) − logit(token_y). + + Backend-agnostic helper matching the attribution_targets_demo pattern. + Uses ``get_unembed_vecs`` from ``demo_utils`` for unembedding extraction. + """ + tokenizer = model.tokenizer + idx_x = tokenizer.encode(token_x, add_special_tokens=False)[-1] + idx_y = tokenizer.encode(token_y, add_special_tokens=False)[-1] + + input_ids = model.ensure_tokenized(prompt) + with torch.no_grad(): + logits, _ = model.get_activations(input_ids) + last_logits = logits.squeeze(0)[-1] + + vec_x, vec_y = get_unembed_vecs(model, [idx_x, idx_y], backend) + diff_vec = vec_x - vec_y + probs = torch.softmax(last_logits, dim=-1) + diff_prob = max((probs[idx_x] - probs[idx_y]).abs().item(), 1e-6) + + return ( + CustomTarget(token_str=f"logit({token_x})-logit({token_y})", prob=diff_prob, vec=diff_vec), + idx_x, + idx_y, + ) + + +def _build_demo_semantic_target(model, prompt, group_a_tokens, group_b_tokens, label, backend): + """Build a CustomTarget for an abstract concept direction via vector rejection. + + For each (capital, state) pair, project the capital vector onto the state + vector and subtract that projection, leaving pure "capital-ness". + + Backend-agnostic helper matching the attribution_targets_demo pattern. 
+ """ + assert len(group_a_tokens) == len(group_b_tokens), ( + "Groups must have equal length for paired differences" + ) + tokenizer = model.tokenizer + ids_a = [tokenizer.encode(t, add_special_tokens=False)[-1] for t in group_a_tokens] + ids_b = [tokenizer.encode(t, add_special_tokens=False)[-1] for t in group_b_tokens] + + vecs_a = get_unembed_vecs(model, ids_a, backend) + vecs_b = get_unembed_vecs(model, ids_b, backend) + + # Vector rejection: for each pair, remove the state-direction component + residuals = [] + for va, vb in zip(vecs_a, vecs_b): + va_f, vb_f = va.float(), vb.float() + proj = (va_f @ vb_f) / (vb_f @ vb_f) * vb_f + residuals.append((va_f - proj).to(va.dtype)) + + direction = torch.stack(residuals).mean(0) + + input_ids = model.ensure_tokenized(prompt) + with torch.no_grad(): + logits, _ = model.get_activations(input_ids) + probs = torch.softmax(logits.squeeze(0)[-1], dim=-1) + avg_prob = max(sum(probs[i].item() for i in ids_a) / len(ids_a), 1e-6) + + return CustomTarget(token_str=label, prob=avg_prob, vec=direction) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_attribution_targets_string(models_sequential, dallas_austin_prompt): + """Test attribution with Sequence[str] targets consistency between TL and NNSight.""" + model_nnsight, model_tl = models_sequential + str_targets = ["▁Austin", "▁Dallas"] + + # --- NNSight backend (already on CUDA from fixture) --- + graph_nnsight = attribute_nnsight( + dallas_austin_prompt, + model_nnsight, + attribution_targets=str_targets, + verbose=False, + batch_size=256, + ) + nn_active = graph_nnsight.active_features.cpu() + nn_selected = graph_nnsight.selected_features.cpu() + nn_tokens = [t.token_str for t in graph_nnsight.logit_targets] + nn_adj = graph_nnsight.adjacency_matrix.cpu() + del graph_nnsight + _gpu_cleanup() + + # --- TL backend --- + with _swap_backend(model_nnsight, model_tl): + graph_tl = attribute_transformerlens( + dallas_austin_prompt, + 
model_tl, + attribution_targets=str_targets, + verbose=False, + batch_size=128, + ) + tl_active = graph_tl.active_features.cpu() + tl_selected = graph_tl.selected_features.cpu() + tl_tokens = [t.token_str for t in graph_tl.logit_targets] + tl_adj = graph_tl.adjacency_matrix.cpu() + del graph_tl + _gpu_cleanup() + + # --- Compare CPU tensors --- + assert (nn_active == tl_active).all(), ( + "String-target active features don't match between backends" + ) + assert (nn_selected == tl_selected).all(), ( + "String-target selected features don't match between backends" + ) + assert nn_tokens == tl_tokens, f"String-target logit tokens differ: {nn_tokens} vs {tl_tokens}" + assert torch.allclose(nn_adj, tl_adj, atol=5e-4, rtol=1e-5), ( + f"String-target adjacency matrices differ by max {(nn_adj - tl_adj).abs().max()}" + ) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_attribution_targets_logit_diff(models_sequential, dallas_austin_prompt): + """Test attribution with CustomTarget consistency between TL and NNSight.""" + model_nnsight, model_tl = models_sequential + + # --- NNSight backend (already on CUDA from fixture) --- + custom_nnsight, _, _ = _build_demo_custom_target( + model_nnsight, dallas_austin_prompt, "▁Austin", "▁Dallas", backend="nnsight" + ) + graph_nnsight = attribute_nnsight( + dallas_austin_prompt, + model_nnsight, + attribution_targets=[custom_nnsight], + verbose=False, + batch_size=256, + ) + nn_active = graph_nnsight.active_features.cpu() + nn_selected = graph_nnsight.selected_features.cpu() + nn_tokens = [t.token_str for t in graph_nnsight.logit_targets] + nn_adj = graph_nnsight.adjacency_matrix.cpu() + del graph_nnsight, custom_nnsight + _gpu_cleanup() + + # --- TL backend --- + with _swap_backend(model_nnsight, model_tl): + custom_tl, _, _ = _build_demo_custom_target( + model_tl, dallas_austin_prompt, "▁Austin", "▁Dallas", backend="transformerlens" + ) + graph_tl = attribute_transformerlens( + 
dallas_austin_prompt, + model_tl, + attribution_targets=[custom_tl], + verbose=False, + batch_size=128, + ) + tl_active = graph_tl.active_features.cpu() + tl_selected = graph_tl.selected_features.cpu() + tl_tokens = [t.token_str for t in graph_tl.logit_targets] + tl_adj = graph_tl.adjacency_matrix.cpu() + del graph_tl, custom_tl + _gpu_cleanup() + + # --- Compare CPU tensors --- + assert (nn_active == tl_active).all(), ( + "Custom-target active features don't match between backends" + ) + assert (nn_selected == tl_selected).all(), ( + "Custom-target selected features don't match between backends" + ) + assert nn_tokens == tl_tokens, f"Custom-target logit tokens differ: {nn_tokens} vs {tl_tokens}" + assert torch.allclose(nn_adj, tl_adj, atol=5e-4, rtol=1e-5), ( + f"Custom-target adjacency matrices differ by max {(nn_adj - tl_adj).abs().max()}" + ) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_attribution_targets_logit_diff_intervention(models_sequential, dallas_austin_prompt): + """Test custom-target feature amplification consistency between TL and NNSight.""" + model_nnsight, model_tl = models_sequential + n_top = 10 + + def _get_top_features(graph, n): + n_logits = len(graph.logit_targets) + n_features = len(graph.selected_features) + logit_weights = torch.zeros( + graph.adjacency_matrix.shape[0], device=graph.adjacency_matrix.device + ) + logit_weights[-n_logits:] = graph.logit_probabilities + node_influence = compute_node_influence(graph.adjacency_matrix, logit_weights) + _, top_idx = torch.topk(node_influence[:n_features], min(n, n_features)) + return [tuple(graph.active_features[graph.selected_features[i]].tolist()) for i in top_idx] + + # --- NNSight backend (already on CUDA from fixture) --- + custom_nnsight, idx_x_nn, idx_y_nn = _build_demo_custom_target( + model_nnsight, dallas_austin_prompt, "▁Austin", "▁Dallas", backend="nnsight" + ) + graph_nnsight = attribute_nnsight( + dallas_austin_prompt, + 
model_nnsight, + attribution_targets=[custom_nnsight], + verbose=False, + batch_size=256, + ) + top_feats_nn = _get_top_features(graph_nnsight, n_top) + del graph_nnsight, custom_nnsight + _gpu_cleanup() + + input_ids_nn = model_nnsight.ensure_tokenized(dallas_austin_prompt) + orig_logits_nn, acts_nn = model_nnsight.get_activations(input_ids_nn, sparse=True) + + interv_nn = [(ly, p, f, 10.0 * acts_nn[ly, p, f]) for (ly, p, f) in top_feats_nn] + new_logits_nn, _ = model_nnsight.feature_intervention(input_ids_nn, interv_nn) + + orig_gap_nn = ( + (orig_logits_nn.squeeze(0)[-1, idx_x_nn] - orig_logits_nn.squeeze(0)[-1, idx_y_nn]) + .cpu() + .item() + ) + new_gap_nn = ( + (new_logits_nn.squeeze(0)[-1, idx_x_nn] - new_logits_nn.squeeze(0)[-1, idx_y_nn]) + .cpu() + .item() + ) + del orig_logits_nn, acts_nn, new_logits_nn + _gpu_cleanup() + + # --- TL backend --- + with _swap_backend(model_nnsight, model_tl): + custom_tl, idx_x_tl, idx_y_tl = _build_demo_custom_target( + model_tl, dallas_austin_prompt, "▁Austin", "▁Dallas", backend="transformerlens" + ) + graph_tl = attribute_transformerlens( + dallas_austin_prompt, + model_tl, + attribution_targets=[custom_tl], + verbose=False, + batch_size=128, + ) + top_feats_tl = _get_top_features(graph_tl, n_top) + del graph_tl, custom_tl + _gpu_cleanup() + + input_ids_tl = model_tl.ensure_tokenized(dallas_austin_prompt) + orig_logits_tl, acts_tl = model_tl.get_activations(input_ids_tl, sparse=True) + + interv_tl = [(ly, p, f, 10.0 * acts_tl[ly, p, f]) for (ly, p, f) in top_feats_tl] + new_logits_tl, _ = model_tl.feature_intervention(input_ids_tl, interv_tl) + + orig_gap_tl = ( + (orig_logits_tl.squeeze(0)[-1, idx_x_tl] - orig_logits_tl.squeeze(0)[-1, idx_y_tl]) + .cpu() + .item() + ) + new_gap_tl = ( + (new_logits_tl.squeeze(0)[-1, idx_x_tl] - new_logits_tl.squeeze(0)[-1, idx_y_tl]) + .cpu() + .item() + ) + del orig_logits_tl, acts_tl, new_logits_tl + _gpu_cleanup() + + # --- Compare on CPU --- + assert new_gap_nn > orig_gap_nn, ( + 
f"NNSight: amplification should widen gap, got {orig_gap_nn:.4f} -> {new_gap_nn:.4f}" + ) + assert new_gap_tl > orig_gap_tl, ( + f"TL: amplification should widen gap, got {orig_gap_tl:.4f} -> {new_gap_tl:.4f}" + ) + + assert abs(new_gap_nn - new_gap_tl) < 0.5, ( + f"Post-intervention gaps differ too much: NNSight={new_gap_nn:.4f}, TL={new_gap_tl:.4f}" + ) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_attribution_targets_semantic(models_sequential, dallas_austin_prompt): + """Test attribution with semantic concept CustomTarget consistency between TL and NNSight.""" + model_nnsight, model_tl = models_sequential + capitals = ["▁Austin", "▁Sacramento", "▁Olympia", "▁Atlanta"] + states = ["▁Texas", "▁California", "▁Washington", "▁Georgia"] + label = "Concept: Capitals − States" + + # --- NNSight backend (already on CUDA from fixture) --- + sem_nnsight = _build_demo_semantic_target( + model_nnsight, dallas_austin_prompt, capitals, states, label, backend="nnsight" + ) + graph_nnsight = attribute_nnsight( + dallas_austin_prompt, + model_nnsight, + attribution_targets=[sem_nnsight], + verbose=False, + batch_size=256, + ) + nn_active = graph_nnsight.active_features.cpu() + nn_selected = graph_nnsight.selected_features.cpu() + nn_tokens = [t.token_str for t in graph_nnsight.logit_targets] + nn_adj = graph_nnsight.adjacency_matrix.cpu() + del graph_nnsight, sem_nnsight + _gpu_cleanup() + + # --- TL backend --- + with _swap_backend(model_nnsight, model_tl): + sem_tl = _build_demo_semantic_target( + model_tl, dallas_austin_prompt, capitals, states, label, backend="transformerlens" + ) + graph_tl = attribute_transformerlens( + dallas_austin_prompt, + model_tl, + attribution_targets=[sem_tl], + verbose=False, + batch_size=128, + ) + tl_active = graph_tl.active_features.cpu() + tl_selected = graph_tl.selected_features.cpu() + tl_tokens = [t.token_str for t in graph_tl.logit_targets] + tl_adj = graph_tl.adjacency_matrix.cpu() + del 
graph_tl, sem_tl + _gpu_cleanup() + + # --- Compare CPU tensors --- + assert (nn_active == tl_active).all(), ( + "Semantic-target active features don't match between backends" + ) + assert (nn_selected == tl_selected).all(), ( + "Semantic-target selected features don't match between backends" + ) + assert nn_tokens == tl_tokens, ( + f"Semantic-target logit tokens differ: {nn_tokens} vs {tl_tokens}" + ) + assert torch.allclose(nn_adj, tl_adj, atol=5e-4, rtol=1e-5), ( + f"Semantic-target adjacency matrices differ by max {(nn_adj - tl_adj).abs().max()}" + ) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_attribution_targets_semantic_intervention(models_sequential, dallas_austin_prompt): + """Test semantic-target feature amplification consistency between TL and NNSight.""" + model_nnsight, model_tl = models_sequential + n_top = 10 + capitals = ["▁Austin", "▁Sacramento", "▁Olympia", "▁Atlanta"] + states = ["▁Texas", "▁California", "▁Washington", "▁Georgia"] + label = "Concept: Capitals − States" + + def _get_top_features(graph, n): + n_logits = len(graph.logit_targets) + n_features = len(graph.selected_features) + logit_weights = torch.zeros( + graph.adjacency_matrix.shape[0], device=graph.adjacency_matrix.device + ) + logit_weights[-n_logits:] = graph.logit_probabilities + node_influence = compute_node_influence(graph.adjacency_matrix, logit_weights) + _, top_idx = torch.topk(node_influence[:n_features], min(n, n_features)) + return [tuple(graph.active_features[graph.selected_features[i]].tolist()) for i in top_idx] + + # --- NNSight backend (already on CUDA from fixture) --- + sem_nnsight = _build_demo_semantic_target( + model_nnsight, dallas_austin_prompt, capitals, states, label, backend="nnsight" + ) + idx_x_nn = model_nnsight.tokenizer.encode("▁Austin", add_special_tokens=False)[-1] + idx_y_nn = model_nnsight.tokenizer.encode("▁Dallas", add_special_tokens=False)[-1] + + graph_nnsight = attribute_nnsight( + 
dallas_austin_prompt, + model_nnsight, + attribution_targets=[sem_nnsight], + verbose=False, + batch_size=256, + ) + top_feats_nn = _get_top_features(graph_nnsight, n_top) + del graph_nnsight, sem_nnsight + _gpu_cleanup() + + input_ids_nn = model_nnsight.ensure_tokenized(dallas_austin_prompt) + orig_logits_nn, acts_nn = model_nnsight.get_activations(input_ids_nn, sparse=True) + + interv_nn = [(ly, p, f, 10.0 * acts_nn[ly, p, f]) for (ly, p, f) in top_feats_nn] + new_logits_nn, _ = model_nnsight.feature_intervention(input_ids_nn, interv_nn) + + orig_gap_nn = ( + (orig_logits_nn.squeeze(0)[-1, idx_x_nn] - orig_logits_nn.squeeze(0)[-1, idx_y_nn]) + .cpu() + .item() + ) + new_gap_nn = ( + (new_logits_nn.squeeze(0)[-1, idx_x_nn] - new_logits_nn.squeeze(0)[-1, idx_y_nn]) + .cpu() + .item() + ) + del orig_logits_nn, acts_nn, new_logits_nn + _gpu_cleanup() + + # --- TL backend --- + with _swap_backend(model_nnsight, model_tl): + sem_tl = _build_demo_semantic_target( + model_tl, dallas_austin_prompt, capitals, states, label, backend="transformerlens" + ) + idx_x_tl = model_tl.tokenizer.encode("▁Austin", add_special_tokens=False)[-1] + idx_y_tl = model_tl.tokenizer.encode("▁Dallas", add_special_tokens=False)[-1] + + graph_tl = attribute_transformerlens( + dallas_austin_prompt, + model_tl, + attribution_targets=[sem_tl], + verbose=False, + batch_size=128, + ) + top_feats_tl = _get_top_features(graph_tl, n_top) + del graph_tl, sem_tl + _gpu_cleanup() + + input_ids_tl = model_tl.ensure_tokenized(dallas_austin_prompt) + orig_logits_tl, acts_tl = model_tl.get_activations(input_ids_tl, sparse=True) + + interv_tl = [(ly, p, f, 10.0 * acts_tl[ly, p, f]) for (ly, p, f) in top_feats_tl] + new_logits_tl, _ = model_tl.feature_intervention(input_ids_tl, interv_tl) + + orig_gap_tl = ( + (orig_logits_tl.squeeze(0)[-1, idx_x_tl] - orig_logits_tl.squeeze(0)[-1, idx_y_tl]) + .cpu() + .item() + ) + new_gap_tl = ( + (new_logits_tl.squeeze(0)[-1, idx_x_tl] - new_logits_tl.squeeze(0)[-1, 
idx_y_tl]) + .cpu() + .item() + ) + del orig_logits_tl, acts_tl, new_logits_tl + _gpu_cleanup() + + # --- Compare on CPU --- + assert new_gap_nn > orig_gap_nn, ( + f"NNSight: semantic amplification should widen gap, got {orig_gap_nn:.4f} -> {new_gap_nn:.4f}" + ) + assert new_gap_tl > orig_gap_tl, ( + f"TL: semantic amplification should widen gap, got {orig_gap_tl:.4f} -> {new_gap_tl:.4f}" + ) + + assert abs(new_gap_nn - new_gap_tl) < 0.5, ( + f"Semantic post-intervention gaps differ too much: NNSight={new_gap_nn:.4f}, TL={new_gap_tl:.4f}" + ) + + def run_all_tests(): """Run all tests when script is executed directly.""" print("Loading models...") @@ -903,9 +1414,31 @@ def run_all_tests(): test_setup_attribution_consistency(models_fixture, dallas_austin) print("✓ Attribution setup consistency test passed") + print("\n=== Testing Attribution Targets Demo ===") + + print("Running test_attribution_targets_string...") + test_attribution_targets_string(models_fixture, dallas_austin) + print("✓ Attribution targets string test passed") + + print("Running test_attribution_targets_logit_diff...") + test_attribution_targets_logit_diff(models_fixture, dallas_austin) + print("✓ Attribution targets logit-diff test passed") + + print("Running test_attribution_targets_logit_diff_intervention...") + test_attribution_targets_logit_diff_intervention(models_fixture, dallas_austin) + print("✓ Attribution targets logit-diff intervention test passed") + + print("Running test_attribution_targets_semantic...") + test_attribution_targets_semantic(models_fixture, dallas_austin) + print("✓ Attribution targets semantic test passed") + + print("Running test_attribution_targets_semantic_intervention...") + test_attribution_targets_semantic_intervention(models_fixture, dallas_austin) + print("✓ Attribution targets semantic intervention test passed") + print("\n" + "=" * 70) print("All tutorial notebook tests passed! 
✓") - print("Total tests run: 20") + print("Total tests run: 24") print("=" * 70) From be7654d777d784ff5d7eb82acfa9dff05939b7b6 Mon Sep 17 00:00:00 2001 From: Daniel Dale Date: Wed, 18 Feb 2026 20:39:46 -0800 Subject: [PATCH 15/18] updates to attribution_targets_demo.ipynb including: - refined initial description and overview of the attribution targets API - updated colab notebook setup to allow testing the attribution-targets branch before merging - improved formatting of the attribution target, token mapping outputs and top feature comparison tables - verified the demo notebook is working as expected in colab --- circuit_tracer/attribution/context_nnsight.py | 3 +- circuit_tracer/utils/demo_utils.py | 62 + demos/attribution_targets_demo.ipynb | 2213 ++--------------- 3 files changed, 218 insertions(+), 2060 deletions(-) diff --git a/circuit_tracer/attribution/context_nnsight.py b/circuit_tracer/attribution/context_nnsight.py index 63f5e7d3..bf093127 100644 --- a/circuit_tracer/attribution/context_nnsight.py +++ b/circuit_tracer/attribution/context_nnsight.py @@ -103,8 +103,7 @@ def compute_score( proxy = weakref.proxy(self) proxy._batch_buffer[write_index] += einsum( - grads[read_index], - # grads.to(output_vecs.dtype)[read_index], + grads.to(output_vecs.dtype)[read_index], output_vecs, "batch position d_model, position d_model -> position batch", ) diff --git a/circuit_tracer/utils/demo_utils.py b/circuit_tracer/utils/demo_utils.py index 803317b5..256ecc7b 100644 --- a/circuit_tracer/utils/demo_utils.py +++ b/circuit_tracer/utils/demo_utils.py @@ -7,6 +7,7 @@ import torch from IPython.display import HTML, display +from circuit_tracer.attribution.targets import CustomTarget from circuit_tracer.graph import compute_node_influence Feature = namedtuple("Feature", ["layer", "pos", "feature_idx"]) @@ -177,6 +178,67 @@ def display_top_features_comparison( display(HTML(style + body)) +def display_attribution_config( + token_pairs: list[tuple[str, int]], + target_pairs: 
list[tuple[str, CustomTarget]], +) -> None: + """Display token-mapping and custom-target summary tables. + + Args: + token_pairs: List of ``(token_str, vocab_id)`` pairs for the Token Mappings table. + target_pairs: List of ``(kind_label, target)`` pairs for the Attribution Targets + table, where each ``target`` is a CustomTarget with ``.token_str`` and ``.prob`` attributes. + """ + th_l = "padding:5px 14px 5px 6px; border-bottom:2px solid #888; text-align:left; white-space:nowrap" + th_r = "padding:5px 14px 5px 6px; border-bottom:2px solid #888; text-align:right; white-space:nowrap" + td_l = "padding:4px 14px 4px 6px; border-bottom:1px solid #ddd; text-align:left" + td_r = "padding:4px 14px 4px 6px; border-bottom:1px solid #ddd; text-align:right" + + # ── Token Mappings ──────────────────────────────────────────────────────── + token_rows = "".join( + "" + "" + html.escape(tok) + "" + "" + str(vid) + "" + "" + for tok, vid in token_pairs + ) + display( + HTML( + "Token Mappings" + "" + "" + "" + "" + "" + "" + token_rows + "" + "
TokenVocab ID
" + ) + ) + + # ── Attribution Targets ─────────────────────────────────────────────────── + target_rows = "".join( + "" + "" + html.escape(kind) + "" + "" + html.escape(tgt.token_str) + "" + "" + f"{tgt.prob * 100:.3f}%" + "" + "" + for kind, tgt in target_pairs + ) + display( + HTML( + "Attribution Targets" + "" + "" + "" + "" + "" + "" + "" + target_rows + "" + "
TargetLabelProbability
" + ) + ) + + def display_token_probs( logits: torch.Tensor, token_ids: list[int], diff --git a/demos/attribution_targets_demo.ipynb b/demos/attribution_targets_demo.ipynb index 33e37bca..8d25c9cc 100644 --- a/demos/attribution_targets_demo.ipynb +++ b/demos/attribution_targets_demo.ipynb @@ -6,67 +6,108 @@ "id": "Qa5r1-7RmS8j" }, "source": [ - "# Attribution Targets API\n", + "# Attribution Targets \n", "\n", "\n", " \"Open\n", "\n", "\n", - "This tutorial explores how to use the **attribution targets API** to attribute back from arbitrary tokens or functions thereof. The `AttributionTargets` class (in `circuit_tracer.attribution.targets`) accepts four input formats:\n", + "This tutorial walks through the **attribution targets API**, demonstrating how to attribute back from arbitrary tokens, functions thereof, or abstract concept directions in the residual stream.\n", "\n", - "1. **`None`** — *Salient logits* (default): auto-select the most probable next tokens via `max_n_logits` / `desired_logit_prob`\n", - "2. **`Sequence[str]`** — *Token strings*: attribute from explicitly named tokens (e.g., `[\"▁Austin\", \"▁Dallas\"]`)\n", - "3. **`Sequence[TargetSpec]`** — *Custom targets*: attribute from an arbitrary direction in the residual stream (e.g., a `CustomTarget` encoding `logit(Austin) − logit(Dallas)`)\n", - "4. **`torch.Tensor`** — *Token ID tensor*: attribute from specific vocabulary indices\n", + "The `AttributionTargets` class (in `circuit_tracer.attribution.targets`) accepts four input formats:\n", "\n", - "We use the capital-city prompt from the other demos: the model must resolve *\"capital of the state containing Dallas\"* via multi-hop reasoning (Dallas → Texas → Austin).\n", + "| Input type | Mode | Description |\n", + "|---|---|---|\n", + "| `None` | Salient logits | Auto-selects the most probable next tokens via `max_n_logits` / `desired_logit_prob` (default) |\n", + "| `Sequence[str]` | Token strings | Attribute from explicitly named tokens, e.g. 
`[\"▁Austin\", \"▁Dallas\"]` |\n", + "| `Sequence[TargetSpec]` | Custom target | Attribute from arbitrary residual-stream directions via a sequence of `CustomTarget(token_str, prob, vec)` namedtuples or raw `tuple[str, float, Tensor]` |\n", + "| `torch.Tensor` | Token ID tensor | Attribute from specific vocabulary indices |\n", "\n", - "After comparing the top features discovered under each mode, we run some relevant causal interventions." + "See the expandable reference below for `CustomTarget` / `TargetSpec` field descriptions and examples.\n", + "\n", + "We use the capital-city prompt from the other demos: the model must resolve *\"capital of the state containing Dallas\"* via multi-hop reasoning (Dallas → Texas → Austin). After comparing the top features discovered under each mode, we run some relevant causal interventions." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "TargetSpec / CustomTarget — field reference & examples\n", + "\n", + "The `attribution_targets` argument to `attribute()` accepts a `Sequence[TargetSpec]` for fully custom residual-stream directions. Two convenience types are involved:\n", + "\n", + "**`CustomTarget(token_str, prob, vec)`** is a `NamedTuple` with three fields:\n", + "\n", + "| Field | Type | Description |\n", + "|---|---|---|\n", + "| `token_str` | `str` | Human-readable label for this target (e.g. `\"logit(Austin)−logit(Dallas)\"`) |\n", + "| `prob` | `float` | Scalar weight — typically the softmax probability of the token, or \\|p(x)−p(y)\\| for a contrast direction |\n", + "| `vec` | `Tensor (d_model,)` | The direction in residual-stream space to attribute toward |\n", + "\n", + "**`TargetSpec`** is a type alias for `CustomTarget | tuple[str, float, torch.Tensor]`. Either form is accepted — a raw 3-tuple is coerced to a `CustomTarget` namedtuple automatically before processing.\n", + "\n", + "**Example — raw tuple (coerced automatically):**\n", + "\n", + "```python\n", + "raw: TargetSpec = (\"my-direction\", 0.05, some_tensor) # plain 3-tuple → TargetSpec\n", + "graph = attribute(prompt=prompt, model=model, attribution_targets=[raw])\n", + "```\n", + "\n", + "**Example — explicit `CustomTarget` namedtuple:**\n", + "\n", + "```python\n", + "from circuit_tracer.attribution.targets import CustomTarget\n", + "\n", + "target = CustomTarget(\n", + " token_str=\"logit(Austin)−logit(Dallas)\",\n", + " prob=abs(p_austin - p_dallas), # scalar weight\n", + " vec=unembed_austin - unembed_dallas, # shape: (d_model,)\n", + ")\n", + "graph = attribute(prompt=prompt, model=model, attribution_targets=[target])\n", + "```\n", + "\n", + "
" ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "# @title Colab Setup Environment\n", + "import sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", "\n", - "try:\n", - " import google.colab\n", + "def setup_environment():\n", + " from google.colab import output\n", + " output.enable_custom_widget_manager()\n", "\n", - " !mkdir -p repository && cd repository && \\\n", - " git clone https://github.com/safety-research/circuit-tracer && \\\n", - " curl -LsSf https://astral.sh/uv/install.sh | sh && \\\n", - " uv pip install -e circuit-tracer/\n", + " print(\"Setting up Colab environment...\")\n", + " %pip install -q uv\n", + "\n", + " # Use uv to install our PR branch for temporary testing, replace w/ commented line once installing from release.\n", + " !uv pip install --system --no-cache \"git+https://github.com/speediedan/circuit-tracer.git@attribution-targets\"\n", + " # after merged to main, install released version\n", + " # !uv pip install --system --no-cache circuit-tracer\n", "\n", - " import sys\n", " from huggingface_hub import notebook_login\n", + " notebook_login()\n", "\n", - " sys.path.append(\"repository/circuit-tracer\")\n", - " sys.path.append(\"repository/circuit-tracer/demos\")\n", - " notebook_login(new_session=False)\n", - " IN_COLAB = True\n", - "except ImportError:\n", - " IN_COLAB = False" + "if IN_COLAB:\n", + " setup_environment()\n", + "else:\n", + " print(\"Running in local environment. Skipping Colab-specific setup.\")" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "id": "P8fNhpqzmS8k" }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/mnt/cache/speediedan/.venvs/ct_dev/lib/python3.13/site-packages/transformers/utils/hub.py:110: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. 
Use `HF_HOME` instead.\n", - " warnings.warn(\n" - ] - } - ], + "outputs": [], "source": [ "from functools import partial\n", "\n", @@ -78,6 +119,7 @@ "from circuit_tracer.utils.demo_utils import (\n", " cleanup_cuda,\n", " display_ablation_chart,\n", + " display_attribution_config,\n", " display_token_probs,\n", " display_topk_token_predictions,\n", " display_top_features_comparison,\n", @@ -99,58 +141,15 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { "id": "BBsETpl0mS8l" }, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "70526ca651f843d6bc193da8ad5abbf3", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Fetching 26 files: 0%| | 0/26 [00:00\n", - "
Baseline probabilities
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "\n", - "\n", - "\n", - " \n", - "
TokenProbabilityLogit
▁Austin41.380%26.1250
▁Dallas2.998%23.5000
▁Texas5.600%24.1250
\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Token X: '▁Austin' (vocab id 22605)\n", - "Token Y: '▁Dallas' (vocab id 26865)\n", - "Texas: '▁Texas' (vocab id 9447)\n", - "Custom target: logit(▁Austin)-logit(▁Dallas) prob=3.8477e-01\n", - "Semantic target: Concept: Capitals − States prob=1.0557e-01\n" - ] - } - ], + "outputs": [], "source": [ "prompt = \"Fact: the capital of the state containing Dallas is\"\n", "token_x, token_y = \"▁Austin\", \"▁Dallas\"\n", @@ -328,7 +313,7 @@ "states = [\"▁Texas\", \"▁California\", \"▁Washington\", \"▁Georgia\"]\n", "semantic_target = build_semantic_concept_target(\n", " model, prompt, capitals, states,\n", - " label=\"Concept: Capitals − States\", backend=backend,\n", + " label=\"Capitals − States\", backend=backend,\n", ")\n", "\n", "# Also track Texas — the intermediate hop in the multi-hop chain\n", @@ -350,11 +335,10 @@ "key_labels = [token_x, token_y, \"▁Texas\"]\n", "display_token_probs(baseline_logits, key_ids, key_labels, title=\"Baseline probabilities\")\n", "\n", - "print(f\"\\nToken X: {token_x!r} (vocab id {idx_x})\")\n", - "print(f\"Token Y: {token_y!r} (vocab id {idx_y})\")\n", - "print(f\"Texas: '▁Texas' (vocab id {idx_texas})\")\n", - "print(f\"Custom target: {custom_target.token_str} prob={custom_target.prob:.4e}\")\n", - "print(f\"Semantic target: {semantic_target.token_str} prob={semantic_target.prob:.4e}\")" + "display_attribution_config(\n", + " token_pairs=[(token_x, idx_x), (token_y, idx_y), (\"▁Texas\", idx_texas)],\n", + " target_pairs=[(\"Logit diff\", custom_target), (\"Semantic concept\", semantic_target)],\n", + ")" ] }, { @@ -370,41 +354,11 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": { "id": "2tLE4FzdmS8m" }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Phase 0: Precomputing 
activations and vectors\n", - "Precomputation completed in 0.21s\n", - "Found 9152 active features\n", - "Phase 1: Running forward pass\n", - "Forward pass completed in 0.10s\n", - "Phase 2: Building input vectors\n", - "Using 10 salient logits with cumulative probability 0.7422\n", - "Will include 8192 of 9152 feature nodes\n", - "Input vectors built in 1.37s\n", - "Phase 3: Computing logit attributions\n", - ":0: UserWarning: Full backward hook is firing when gradients are computed with respect to module outputs since no inputs require gradients. See https://docs.pytorch.org/docs/main/generated/torch.nn.Module.html#torch.nn.Module.register_full_backward_hook for more details.\n", - "Logit attributions completed in 0.09s\n", - "Phase 4: Computing feature attributions\n", - "Feature influence computation: 100%|██████████| 8192/8192 [00:02<00:00, 4001.74it/s]\n", - "Feature attributions completed in 2.05s\n", - "Attribution completed in 6.97s\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Salient-logits graph: 10 targets, 9152 active features\n" - ] - } - ], + "outputs": [], "source": [ "graph_salient = attribute(\n", " prompt=prompt, model=model,\n", @@ -431,40 +385,11 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": { "id": "Vh8HPtimmS8m" }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Phase 0: Precomputing activations and vectors\n", - "Precomputation completed in 0.18s\n", - "Found 9152 active features\n", - "Phase 1: Running forward pass\n", - "Forward pass completed in 0.09s\n", - "Phase 2: Building input vectors\n", - "Using 2 specified logit targets with cumulative probability 0.4434\n", - "Will include 8192 of 9152 feature nodes\n", - "Input vectors built in 1.35s\n", - "Phase 3: Computing logit attributions\n", - "Logit attributions completed in 0.05s\n", - "Phase 4: Computing feature attributions\n", - "Feature influence computation: 
100%|██████████| 8192/8192 [00:02<00:00, 3998.39it/s]\n", - "Feature attributions completed in 2.05s\n", - "Attribution completed in 6.96s\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "String-targets graph: 2 targets, 9152 active features\n" - ] - } - ], + "outputs": [], "source": [ "graph_str = attribute(\n", " prompt=prompt, model=model,\n", @@ -486,45 +411,16 @@ "source": [ "## Custom Logit-Difference Target — `Sequence[TargetSpec]`\n", "\n", - "Pass a `CustomTarget` (or any `TargetSpec` — a tuple of `(token_str, prob, vec)`) that encodes an arbitrary direction in the residual stream. Here the direction is `logit(Austin) − logit(Dallas)`, so attribution will trace the circuit that drives the *correct* answer over the surface-level attractor, rather than tracing individual logit values separately." + "Pass a `CustomTarget` (or any `TargetSpec` — a tuple of `(token_str, prob, vec)`) that encodes an arbitrary direction in the residual stream. Here the direction is `logit(Austin) − logit(Dallas)`, so we notionally construct an attribution graph more narrowly driving the selection of the *correct* answer over the surface-level attractor." 
] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": { "id": "gMZ8Ee-KmS8m" }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Phase 0: Precomputing activations and vectors\n", - "Precomputation completed in 0.18s\n", - "Found 9152 active features\n", - "Phase 1: Running forward pass\n", - "Forward pass completed in 0.08s\n", - "Phase 2: Building input vectors\n", - "Using 1 custom attribution targets with total weight 0.3848\n", - "Will include 8192 of 9152 feature nodes\n", - "Input vectors built in 1.34s\n", - "Phase 3: Computing logit attributions\n", - "Logit attributions completed in 0.05s\n", - "Phase 4: Computing feature attributions\n", - "Feature influence computation: 100%|██████████| 8192/8192 [00:02<00:00, 3929.05it/s]\n", - "Feature attributions completed in 2.09s\n", - "Attribution completed in 7.08s\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Custom-target graph: 1 targets, 9152 active features\n" - ] - } - ], + "outputs": [], "source": [ "graph_custom = attribute(\n", " prompt=prompt, model=model,\n", @@ -541,43 +437,14 @@ "source": [ "## Semantic Direction — Concept Target\n", "\n", - "Instead of a pairwise logit difference, we can attribute to an **abstract concept direction** in the residual stream. We build a CustomTarget for an abstract concept direction via vector rejection. For each (capital, state) pair, project the capital vector onto the state vector and subtract that projection, leaving pure \"capital-ness\"." + "Instead of a pairwise logit difference, we can attribute to an **abstract concept direction** in the residual stream. We build a `CustomTarget` for an abstract concept direction via vector rejection. For each (capital, state) pair, project the capital vector onto the state vector and subtract that projection, leaving pure \"capital-ness\"." 
] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Phase 0: Precomputing activations and vectors\n", - "Precomputation completed in 0.14s\n", - "Found 9152 active features\n", - "Phase 1: Running forward pass\n", - "Forward pass completed in 0.08s\n", - "Phase 2: Building input vectors\n", - "Using 1 custom attribution targets with total weight 0.1056\n", - "Will include 8192 of 9152 feature nodes\n", - "Input vectors built in 1.34s\n", - "Phase 3: Computing logit attributions\n", - "Logit attributions completed in 0.05s\n", - "Phase 4: Computing feature attributions\n", - "Feature influence computation: 100%|██████████| 8192/8192 [00:02<00:00, 4067.53it/s]\n", - "Feature attributions completed in 2.02s\n", - "Attribution completed in 6.83s\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Semantic-target graph: 1 targets, 9152 active features\n" - ] - } - ], + "outputs": [], "source": [ "graph_semantic = attribute(\n", " prompt=prompt, model=model,\n", @@ -604,70 +471,11 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": { "id": "185O1Ck1mS8m" }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - "
Salient Logits
#NodeScore
1(20, 10, 15589)0.0197
2(24, 10, 6044)0.0159
3(21, 10, 5943)0.0118
4(23, 10, 12237)0.0083
5(14, 9, 2268)0.0080
6(16, 9, 25)0.0074
7(25, 10, 13300)0.0072
8(20, 9, 15589)0.0069
9(24, 10, 6394)0.0061
10(24, 10, 13277)0.0057
Strings [▁Austin, ▁Dallas]
#NodeScore
1(20, 10, 15589)0.0155
2(24, 10, 6044)0.0122
3(21, 10, 5943)0.0095
4(23, 10, 12237)0.0074
5(14, 9, 2268)0.0058
6(16, 9, 25)0.0057
7(25, 10, 13300)0.0054
8(20, 9, 15589)0.0054
9(24, 10, 6394)0.0051
10(4, 9, 13154)0.0040
Custom (logit(▁Austin)-logit(▁Dallas))
#NodeScore
1(19, 10, 7477)0.0057
2(23, 10, 12237)0.0035
3(18, 10, 8959)0.0028
4(0, 2, 16200)0.0026
5(14, 9, 2268)0.0025
6(25, 10, 583)0.0025
7(1, 4, 1000)0.0023
8(18, 10, 6101)0.0022
9(0, 3, 3820)0.0022
10(0, 5, 2848)0.0022
Semantic (Concept: Capitals − States)
#NodeScore
1(21, 10, 5943)0.0036
2(24, 10, 6394)0.0020
3(23, 10, 12237)0.0017
4(24, 10, 5999)0.0012
5(20, 10, 15589)0.0011
6(19, 10, 2695)0.0010
7(24, 10, 6044)0.0010
8(22, 10, 4999)0.0010
9(18, 10, 6101)0.0009
10(0, 2, 16200)0.0008
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "top_salient, scores_salient = get_top_features(graph_salient, n=10)\n", "top_str, scores_str = get_top_features(graph_str, n=10)\n", @@ -678,14 +486,14 @@ " {\n", " \"Salient Logits\": top_salient,\n", " f\"Strings [{token_x}, {token_y}]\": top_str,\n", - " f\"Custom ({custom_target.token_str})\": top_custom,\n", - " f\"Semantic ({semantic_target.token_str})\": top_semantic,\n", + " f\"Custom Fn ({custom_target.token_str})\": top_custom,\n", + " f\"Semantic Concept ({semantic_target.token_str})\": top_semantic,\n", " },\n", " scores_sets={\n", " \"Salient Logits\": scores_salient,\n", " f\"Strings [{token_x}, {token_y}]\": scores_str,\n", - " f\"Custom ({custom_target.token_str})\": scores_custom,\n", - " f\"Semantic ({semantic_target.token_str})\": scores_semantic,\n", + " f\"Custom Fn ({custom_target.token_str})\": scores_custom,\n", + " f\"Semantic Concept ({semantic_target.token_str})\": scores_semantic,\n", " },\n", " neuronpedia_model=\"gemma-2-2b\",\n", ")" @@ -704,371 +512,9 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "
\n", - "
Before amplification
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "\n", - "\n", - "\n", - " \n", - "
TokenProbabilityLogit
▁Austin41.380%26.1250
▁Dallas2.998%23.5000
▁Texas5.600%24.1250
\n", - "
\n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "\n", - "
\n", - "
After 10× amplification
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "\n", - "\n", - "\n", - " \n", - "
TokenProbabilityLogit
▁Austin83.206%24.7500
▁Dallas5.91e-0417.5000
▁Texas5.28e-095.8750
\n", - "
\n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "logit(Austin) − logit(Dallas): 2.6250 → 7.2500 (Δ = +4.6250)\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - "
\n", - "
Input Sentence:
\n", - "
Fact: the capital of the state containing Dallas is
\n", - " \n", - "
\n", - "
Original Top 5 Tokens
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
TokenProbabilityDistribution
Austin0.414\n", - "
\n", - "
\n", - " 41.4%\n", - "
\n", - "
Texas0.056\n", - "
\n", - "
\n", - " 5.6%\n", - "
\n", - "
the0.056\n", - "
\n", - "
\n", - " 5.6%\n", - "
\n", - "
not0.056\n", - "
\n", - "
\n", - " 5.6%\n", - "
\n", - "
Fort0.044\n", - "
\n", - "
\n", - " 4.4%\n", - "
\n", - "
\n", - " \n", - "
New Top 5 Tokens
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
TokenProbabilityDistribution
Austin0.832\n", - "
\n", - "
\n", - " 83.2%\n", - "
\n", - "
in0.017\n", - "
\n", - "
\n", - " 1.7%\n", - "
\n", - "
AUSTIN0.010\n", - "
\n", - "
\n", - " 1.0%\n", - "
\n", - "
Irving0.009\n", - "
\n", - "
\n", - " 0.9%\n", - "
\n", - "
D0.009\n", - "
\n", - "
\n", - " 0.9%\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - "
Key Tokens
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
TokenOriginalNewChange
▁Austin0.41410.8320\n", - "
\n", - "
\n", - " +100.9%\n", - "
\n", - "
▁Dallas0.03000.0006\n", - "
\n", - "
\n", - " -98.0%\n", - "
\n", - "
▁Texas0.05590.0000\n", - "
\n", - "
\n", - " -100.0%\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# Get activations for interventions\n", "input_ids = model.ensure_tokenized(prompt)\n", @@ -1105,701 +551,9 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "
\n", - "
Before amplification (semantic)
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "\n", - "\n", - "\n", - " \n", - "
TokenProbabilityLogit
▁Austin41.380%26.1250
▁Dallas2.998%23.5000
▁Texas5.600%24.1250
\n", - "
\n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "\n", - "
\n", - "
After 2× amplification (semantic)
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "\n", - "\n", - "\n", - " \n", - "
TokenProbabilityLogit
▁Austin61.581%26.3750
▁Dallas3.474%23.5000
▁Texas0.470%21.5000
\n", - "
\n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "logit(Austin) − logit(Dallas): 2.6250 → 2.8750 (Δ = +0.2500) [2×]\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - "
\n", - "
Input Sentence:
\n", - "
Fact: the capital of the state containing Dallas is
\n", - " \n", - "
\n", - "
Original Top 5 Tokens
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
TokenProbabilityDistribution
Austin0.414\n", - "
\n", - "
\n", - " 41.4%\n", - "
\n", - "
Texas0.056\n", - "
\n", - "
\n", - " 5.6%\n", - "
\n", - "
the0.056\n", - "
\n", - "
\n", - " 5.6%\n", - "
\n", - "
not0.056\n", - "
\n", - "
\n", - " 5.6%\n", - "
\n", - "
Fort0.044\n", - "
\n", - "
\n", - " 4.4%\n", - "
\n", - "
\n", - " \n", - "
New Top 5 Tokens
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
TokenProbabilityDistribution
Austin0.617\n", - "
\n", - "
\n", - " 61.7%\n", - "
\n", - "
Dallas0.035\n", - "
\n", - "
\n", - " 3.5%\n", - "
\n", - "
Fort0.035\n", - "
\n", - "
\n", - " 3.5%\n", - "
\n", - "
San0.031\n", - "
\n", - "
\n", - " 3.1%\n", - "
\n", - "
not0.031\n", - "
\n", - "
\n", - " 3.1%\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - "
Key Tokens
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
TokenOriginalNewChange
▁Austin0.41410.6172\n", - "
\n", - "
\n", - " +49.1%\n", - "
\n", - "
▁Dallas0.03000.0347\n", - "
\n", - "
\n", - " +15.4%\n", - "
\n", - "
▁Texas0.05590.0047\n", - "
\n", - "
\n", - " -91.6%\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "\n", - "
\n", - "
After 10× amplification (semantic)
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "\n", - "\n", - "\n", - " \n", - "
TokenProbabilityLogit
▁Austin65.185%26.1250
▁Dallas0.266%20.6250
▁Texas2.92e-112.2969
\n", - "
\n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "logit(Austin) − logit(Dallas): 2.6250 → 5.5000 (Δ = +2.8750) [10×]\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - "
\n", - "
Input Sentence:
\n", - "
Fact: the capital of the state containing Dallas is
\n", - " \n", - "
\n", - "
Original Top 5 Tokens
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
TokenProbabilityDistribution
Austin0.414\n", - "
\n", - "
\n", - " 41.4%\n", - "
\n", - "
Texas0.056\n", - "
\n", - "
\n", - " 5.6%\n", - "
\n", - "
the0.056\n", - "
\n", - "
\n", - " 5.6%\n", - "
\n", - "
not0.056\n", - "
\n", - "
\n", - " 5.6%\n", - "
\n", - "
Fort0.044\n", - "
\n", - "
\n", - " 4.4%\n", - "
\n", - "
\n", - " \n", - "
New Top 5 Tokens
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
TokenProbabilityDistribution
Austin0.652\n", - "
\n", - "
\n", - " 65.2%\n", - "
\n", - "
San0.113\n", - "
\n", - "
\n", - " 11.3%\n", - "
\n", - "
Austin0.025\n", - "
\n", - "
\n", - " 2.5%\n", - "
\n", - "
Fort0.025\n", - "
\n", - "
\n", - " 2.5%\n", - "
\n", - "
Washington0.020\n", - "
\n", - "
\n", - " 2.0%\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - "
Key Tokens
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
TokenOriginalNewChange
▁Austin0.41410.6523\n", - "
\n", - "
\n", - " +57.5%\n", - "
\n", - "
▁Dallas0.03000.0027\n", - "
\n", - "
\n", - " -91.1%\n", - "
\n", - "
▁Texas0.05590.0000\n", - "
\n", - "
\n", - " -100.0%\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# Baseline\n", "display_token_probs(original_logits, key_ids, key_labels, title=\"Before amplification (semantic)\")\n", @@ -1843,325 +597,17 @@ "source": [ "## Ablate the Austin-Dallas Custom Difference Circuit\n", "\n", - "Now do the opposite: zero out progressively more custom-target features to remove the Austin-driving circuit. With enough of the multi-hop reasoning path suppressed, the model can no longer resolve the correct answer and reverts to nearby concepts — the intermediate state (Texas) rather than its capital." + "Now do the opposite: zero out progressively more features important to our custom target to dampen the Austin-driving circuit. With enough of the multi-hop reasoning path suppressed, the model can no longer resolve the correct answer and reverts to nearby concepts — the intermediate state (Texas) rather than its capital." ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAxUAAAHqCAYAAAByRmPvAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjgsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvwVt1zgAAAAlwSFlzAAAPYQAAD2EBqD+naQAApThJREFUeJzs3XdYFFfbBvB76R1EQECQIipgAcWGJTZUxIYaxRbA3htqDBp7Euy9d4099oJGRVGM2MWGBQ0EUVBsoKAisN8ffs7LSnFZylDu33XN5e7MmTPP7K7LPjOnSKRSqRREREREREQKUhI7ACIiIiIiKt6YVBARERERUZ4wqSAiIiIiojxhUkFERERERHnCpIKIiIiIiPKESQUREREREeUJkwoiIiIiIsoTJhVERERERJQnTCqIiIiIiChPmFQQUb7atGkTJBIJJBIJmjZtKtc+06ZNE/bx9fUt0PiaNm0qHGvTpk0FeqyiSpH3qLTz9fUVXrNp06YVyDEU+WxaW1sL+wQHB3+3rrycR3bHKkkK433OTmF+DxIVBBWxA6D8kZ6ejsOHD2Pbtm24fPkynj9/DlVVVZQvXx4uLi7w8vJCu3btIJFICjyW4OBg4Q+Os7MzPD09C/yY+SUsLAwHDhwA8OUPaEn6Yl+0aBHevn0L4MsfTmtra1HjKSgZfwiMHj0aBgYGosWSX0rLe0fiefv2LRYtWiQ8L+wf1IqSSqXYtGkTVq9ejYcPH+Ljx48wNTWFk5MTRowYgebNm4sdYr4oid9rVPIwqSgBnj9/jm7duuHcuXMy6z9+/Ij79+/j/v372LZtG968eVMoX0TBwcGYPn06AMDHx6fYJRVfY2/SpEmJSyr+++8/AF+uYpbUH6Zf3z/gyw/wbz/zS5cuRUJCAgCgcuXKhRmawkrLe0e5t2fPHnz8+BEAUL169e+WnzRpEvr37w8AqFChgrD+7du3Mv93skoqcnuswjB+/HjMnz9fZl1kZCQiIyPh5ORUrJKKvn37ws3NDQBQrlw5mW3f+14jKgqYVBRzycnJaN26NW7evAkAUFJSgq+vL9q1awd9fX08efIEx44dw759+0SOlL71+fNnSKVSqKmpiR1KqVJUfgyRON6/fw8dHR2xw8g3tWvXzlX5SpUqoVKlSoVyrMKwZs0a4fHo0aPRrl07PH/+HP/880+mH+ZFXYUKFWQSPaLihn0qirnFixcLCQUAbNu2DevXr0enTp3QvHlz+Pj4YOfOnbhz5w60tLQA5NyeOrv2pG/fvsW4ceNgb28PTU1NqKurw9zcHE2aNMH48eORnJyMqKgoSCQSmSsqmzdvFurLeHX16y3rZs2awdDQEKqqqjA1NUXHjh0RFBSU6Ty/1iGRSHDr1i0MGTIExsbG0NXVRfv27REVFQWpVIrFixejUqVKUFdXh4ODA7Zt2yb3aymRSNCnTx/h+dmzZ2WOCwBpaWkYOXIkGjdujPLly0NLSwvq6uqwsrJCr169EBYWJlPn19fk6xIbGwtfX1+YmJhAXV0d4eHhAIBXr16hf//+MDIygra2Npo0aYILFy7k2L43OTkZc+bMQd26daGnpwd1dXVUqlQJfn5+iI+PF8p9baf79Uo3ADRr1kzudsOPHz9G3759UatWLZQrVw5qamrQ1taGo6MjxowZgxcvXuS4/4MHD+Dp6QkDAwPo6uqibdu2uHv3bo77KHLsr69VRjY2NpnalefUbv358+cYP348HB0doaWlBU1NTdjb22PMmDF49uyZTNlv/x/dvXsXHTt2hL6+PrS1teHh4YFHjx7J7PPt5+F7cvPe5Sb27ISFhaFs2bJC/VOmTBG23bt3D/3794etrS00NDSgp6eHhg0bYtOmTZBKpTL1fPu5PXToEOrXrw9NTU0YGxtj0KBBSEpKkiumb+s6cOAA6tatC01NTZiYmGDQoEF48+aNzD4Z3+ONGzdi0aJFcHBwgJqaGn799Veh3M2bN+Ht7Q0rKyuoq6tDT08
PdevWxbx58/Dp06cc4zp27Bjq168PLS2tbOPYsWMHOnbsCDs7OxgYGEBVVRVly5ZFkyZNsGHDhkyv27e2bNkCJycnaGhowMLCAhMmTBDuFHyV234OWX2nNG3aFDY2NjLlMn5Ov9ab07FiYmIwevRo4W+Ejo4OXFxcsHDhQnz+/FmmrDx/T+Slr68vPPbw8ECLFi3Qs2dPLF++HEOGDJG7Hnm8e/cOM2fORK1ataCrqwt1dXXY2tpiwIABiIiIyFQ+t9/rWfWpkPd7LSe3bt2Cu7s7tLW1YWhoiB49eiAmJibb9zO3n9tvv1PXrVuHGjVq5Pi5pRJKSsValSpVpACkAKTNmzeXa5+NGzcK+zRp0kRmm4+Pj7Bt6tSpwvoffvhBWJ/VEhsbK42MjMyxjJWVlVQqlUpTU1Olnp6eOZb9/fffZeLKuK1y5cqZytvY2EgHDx6cZV0XLlyQ63XJKZ6v/1U+fPiQYxk1NTXpxYsXhTq/fU0qVaok8/zGjRvSpKQkabVq1TLVpa6uLq1atWqW70d8fHyW+3xdypcvL/3333+lUqlUOnXq1BxjzlhvVo4dO5bj/tbW1tI3b95k+fmytraWlilTJtM++vr60vDwcGGfjDH6+PgodOyMn92slo0bN0qlUqm0SZMmmdZJpVJpeHi41MTEJNv9jYyMpDdv3szyPM3MzKTa2tqZ9nF0dJSmpaVl+3n4Hnnfu7zE/vU74ObNm9KyZcsK6+fMmSOU379/v1RDQyPb+nv16iVNT08Xymd8L+zs7LLcZ9CgQd89/2/rcnBwyLIuJycnaXJysrBPxvf42/9zo0aNkkqlUumOHTukqqqq2Z6Ti4uLNDExMcs4nJ2dpRKJ5LtxeHl55fj+fY0lq7hr1qyZ5T7u7u4yr7WVlZWw7cyZM1nWlfFzntV3fMayWS1f683uWKGhoVIDA4Ns92/WrJn048ePQnl5/p7Ia/z48cJ+VlZW0piYGLn3zUp2fwNjY2MzfZYyLlpaWtKTJ08K5RX5Xs/qe1De77Xs3L59W6qnp5dpPysrK6mhoWGW72dePreOjo5yfW6pZOKdimIsKSkJDx48EJ63atWqQI7z8uVLob+GpaUldu7ciaCgIGzduhUTJkxAtWrVIJFIYGZmhpCQEJmr/W3atEFISAhCQkKwZ88eAMDy5cuFztCqqqqYMWMGAgMDhXa+wJd2v5cvX84ynvj4eKxfvx7btm2DtrY2gC9taFetWoXhw4cjMDAQDRo0EMovWbJErvMMCQnBxIkThefOzs5C7CEhIQAAFRUVTJ48Gdu2bcOxY8cQHByMwMBAjBkzBgCQkpKCGTNmZHuM6OhozJgxA3///TfWrFkDIyMjzJ8/H3fu3AEAqKmp4Y8//sCRI0fg4eGR7RX9YcOGCfs4Oztjx44dOHbsGLp06QIAePr0KXx8fAB8aacbEhICU1NTmdfk63n17ds3x9fFysoKs2bNwp49e3DixAkEBwdj//79cHd3B/Dl6vvatWuz3DcqKgqOjo7Yv38/Nm7cKDRHSEhIwMiRI3M8bm6PPWnSJOF9+uqvv/4SztPDwyPHY/Xu3Vu481GpUiXs2LEDf/31FxwdHQF8+X/Qq1cvpKenZ9o3NjYWVapUwd69e7Fo0SKoqqoCAMLDw3Hy5Mnvnmd25H3v8hI7ANy5cwctWrTAq1evIJFIsHz5cowfPx7Al/9vP/30k3ClcfDgwTh+/Dj+/PNPWFlZAfhyh3Tjxo1Z1v3o0SP06NEDR44ckblyvH79erx//z5Xr8e9e/fQr18/BAYG4rfffhNe55s3b2LBggVZ7hMREYEOHTpg//79OHDgAFq2bIm4uDj069dPuILepk0bHD58GCtWrBCufF+7dg2//PJLlnWGhYWhb9++342jQ4cOWLVqFQ4dOoQzZ84gKCgI69evh5GREQBg2bJliIuLy/YYP//8MwIDA+Hn5yesP378OLZv356bl+27li5dir/++ktmXcbvvpo1a2a
776dPn+Dl5SUMJNClSxccPXoUe/bsQY0aNQAAZ86cwe+//w5A/r8n8li0aJHM6/3ff/+hadOmePr0qbBuwoQJwlX0b+8k58bQoUOFuxHlypXDhg0bcODAATRq1AjAlzvHvXr1Eu7AKfK9npW8fq+NGjUKiYmJAAAzMzNs3rwZe/bsgba2Nl6/fp3lPnn53N67d6/QPrdUBImd1ZDiYmJiZK4ErF27Vq79cnun4sOHD1JlZWUpAGn16tWl165dk3748CHb+rO76vxVxitwI0aMkNlWu3ZtYdvQoUOF9RnPc8WKFcJ6Dw8PYX3dunWF9X/99ZewvlatWsL6W7duSUNCQmSWW7duyfXafPXPP/9If/zxR6mlpaVUTU0t0xUZQ0NDoey3V6aXLFmSqb6MV7NGjhwprP/06ZPU3Nw80/vx5s0b4f0AIN2+fbtwLmfOnJG5+nr//n2hvuyuMspjy5Yt0ubNm0uNjIxkjv116dy5c5avoaampjQ+Pl7YtmfPHmGbRCKRvnz5UiqV5vyZyc2xpVLZz0pkZGSmc8nqCu7Nmzdl9rt27ZpQ/s6dOzLbLl++nOk8VVVVZa6Quru75/ie51ZO711eY7e1tZUaGxtLAUiVlZUzXflcunSpULZatWoy/3cmTZokbKtfv76wT8bvkapVqwpXKNPS0qRaWlrCtoz/97KTsa46derIbBs+fLiwrUaNGsL6jO+xi4tLpjoXL14sbDc2Npb5Plu2bJmwTU9PT5qamqpwHC9fvpROmDBBWr16dam2tnaWdzcOHTqUZdxdu3aVOUa7du2EbR06dBDW58edCqlUvrtoWR3r8OHDMq/luXPnhM9Hxs+OmZmZVCrN/d+T7Ozfv1+o28HBQdqlSxfheaVKlaRPnz6VSqX/+7+oqqoqTUpK+m69Wb0+r1+/liopKQnr9+7dK5SPj4+XampqCtt2794tlUpz/70uleb8Pfi977WsxMfHy+y3b98+Ydu33w0ZPzuF8bmlkokdtYuxb0d/ePXqVYEcR0NDAz4+PtiwYQNu374NFxcXKCkpoUKFCqhXrx769OmD1q1by13f/fv3hcdfr/JkfH716tVM5TLKeBeibNmywmNXV1fh8dcrKgBkrsaMGDECZ8+elamvSZMmco+5fvLkSbRp0wZpaWnZlvm2XXVGX+8kZJSxLW7Dhg2Fx2pqaqhbt65wV+erhw8fyhy/Z8+e2R7vzp07qFKlSrbb5TFlyhTMnDkzxzLZnbO9vb3Me5Hx/ZZKpXj8+LHMe5ifx86NjJ81TU1N1KpVS3hetWpVGBgYCFdi79+/jzp16sjsb29vj/LlywvPM55TdlcD80teY//333+Fx9OnT8804tnXfj/Al89T48aNs4zj61XZbzVv3ly48qykpIQyZcoIbeZz+9pk9X2xbNkyAMiyTTsAdO7cOdO6jK9Z7dq1oaGhkeUxEhMT8ezZM1haWuY6jg8fPqBhw4Yyd5Ozkt3nN6tjHDlyROYYRUHGz0d8fDx++OGHLMvFxsbi1atXKFu2bL78PQkICBAef+170rFjRxw7dgwRERFo1qwZFi5ciL///hvAl3b/X/sV5lZERITMXb6M742RkRGqVKki3AX5+tnK7fd6Qfi2T1fGOL79bviqtHxuqWCw+VMxpq2tLfOD8dSpU3Ltl/HWcmpqqsy2jB18M1qzZg22bt2K7t27o1q1alBTU0NUVBR27doFd3d3HDx4UIEzUEzGjnlKSv/7CGc3xJ70O50hc2Pu3LnCD/q6detiz549CAkJwY4dO+Q6npmZWaZ1Gd+P/J5HJLfNS771+fNnmeYFvXr1wrFjxxASEoKff/5ZWJ9ds5rieuzcMjQ0lHmuovK/6zX5+fkrCMrKysLjRYsWyfxIzI3sPmtivzZZ/Z8rDPv37xd+mGlra2PJkiU4c+YMQkJCZEYgKwqf38Ly9TOSH39Pbt++LTy2t7eHqqoq9uzZI/xwfvjwIdq2bSt8xkaPHp3/J5SDgvx
eVyQGeePg55bygklFMZfxquKpU6cytYv9KiIiAikpKQCAMmXKCOsztj19+/Ytzp8/n+X+SkpK6NWrF3bs2IHbt28jKSkJc+fOFbZn/FGd8Yd+Vl889vb2wuN//vlHZlvG5xnL5Zfg4GBIpVKZJeNdiu/FHh0dLTyePHkyunTpgkaNGmVKzrKT1Zd6xuEdL168KDxOSUnJsl9J5cqVZX4IPnjwINM5SaVSvH//XuhXIc+5ZeXVq1cyo/SsWrUK7u7uaNSokVx3xu7fvy9TLuP7K5FIULFixXw/dsbXWN7zzPhZ+/DhA27cuCE8Dw8Pl7maVxCfy+/J6b3La+wNGzaEt7c3gC/t3d3c3GSucDo4OAiPGzRokOVn7evnraDl9H1hZ2eX5T5Z/Z/L+Dpcu3ZNZmSajHXq6ellmZTIE0fG7wp3d3eMGDECTZs2RY0aNRATE5NlrLk9Rn7K+BkD5P+/k/HzUaFCBWGo7Kw+H1/74OTm70l2dHV1hceHDh0CAGhpaeHIkSNCX46vevfu/d2+BzmpVKmSzOuT8b149eqVzFX9r5+t3H6vf48i32t2dnYy+2WM486dO5nuUgDF73NLRQubPxVzo0aNws6dO4VhZXv06IETJ06gXbt20NPTw9OnT3H8+HH89ddfeP78OdTU1GQm/IqKioKvry9q166N9evXCx26vmVnZwcPDw+4uLjA3NwcaWlpMpPtZfyjnLHpR0hICI4ePQp9fX2YmprCzs4Ovr6+wg+fVatWwcTEBC4uLti/fz+uXLki7JvxB3FhyRj7rVu3sG/fPpiYmMDAwADVqlWDra2t8Adk4cKFUFVVxePHj2WGqcytbt26CVfdli9fDlNTU1StWhXr1q3LcihQAwMDdO7cWUggPTw8MH78eNjZ2eHt27f477//cO7cOWHiw4znFhkZCeDLUL9KSkpQUVFBjRo1oKenl2Vs5cqVg7a2tvDjfuLEiWjfvj1Onz6dbcfcjD58+IBOnTph7NixePv2Lfz9/YVtzZs3z7Hpk6LHLlu2LF6+fAngy+erXbt2UFJSQt26dbOdE6RGjRqoVasWrl+/DuDL/6Pp06dDWVlZZojkatWqwcXF5bvnnZ2oqCiZoTvlvVKf03uX19glEgnWr1+PN2/e4PDhw4iNjUWLFi1w7tw5WFlZwcvLCxMnTsT79+9x4cIF/Pjjj+jZsyf09fXx9OlTPHjwAIGBgfD09MTUqVMVfm3kcfnyZQwcOBCdO3fGjRs3sHr1amFbt27d5K6nW7du8Pf3R3JyMl68eIEff/wRgwcPRkxMDCZNmiSU6927t8ydldzEYWtrK6wLCgrCn3/+CX19fcybN0+uJnt79uyBv78/mjRpgtOnTwtNSHJ7rvIyNDSERCIRPpMLFy5E3bp1oaSkJNNs5lstW7aEpaUlnjx5gujoaLRu3RoDBgyAiYkJYmNj8fjxY5w4cQKVKlUS/t/m5u9Jdrp06YKVK1cCgDCkq6urK6KiojLt/+LFC6Smpmb5XsqjTJky6NixI/bv3w/gy0AZCQkJMDQ0xPz58/HhwwcAgLGxsZC85PZ7/XsU+V4rW7YsmjdvLgzT/jVuTU3NbP+vFrfPLRUxhdN1gwpSbGzsd4foAyAz7GfLli0zbVdTU5MZrjVjBzJ1dfUc687YcS08PFymU9vXpV+/flKpVL4hZX/77TeZc8y4LWMntew6HZ45c0ZY/3UoW3m8fv1apiPp16VFixZSqTT7IU6bNm2aZSdHeTo/Zjf0oJqamswQmhnP78WLFzkOKZvVefv7+2dZLiQkJMfX5JdffvnuOWfs1J6xE3D58uWlOjo6mfbV09OT3rlzR9gnuw6KuT22VCqV9ujRI8t9njx5IpVKs+/Aevfu3RyHZS1btux3h2X9Ki+dYbPyvfcuP2L/8OGDzPdIxYoVhc6u+/bty3FI2W/PM7vzl0pzP2CAPEO5Vq9eXaYTbnbvcUbyDCmbkJCQZRwZh/LOLo6kpCSpra1
tpjKmpqZSe3v7LOPLGHfGMhmXli1bygxTnF8dtaVSqdTV1TXT8ZSVlb97rAsXLuQ4pOy3/69z8/ckO2/evJHWqlUrx3oyflaGDBny3Tpzen3kGVL2xIkTQnlFvtdz6qj9ve+17Ny5c0eqq6ubaT9LS8ssh5TN6+c2u6GQv/3cUsnE5k8lgKmpKc6cOYP9+/fjxx9/RIUKFaChoQEdHR1UqVIFPXv2xMGDB2X6ImzZsgXdunWDnp4etLS0hCuTGTs7ZxQQEIAOHTrA2toaOjo6UFZWhrGxMdzd3REYGCjTGdLBwQFbtmxB1apVhaEWM1JWVsa+ffuwYcMGNGnSBAYGBlBRUYGJiQk6dOiAU6dOyVwtLExlypTBvn37ULt2bairq2fa7u7ujr1798LZ2RmampqoUKECpkyZIjOra25paWnhzJkz6Nu3LwwNDaGpqYlGjRrh1KlTMn1mvg6fC3y5Inb58mXMmzcP9evXh76+PlRVVWFubo769etj0qRJ2Lt3r8xxfv31VwwaNAgmJia5auM7c+ZMzJw5U5j0rEaNGti2bZtcd5Ls7Oxw/vx5uLu7Q1dXF9ra2nB3d8f58+dRtWrVAjn24sWL4eXlJVx5lZejoyNu3bqFsWPHwt7eHhoaGtDQ0EDlypUxatQo3Lp1K1OzisLyvfcuP2LX0NDAoUOH4OzsDODLxINubm6Ij49Hp06dcOPGDQwcOBB2dnbQ0NCAtrY27Ozs0K5dO6xatQpDhw4tiFOX0bFjRxw9ehT16tWDhoYGjIyMMGDAAJw5cybXnXC7d++Oy5cvo3fv3rC0tISqqqowYducOXNw/vz5bO/gde/eHXv27IGLi0u2cWhpaeH06dPo1KkTDA0Noa+vjw4dOuD8+fNyzfQ8YcIErFy5Eo6OjlBTU4O5uTnGjRuHgwcPZmqqlF/+/PNPeHh4yDQtkoerqytu374NPz8/VK1aVZh80cbGBi1btsTChQtlhtrOzd+T7BgYGODChQuYM2cOXFxcoK2tDVVVVVhaWqJr1644ePAgDh8+LLxWK1euxOLFi3P3gmRgamqKq1evYvr06XB2doaWlhbU1NRgbW2Nfv364caNG2jZsqVQXpHv9Zwo+r1WtWpVnD9/Hq1atYKWlhb09fXRtWtX/PPPPzLNqL7GkdfP7ciRI7F582bUqFFDmNSwoD+3VHRIpNIi3ouQqBSQSqWZ/lB8/PgRdnZ2Qr+XAwcOoGPHjmKERyQaX19fbN68GQAwderU784AT1RUFOXv9du3bwsXGpSUlPDy5UuZ/pa50bRpU2FUxY0bN2YaQY5KD/apICoCvLy84OrqikaNGsHMzAz//fcf/vjjD+EPj6mpqcxVMCIiKtqKwvf6x48f0bRpUwwfPhzOzs7Q1dXFzZs3MWHCBKFMu3btFE4oiDJiUkFUBERHR2c7cpeOjg62b9+u8BjrRERU+IrK9/qlS5dw6dKlLLdVqlRJ6PBOlFdMKoiKgJ9++gnq6up48OABXr9+DTU1NdjY2MDNzQ2jR48WhmIkIqLioSh8r6uqqmLEiBEICQlBdHQ0EhMToaOjAwcHB3h6emLYsGFy9+sg+h72qSAiIiIiojxhV3wiIiIiIsoTJhVERERERJQnpa5PRWpqKm7cuIFy5cpxzGQiIiIiypX09HQ8f/4cNWvWVHim9pKo1L0SN27cQN26dcUOg4iIiIiKscuXL6NOnTpih1FklLqk4uuMkJcvX4aZmZnI0RARERFRcRIbG4u6devKNct4aVLqkoqvTZ7MzMxgYWEhcjREREREVByxGb0svhpERERERJQnTCqIiIiIiChPmFQQEREREVGelLo+FfJKS0vD58+fxQ6DckFVVRXKyspih0FERCQjPT0dKSkpYodBcuLvCcUwqfiGVCpFXFwc3r59K3YopAADAwOYmppCIpGIHQoRERFSUlIQGRmJ9PR0sUOhXODvidxjUvGNrwmFiYkJtLS0+GEqJqRSKZKTk/HixQs
A4HDBREQkOqlUitjYWCgrK8PS0pKjBRUD/D2hOCYVGaSlpQkJRdmyZcUOh3JJU1MTAPDixQuYmJjw1iUREYkqNTUVycnJMDc3h5aWltjhkJz4e0IxTJkz+NqHgv/xi6+v7x37wxARkdjS0tIAAGpqaiJHQrnF3xO5x6QiC2zyVHzxvSMioqKGf5uKH75nucekgoiIiIiI8oRJRSny008/4Y8//hA7DABfrgAcOHBA7vLHjx+Hs7MzR88gIiIqAaytrbFo0aI81xMVFQWJRIKwsDAAQHBwMCQSicwongcOHICdnR2UlZUxevTobNdR3rCjtpxaBRws1OOd8O+Yq/K+vr7YvHkzgC/jK1eoUAHe3t6YOHEiVFRUcPPmTQQGBmLlypWZ9t2xYwd69+6NwYMHY/ny5fkS/1fTpk3DgQMHhP/sX8XGxqJMmTJy1+Pu7o7Jkydj27Zt+Omnn/I1RiIiIvofX19fvH37NlcX/3LrypUr0NbWFp5LJBLs378fnp6eeaq3QYMGiI2Nhb6+vrBu0KBB6NOnD0aOHAldXd1s11He8E5FCeLu7o7Y2FhERERg7NixmDZtGubOnQsAWLp0Kbp27QodHZ1M+61fvx4///wzduzYgY8fPxZKrKamplBXV8/VPr6+vliyZEkBRURERFQ0RUREwN/fHz169IC/vz8iIiLEDinPjI2NC2RgHDU1NZn5Jd6/f48XL16gdevWMDc3h66ubpbrKO+YVJQg6urqMDU1hZWVFYYMGQI3NzccOnQIaWlp2LNnD9q3b59pn8jISFy4cAG//PILKleujH379slsnzZtGpydnWXWLVq0CNbW1sLz4OBg1K1bF9ra2jAwMEDDhg3x33//YdOmTZg+fTpu3rwJiUQCiUSCTZs2AZBt/vT11uW+ffvQrFkzaGlpwcnJCaGhoTLHbd++Pa5evYqgoKAS9+VKRESUlY0bN8Le3h5z587F7t27MXfuXNjb2wt/T8Vw9uxZ1K1bF+rq6jAzM8Mvv/yC1NRUYfu7d+/Qq1cvaGtrw8zMDAsXLkTTpk1lmhllbP709TdFp06dIJFIZH5jfOvy5cuoWbMmNDQ0ULt2bdy4cUNme8bmT8HBwULC0Lx5c0gkkmzXUd4xqSjBNDU1kZKSglu3biEhIQG1a9fOVGbjxo1o27Yt9PX10bt3b6xfvz5Xx0hNTYWnpyeaNGmCW7duITQ0FAMHDoREIoGXlxfGjh2LqlWrIjY2FrGxsfDy8sq2rkmTJmHcuHEICwtD5cqV0aNHD5kvqQoVKkBPTw8tW7YsUl+uREREuZGUlJTtkrHFQEREBPr374/09HSkpaXJ/NuvXz/cuXNHrnrz09OnT+Hh4YE6derg5s2bWLlyJdavX4/ffvtNKOPn54d//vkHhw4dwsmTJxESEoLr169nW+eVK1cAfPlNEhsbKzz/1vv379GuXTs4Ojri2rVrmDZtGsaNG5dtvQ0aNMCDBw8AAHv37kVsbGy26yjv2KeiBJJKpQgKCsLff/+NESNG4L///oOysjJMTExkyqWnp2PTpk1YunQpAKB79+4YO3YsIiMjYWNjI9exEhMTkZCQgHbt2qFixYoAAAcHB2G7jo4OVFRUYGpq+t26xo0bh7Zt2wIApk+fjqpVq+LRo0ewt7cH8OXLNTExEcD/xv7+ql+/fmjUqBEsLCzkipuIiEgsWTVF/srDwwNHjx4FAGzYsCHbAUrS09Ph4eGB6OhoYZ21tTVevnyZqaxUKs1jxP+zYsUKWFpaYtmyZZBIJLC3t8ezZ88wYcIETJkyBUlJSdi8eTO2b9+OFi1aAPiSLJibm2dbp7GxMQDAwMAgx98L27dvR3p6OtavXw8NDQ1UrVoVMTExGDJkSJbl1dTUhN8+hoaGQt1ZraO8452KEuTIkSPQ0dGBhoYG2rRpAy8vL0ybNg0fPnyAurp6pjGXT548iaSkJHh4eAAAjIyM0LJlS2zYsEHuYxoaGsLX1xetW7dG+/b
tsXjxYsTGxioUf40aNYTHZmZmAL7MZvlVTnFJJJJc32UhIiIqyqKionLcXlj9IDO6d+8eXF1dZX5TNGzYEO/fv0dMTAz+/fdffP78GXXr1hW26+vro0qVKvly7Bo1akBDQ0NY5+rqmud6KX/wTkUJ0qxZM6xcuRJqamowNzeHisqXt9fIyAjJyclISUmRmdVz/fr1eP36tTAdPfDlysetW7cwffp0KCkpQUlJKdMVjm9nl9y4cSNGjhyJ48ePY9euXfj1119x8uRJ1K9fP1fxq6qqCo+/flllvEKT05erVCr97pcvERFRUfD+/ftstykrKwuPra2toaysnOnu/NdyPj4+Muv4d5DExDsVJYi2tjbs7OxQoUIFIaEAIHS0Dg8PF9a9evUKBw8exM6dOxEWFiYsN27cwJs3b3DixAkAX25JxsXFySQW3w4PCwA1a9aEv78/Lly4gGrVqmH79u0Avtx6zOrLUBE5NW36XscuIiKiokJbWzvbJeNV+L59+2bbdEkqlWLQoEFy1ZufHBwcEBoaKhPXP//8A11dXVhYWMDW1haqqqoy/SISEhLw8OHDHOtVVVX97u8FBwcH3Lp1S+YOzcWLFxU8E8pvTCpKAWNjY9SqVQvnz58X1v35558oW7YsunXrhmrVqgmLk5MTPDw8hKZETZs2RXx8PObMmYPHjx9j+fLlOHbsmFBPZGQk/P39ERoaiv/++w8nTpxARESE0K/C2toakZGRCAsLw8uXL/Hp0yeFz8PJySnbbenp6ejbt6/CdRMRERU1lSpVwvr166GkpARlZWWZf9evXw87O7sCO3ZCQoLMRcewsDA8efIEQ4cOxZMnTzBixAjcv38fBw8exNSpU+Hn5wclJSXo6urCx8cH48ePx5kzZ3D37l3069cPSkpKmZphZ2RtbY2goCDExcXhzZs3WZbp2bMnJBIJBgwYgPDwcAQGBmLevHkF9RJQLjGpKCX69++Pbdu2Cc83bNggDN32rS5duuDQoUN4+fIlHBwcsGLFCixfvhxOTk64fPmyzEgLWlpauH//Prp06YLKlStj4MCBGDZsmHD1pEuXLnB3d0ezZs1gbGyMHTt2KHwOISEhaNq0qcyXqpLSl4+wVCrFrl27FK6biIioKPL19cWDBw8wfvx4dOvWDePHj8eDBw/g6+tboMcNDg5GzZo1ZZbp06ejfPnyCAwMxOXLl+Hk5ITBgwejX79++PXXX4V9FyxYAFdXV7Rr1w5ubm5o2LAhHBwcZO7CfGv+/Pk4efIkLC0tUbNmzSzL6Ojo4PDhw7h9+zZq1qyJSZMmYfbs2fl+7qQYiTQ/hwQoBmJiYmBpaYknT55kak7z8eNHYeSjnD74xdGHDx9QpUoV7Nq1q1h2anr58iWqVKmCq1evIi0tDevXr0dUVBSsra2hrq6O6dOnAwAOHDiAypUrl8j3kIiIipeS/LsiN5KSklC+fHnMnz8f/fr1EzscueT03uX0W7I0Y0ftUkJTUxNbtmzJcqi54iAqKgorVqwQhroNCAiQ2Z6cnIynT5+iadOmePbsmRghEhEREYAbN27g/v37qFu3LhISEjBjxgwAQMeOHUWOjAoSk4pSpGnTpmKHoLDatWtnOXnfV7NmzYJEIslTnw0iIiLKH/PmzcODBw+gpqYGFxcXhISEwMjISOywqAAxqaAS4WvfCuBLp+3Zs2dj4MCBwnwXREREVDhq1qyJa9euiR0GFTJ21KYSZ968eZg2bRratWuX41jgRERERJQ/mFRQidO9e3cYGRnh+vXr6N69O1JTU8UOiYiIiKhEY1JBJU6FChWwZ88eaGho4OjRoxg5cmS2kwcREREVNP4NKn7S09PFDqHYYZ8KKpHq1auHbdu24ccff8TKlSthY2OD8ePHix0WERGVIqqqqpBIJIiPj4exsXGOk79R0SCVSpGSkoL4+HgoKSlBTU1N7JCKDSYVVGJ17twZCxYswJgxY/Dzzz/DysoK3bp1EzssIiIqJZS
VlWFhYYGYmBhERUWJHQ7lgpaWFipUqCAzEAzljEkFlWijR49GZGQkVq9ezStERERU6HR0dFCpUiV8/vxZ7FBITsrKylBRUeHvhlxiUlGK/PTTT3BwcMDEiRPzve5p06bhwIEDCAsLAwD4+vri7du3OHDgQJ7rTklJQeXKlbFnz54c56rIzoIFCzBgwABUq1Ytz7EQERHllrKyMpSVlcUOg6hAMamQ04slboV6PJORp3JV3tfXF5s3bwbwpQ1nhQoV4O3tjYkTJ0JFRQU3b95EYGAgVq5cKezTtGlTnD17FgCgpqYGIyMj1KpVC3369EHnzp3z72TySE1NDePGjcOECRMQFBSU6/2VlZVlEoqYmBioq6vD2Ng4P8MkIiIiKrXYUKwEcXd3R2xsLCIiIjB27FhMmzYNc+fOBQAsXboUXbt2hY6Ojsw+AwYMQGxsLB4/foy9e/fC0dER3bt3x8CBA8U4hWz16tUL58+fx927d/NUz61bt1C/fn106NABHz58yKfoiIiIiEo3JhUliLq6OkxNTWFlZYUhQ4bAzc0Nhw4dQlpaGvbs2YP27dtn2kdLSwumpqawsLBA/fr1MXv2bKxevRpr167FqVP/u1syYcIEVK5cGVpaWrC1tcXkyZNz1T70+PHjaNSoEQwMDFC2bFm0a9cOjx8/FranpKRg+PDhMDMzg4aGBqysrBAQECBsL1OmDBo2bIidO3cq+Op8oaamhuTkZFy8eBG9evVCWlpanuojIiIiIiYVJZqmpiZSUlJw69YtJCQkyN0fwcfHB2XKlMG+ffuEdbq6uti0aRPCw8OxePFirF27FgsXLpQ7lqSkJPj5+eHq1asICgqCkpISOnXqJIwDvWTJEhw6dAi7d+/GgwcPsG3bNlhbW8vUUbduXYSEhMh9zKzY29vj4MGDUFNTw/79+zFu3Lg81UdERERE7FNRIkmlUgQFBeHvv//GiBEj8N9//0FZWRkmJiZy7a+kpITKlSvLDH/366+/Co+tra0xbtw47Ny5Ez///LNcdXbp0kXm+YYNG2BsbIzw8HBUq1YN0dHRqFSpEho1agSJRAIrK6tMdZibm+O///6T63g5ady4MTZv3owePXpg0aJFsLGxwciRI/NcLxEREVFpxTsVJciRI0ego6MDDQ0NtGnTBl5eXpg2bRo+fPgAdXX1XA2NJpVKZcrv2rULDRs2hKmpKXR0dPDrr78iOjpa7voiIiLQo0cP2NraQk9PT7gL8bUOX19fhIWFoUqVKhg5ciROnDiRqQ5NTU0kJyfLfcycdO/eHbNmzQLwZdjZ/BilioiIiKi0YlJRgjRr1gxhYWGIiIjAhw8fsHnzZmhra8PIyAjJyclISUmRq560tDRERETAxsYGABAaGopevXrBw8MDR44cwY0bNzBp0iS56wOA9u3b4/Xr11i7di0uXbqES5cuAYBQR61atRAZGYmZM2fiw4cP6NatG3788UeZOl6/fp2vIzb9/PPPGDRoEKRSKWbPni00xSIiIiKi3GHzpxJEW1sbdnZ2mdY7OzsDAMLDw4XHOdm8eTPevHkjNFm6cOECrKysMGnSJKFMbpohvXr1Cg8ePMDatWvRuHFjAMD58+czldPT04OXlxe8vLzw448/wt3dHa9fv4ahoSEA4M6dO6hZs6bcx/0eiUSCZcuWwczMDGPGjOGsmUREREQK4q+oUsDY2Bi1atXK8od8cnIy4uLiEBMTg4sXL2LChAkYPHgwhgwZgmbNmgEAKlWqhOjoaOzcuROPHz/GkiVLsH//frmPX6ZMGZQtWxZr1qzBo0ePcPr0afj5+cmUWbBgAXbs2IH79+/j4cOH+Ouvv2BqagoDAwOhTEhICFq1aqXYi5ANFRUVTJ06FXp6esK61NTUfD0GERERlV4BAQGoU6cOdHV1YWJiAk9PTzx48CDHfTZt2gSJRCKzaGhoyJSRSqWYMmUKzMzMoKmpCTc3N0RERBTkqeSISUUp0b9/f2zbti3T+rV
r18LMzAwVK1ZE586dER4ejl27dmHFihVCmQ4dOmDMmDEYPnw4nJ2dceHCBUyePFnuYyspKWHnzp24du0aqlWrhjFjxgjzZ3ylq6uLOXPmoHbt2qhTpw6ioqIQGBgo3D0IDQ1FQkJCpiZR+UkqlWLu3Llo0aIFPn78WGDHISIiotLj7NmzGDZsGC5evIiTJ0/i8+fPaNWqFZKSknLcT09PD7GxscLybSuROXPmYMmSJVi1ahUuXboEbW1ttG7dWrTfMBKpVCoV5cgiiYmJgaWlJZ48eQILCwuZbR8/fkRkZCRsbGwyZYPF3YcPH1ClShXs2rULrq6uYoeTa15eXnBycsLEiRNzLJeX9/DZs2dwcHBAYmIiunfvjm3btrFJFBEREcnI6bekPOLj42FiYoKzZ8/ihx9+yLLMpk2bMHr0aLx9+zbL7VKpFObm5hg7dqwwPH5CQgLKlSuHTZs2oXv37rmOK6/4i6mU0NTUxJYtW/Dy5UuxQ8m1lJQUVK9eHWPGjCnQ45ibm2Pfvn1QUVHBzp07ZfqQEBEREWX07t07JCYmCsunT5/k2i8hIQEAhD6j2Xn//j2srKxgaWmJjh074u7du8K2yMhIxMXFwc3NTVinr6+PevXqITQ0VIGzyTsmFaVI06ZNs5xVu6hTU1PDr7/+Ck1NzQI/VosWLbBu3ToAwKxZs7BmzZoCPyYREREVP46OjtDX1xeWgICA7+6Tnp6O0aNHo2HDhqhWrVq25apUqYINGzbg4MGD2Lp1K9LT09GgQQPExMQAAOLi4gAA5cqVk9mvXLlywrbCxtGfiL7h4+ODqKgoTJs2DUOHDoWFhQU8PDzEDouIiIiKkPDwcJQvX154rq6u/t19hg0bhjt37mQ5eE5Grq6uMs3VGzRoAAcHB6xevRozZ85UPOgCxDsVRFmYMmUKfH19kZaWhm7duuHp06dih0RERERFiK6uLvT09ITle0nF8OHDceTIEZw5cybXfTFUVVVRs2ZNPHr0CABgamoKAHj+/LlMuefPnwvbChuTCqIsSCQSrFmzBu7u7pgzZ47MlQgiIiIieUmlUgwfPhz79+/H6dOnhcmFcyMtLQ23b9+GmZkZAMDGxgampqYICgoSyiQmJuLSpUuiDcjD5k9E2VBVVcXRo0c5AhQREREpbNiwYdi+fTsOHjwIXV1doc+Dvr6+0F/U29sb5cuXF/plzJgxA/Xr14ednR3evn2LuXPn4r///kP//v0BfLn4OXr0aPz222+oVKkSbGxsMHnyZJibm8PT01OU82RSQZSDjAnF69evMXv2bMycORNqamoiRkVERETFxcqVKwF8GTAno40bN8LX1xcAEB0dLfOb482bNxgwYADi4uJQpkwZuLi44MKFC3B0dBTK/Pzzz0hKSsLAgQPx9u1bNGrUCMePHxdtWgTOU5FBSZ6norQoqPcwPT0d9erVw9WrV+Ht7S3MdElERESlS17nqSipikS7juXLl8Pa2hoaGhqoV68eLl++nG1ZeaYtJ8pvSkpKmDFjBpSVlbFlyxZMnz5d7JCIiIiIigzRk4pdu3bBz88PU6dOxfXr1+Hk5ITWrVvjxYsX2e7zvWnLKWs//fQT/vjjD7HD+K5Vq1YVyfk02rRpgxUrVgAApk+fjk2bNokbEBEREVERIXqfigULFmDAgAHo06cPgC8/KI8ePYoNGzbgl19+yXIfiURS6MNluZ/0L9TjHW/5/QlUMvL19cXmzZsBfOlgXKFCBXh7e2PixIlQUVHBzZs3ERgYiJUrVyIqKuq7Iw9kbOdX2Pr27YuZM2ciJCQEjRs3FiWG7AwcOBBRUVEICAjAgAEDYGFhITObJREREVFpJOqdipSUFFy7dk3mR5mSkhLc3NxynGI8p2nLv/Xp0yeZKdTfvXuXr+dQlLi7uyM2NhYREREYO3Yspk2bhrlz5wIAli5diq5du0JHRweWlpYyd3rGjh2LqlWryqzz8vIS7TzU1NTQs2dPLFmyRLQYcvL
bb7+hR48eSE1NRZcuXXD79m2xQyIiIiISlahJxcuXL5GWlparKca/N235twICAmSmUM/Ya76kUVdXh6mpKaysrDBkyBC4ubnh0KFDSEtLw549e4QmRcrKyjA1NRUWHR0dqKioCM9NTEywaNEi2NjYQFNTE05OTtizZw+AL2Mtu7m5oXXr1vjax//169ewsLDAlClTAHwZS7lfv37C/lWqVMHixYtlYg0ODkbdunWhra0NAwMDNGzYUKYZW/v27XHo0CF8+PChMF66XFFSUsLGjRvxww8/wNDQECoqot/wIyIiIhKV6H0qcsvV1RXe3t5wdnZGkyZNsG/fPhgbG2P16tVZlvf390dCQoKwhIeHF3LE4tHU1ERKSgpu3bqFhIQE1K5dW679AgICsGXLFqxatQp3797FmDFj0Lt3b5w9exYSiQSbN2/GlStXhDsJgwcPRvny5YWkIj09HRYWFvjrr78QHh6OKVOmYOLEidi9ezcAIDU1FZ6enmjSpAlu3bqF0NBQDBw4UGY0pdq1ayM1NRWXLl3K51clf6irq2P//v0IDQ2Fg4OD2OEQERERiUrUS6xGRkZQVlbO0xTj305b/i11dXWZadMTExMVD7iYkEqlCAoKwt9//40RI0bgv//+g7KyMkxMTL6776dPn/DHH3/g1KlTwoyMtra2OH/+PFavXo0mTZqgfPnyWL16Nby9vREXF4fAwEDcuHFDuGKvqqoqMzqSjY0NQkNDsXv3bnTr1g2JiYlISEhAu3btULFiRQDI9MNcS0sL+vr6RboTvqGhoczza9euoUaNGlBVVRUpIiIiIiJxiHqnQk1NDS4uLjJTjKenpyMoKEjuKca/nba8NDty5Ah0dHSgoaGBNm3awMvLC9OmTcOHDx+grq4u17wKjx49QnJyMlq2bAkdHR1h2bJlCx4/fiyU69q1Kzp16oRZs2Zh3rx5qFSpkkw9y5cvh4uLC4yNjaGjo4M1a9YgOjoawJcf476+vmjdujXat2+PxYsXIzY2NlMsmpqaSE5OzuOrUji2bdsGV1dXDBs2DKVs6hciIiIi8Ud/8vPzg4+PD2rXro26deti0aJFSEpKEkaDyu205aVZs2bNsHLlSqipqcHc3Fy4c2BkZITk5GSkpKR8dybo9+/fAwCOHj2K8uXLy2zLeMcnOTkZ165dg7KyMiIiImTK7dy5E+PGjcP8+fPh6uoKXV1dzJ07V6Yp08aNGzFy5EgcP34cu3btwq+//oqTJ0+ifv36QpnXr1/D2NhYsRejkOnq6iItLQ1r166FjY0N/P0Ld7QwIiIiIjGJnlR4eXkhPj4eU6ZMQVxcHJydnXH8+HGh87Yi05aXVtra2rCzs8u03tnZGQAQHh4uPM6Oo6Mj1NXVER0djSZNmmRbbuzYsVBSUsKxY8fg4eGBtm3bonnz5gCAf/75Bw0aNMDQoUOF8hnvcnxVs2ZN1KxZE/7+/nB1dcX27duFpOLx48f4+PEjatas+b3TLhI6dOiAxYsXY8SIEZg4cSKsrKzQs2dPscMiIiIiKhSiJxUAMHz4cAwfPjzLbcHBwTLPFy5ciIULFxZCVCWHsbExatWqhfPnz383qdDV1cW4ceMwZswYpKeno1GjRkhISMA///wDPT09+Pj4CPOIhIaGolatWhg/fjx8fHxw69YtlClTBpUqVcKWLVvw999/w8bGBn/++SeuXLkizI0RGRmJNWvWoEOHDjA3N8eDBw8QEREBb29vIY6QkBDY2toKfS6Kg+HDhyMyMhILFixAnz59YGFhgR9++EHssIiIiIgKXLEb/YkU079/f2zbtk2usjNnzsTkyZMREBAABwcHuLu74+jRo7CxsUF8fDz69euHadOmoVatWgC+zC5drlw5DB48GAAwaNAgdO7cGV5eXqhXrx5evXolc9dCS0sL9+/fR5cuXVC5cmUMHDgQw4YNw6BBg4QyO3bswIABA/LxFSgcc+fORZcuXZCSkgJPT0/cv39f7JCIiIi
ICpxEWsp6lcbExMDS0hJPnjyBhYWFzLaPHz8iMjISNjY20NDQECnCgvHhwwdUqVIFu3btkrsTvFju3r2L5s2b4+HDh9DX18/VvkXhPfzw4QNatGiB0NBQ/Pzzz5g9e7YocRAREVH+y+m3ZGlWJJo/UcHT1NTEli1b8PLlS7FD+a7Y2Fhs2bIl1wlFUaGpqSlMzjh69GixwyEiIiIqcEwqSpGmTZuKHYJc3NzcxA4hz4yNjTFmzBjheVpaGoAvs5kTERERlTTsU0FUwJKTk9G1a1eMHj2ac1gQERFRicQ7FUQF7Pz589i/fz+AL7OL+/n5iRwRERERUf7inQqiAtaqVSvMnTsXADBu3Djs3btX5IiIiIiI8heTiiykp6eLHQIpqKi+d2PHjsWwYcMglUrRu3dvhIaGih0SERERUb5h86cM1NTUoKSkhGfPnsHY2BhqamqQSCRih0VykEqlSElJQXx8PJSUlKCmpiZ2SDIkEgkWL16M6OhoHD58GB06dEBoaGiWM6ATERERFTdMKjJQUlKCjY0NYmNj8ezZM7HDIQVoaWmhQoUKUFIqejfhlJWVsWPHDjRt2hRXr15Fly5dcOPGjSIZKxEREVFuMKn4hpqaGipUqIDU1FRhGFAqHpSVlaGiolKk7y5pa2sLdyqWLFnChIKIiIhKBCYVWZBIJFBVVYWqqqrYoVAJZGpqikuXLhXp5IeIiIgoN3iZlEgEGROKGzdu4PfffxcxGiIiIqK84Z0KIhE9f/4cTZo0wbt372BgYIBhw4aJHRIRERFRrvFOBZGIypUrhwkTJgAARo4cicOHD4scEREREVHuMakgEtnEiRPRv39/pKeno3v37rh69arYIRERERHlCpMKIpFJJBKsWLECrVu3RnJyMtq1a4eoqCixwyIiIiKSG5MKoiJAVVUVu3fvRo0aNfD8+XN4eHjgzZs3YodFREREJBcmFURFhJ6eHgIDA2FhYQFra2uoqHAcBSIiIioe+KuFqAgpX748zp8/j/LlyzOpICIiomKDdyqIihgrKyshoZBKpTh37pzIERERERHljEkFUREllUoxZMgQNGnSBOvWrRM7HCIiIqJsMakgKqIkEglMTEwAAIMHD8bff/8tckREREREWWNSQVSETZ8+HT/99BPS0tLw448/4ubNm2KHRERERJQJkwqiIkwikWDdunVo1qwZ3r9/j7Zt2yImJkbssIiIiIhkMKkgKuLU1NSwb98+ODo64unTp2jbti0SExPFDouIiIhIwKSCqBgwMDBAYGAgTE1NcefOHY4IRUREREUKB8InKiasrKxw5MgRYcZtIiIioqKCSQVRMeLi4iLz/NOnT1BXVxcpGiIiIqIv2PyJqJh6/PgxatSogT///FPsUIiIiKiUY1JBVExt3boVDx8+RL9+/XD69GmxwyEiIqJSjEkFUTE1efJkdOvWDZ8/f0bnzp1x9+5dsUMiIiKiUopJBVExpaSkhM2bN6NRo0ZISEiAh4cHYmNjxQ6LiIiISiEmFUTFmIaGBg4cOIDKlSsjOjoa7dq1w/v378UOi4iIiP5fQEAA6tSpA11dXZiYmMDT0xMPHjzIcZ+1a9eicePGKFOmDMqUKQM3NzdcvnxZpoyvry8kEonM4u7uXpCnkiMmFUTFXNmyZREYGAhjY2Ncv34d48aNEzskIiIi+n9nz57FsGHDcPHiRZw8eRKfP39Gq1atkJSUlO0+wcHB6NGjB86cOYPQ0FBYWlqiVatWePr0qUw5d3d3xMbGCsuOHTsK+nSyJZFKpVLRji6CmJgYWFpa4smTJ7CwsBA7HKJ8c/HiRfj7+2PXrl0wMTEROxwiIqISKa+/JePj42FiYoKzZ8/ihx9+kGuftLQ0lClTBsuWLYO3tzeAL3cq3r59iwMHDuQ6hoLAOxVEJUT9+vVx+vRpJhRERESF4N27d0hMTBSWT58+ybVfQkICAMDQ0FDuYyUnJ+Pz58+Z9gkODoaJiQmqVKmCIUOG4NW
rV/KfQD5jUkFUgkgkEuHxxo0bsXv3bhGjISIiKrkcHR2hr68vLAEBAd/dJz09HaNHj0bDhg1RrVo1uY81YcIEmJubw83NTVjn7u6OLVu2ICgoCLNnz8bZs2fRpk0bpKWlKXQ+ecUZtYlKoGPHjqFv375QU1ODubk5GjVqJHZIREREJUp4eDjKly8vPFdXV//uPsOGDcOdO3dw/vx5uY8za9Ys7Ny5E8HBwdDQ0BDWd+/eXXhcvXp11KhRAxUrVkRwcDBatGghd/35hXcqiEqgVq1awdPTEykpKejYseN3R5kgIiKi3NHV1YWenp6wfC+pGD58OI4cOYIzZ87I3Rdj3rx5mDVrFk6cOIEaNWrkWNbW1hZGRkZ49OiR3OeQn5hUEJVAysrK2LZtG+rWrYvXr1/Dw8MDL168EDssIiKiUkcqlWL48OHYv38/Tp8+DRsbG7n2mzNnDmbOnInjx4+jdu3a3y0fExODV69ewczMLK8hK4RJBVEJpaWlhcOHD8PW1hb//vsvOnTogOTkZLHDIiIiKlWGDRuGrVu3Yvv27dDV1UVcXBzi4uLw4cMHoYy3tzf8/f2F57Nnz8bkyZOxYcMGWFtbC/t8nYvq/fv3GD9+PC5evIioqCgEBQWhY8eOsLOzQ+vWrQv9HAEmFUQlmomJCQIDA2FoaIhLly6hd+/eonXgIiIiKo1WrlyJhIQENG3aFGZmZsKya9cuoUx0dDRiY2Nl9klJScGPP/4os8+8efMAfGmRcOvWLXTo0AGVK1dGv3794OLigpCQELn6dhQEdtQuwiIiIuDj44OXL19CX18fmzZtQtWqVbMsK5VK0aJFC1y/fh1v374F8CWL7dKlC65du4bU1FRh/bd8fX2xefNmvHnzBgYGBgVzMiSaKlWq4ODBg3Bzc4OzszOUlHgtgYiIqLDIMyVccHCwzPOoqKgcy2tqauLvv//OQ1T5j78uirBBgwZh4MCBePjwISZMmABfX99syy5cuBAVK1aUWaeqqooJEybg1KlT2e63b98+qKqq5lfIVEQ1atQIDx8+xJQpU2SGnSUiIiLKD0wqiqgXL17g6tWr6N27NwCgS5cuePLkSZY9+u/evYsDBw7gl19+kVmvrq6O5s2bZ3v34fnz5/jjjz+wYMGCfI+fip4KFSoIj5OSkvDPP/+IGA0RERGVJEwqiqgnT57AzMwMKipfWqhJJBJUqFAB0dHRMuU+f/6MAQMGYPXq1VBWVs7VMQYMGIA5c+ZAV1c33+Kmou/169do2rQp3NzccOnSJbHDISIiohKASUUxN336dHTu3BkODg652m/dunWoUKECmjdvXkCRUVGlp6cHExMTfPz4Ee3bt8fjx4/FDomIiIiKOSYVRZSlpSViY2ORmpoK4Esnn+joaJkmLABw9uxZLF26FNbW1mjUqBESExNhbW2N+Pj4HOs/c+YMDh48CGtra1hbWwMAatSogRs3bhTI+VDRoaKigl27dqFmzZqIj4+Hh4cHXr16JXZYREREVIxx9KciysTEBLVq1cLWrVvh6+uLvXv3wsLCAnZ2djLlQkJChMdRUVFwdnb+7ogBALBt2zaZ5xKJBLdu3eLoT6WEjo4Ojhw5AldXVzx8+BCenp44efIkNDQ0xA6NiIiIiiHeqSjCVq9ejdWrV6Ny5cqYNWsWNm7cCADo378/Dh06JFcdNWrUgKurKxITE2FhYYGffvqpIEOmYsTc3ByBgYHQ19fH+fPn4evri/T0dLHDIiIiomJIIpVn8NwSJCYmBpaWlnjy5AksLCzEDodIdKdPn4a7uzsMDAxw+fJloTkcERERZcbfkllj8yeiUq558+bYtWsXnJycmFAQERGRQphUEBE6deok8zw5ORlaWloiRUNERETFDftUEJGMw4cPw8bGBtevXxc7FCIiIiommFQQkUAqlWL58uV48eIF2rZtm2myRSIiIqKssPlTIWoVcFDsEPLVCf+OYodA+UwikWDXrl1o3Lgxbt++DQ8
PD5w/f55DDRMREVGOeKeCiGTo6+vj6NGjMDc3x927d9G5c2ekpKSIHRYREREVYUwqiCgTS0tLHD16FDo6Ojhz5gz69++PUjb6NBEREeUCkwoiypKzszP27NkDZWVl/Pnnn5lmYSciIiL6in0qiChbrVu3xqpVq3D9+nV0795d7HCIiIioiGJSQUQ56t+/v9ghEBERURFXJJo/LV++HNbW1tDQ0EC9evVw+fJlufbbuXMnJBIJPD09CzZAIgIAfP78GaNGjcKtW7fEDoWIiIiKENGTil27dsHPzw9Tp07F9evX4eTkhNatW+PFixc57hcVFYVx48ahcePGhRQpEU2bNg1LliyBh4cHnj59KnY4REREVESInlQsWLAAAwYMQJ8+feDo6IhVq1ZBS0sLGzZsyHaftLQ09OrVC9OnT4etrW0hRktUuo0bNw729vZ4+vQp2rZti8TERLFDIiIioiJA1KQiJSUF165dg5ubm7BOSUkJbm5uCA0NzXa/GTNmwMTEBP369SuMMIno/5UpUwaBgYEwMTHBzZs30a1bN3z+/FnssIiIiEhkoiYVL1++RFpaGsqVKyezvly5coiLi8tyn/Pnz2P9+vVYu3atXMf49OkTEhMTheXdu3d5jpuoNLOxscHRo0ehpaWFv//+G0OHDuUcFkRERKWc6M2fcuPdu3f46aefsHbtWhgZGcm1T0BAAPT19YXF0dGxgKMkKvlq166NnTt3QklJCevWrcOsWbPEDomIiIhEJGpSYWRkBGVlZTx//lxm/fPnz2Fqapqp/OPHjxEVFYX27dtDRUUFKioq2LJlCw4dOgQVFRU8fvw40z7+/v5ISEgQlvDw8AI7H6LSpH379liyZAl0dHRQq1YtscMhIiIiEYmaVKipqcHFxQVBQUHCuvT0dAQFBcHV1TVTeXt7e9y+fRthYWHC0qFDBzRr1gxhYWGwtLTMtI+6ujr09PSERVdXt0DPiag0GTZsGB48eIDWrVuLHQoRERGJSPTJ7/z8/ODj44PatWujbt26WLRoEZKSktCnTx8AgLe3N8qXL4+AgABoaGigWrVqMvsbGBgAQKb1RFQ4zM3NhccRERFITU2Fg4ODiBERERFRYRM9qfDy8kJ8fDymTJmCuLg4ODs74/jx40Ln7ejoaCgpFauuH0Sl0vXr19GqVSvo6Ojg4sWLWTZhJCIiopJJ9KQCAIYPH47hw4dnuS04ODjHfTdt2pT/ARFRrlWoUAFlypTBo0eP0K5dO5w9exba2tpih0VERESFgLcAiChfGBkZ4dixYyhbtiyuXbuGHj16IC0tTeywiIiIqBAwqSCifGNnZ4dDhw5BXV0dhw8fxujRozmHBRERUSnApIKI8lWDBg2wdetWSCQSLFu2DAsXLhQ7JCIiIipgTCqIKN/9+OOPmDt3LgBg3759SE1NFTkiIiIiKkhFoqM2EZU8fn5+MDQ0RPfu3aGiwq8aIiKikox3KoioQEgkEvTp0weamprCunfv3okYERERERUUJhVEVOCkUikmTZqEWrVq4eXLl2KHQ0RERPmMSQURFbi3b99i+/btePToETp06IAPHz6IHRIRERHlIyYVRFTgypQpg8DAQBgYGCA0NBQ//fQT0tPTxQ6LiIiI8gmTCiIqFA4ODjhw4ADU1NSwd+9e/Pzzz2KHRERERPmESQURFZomTZpg48aNAID58+dj+fLlIkdERERE+YFJBREVqp49e+L3338HAIwaNQqPHj0SOSIiIiLKKw4eT0SFzt/fH8+ePYOrqyvs7OzEDoeIiIjyiEkFERU6iUSCZcuWiR0GERER5RM2fyIi0cXGxqJnz554/fq12KEQERGRAphUEJGopFIpunXrhh07dqBz58749OmT2CERERHlm4CAANSpUwe6urowMTGBp6cnHjx48N39/vrrL9jb20NDQwPVq1dHYGCgzHapVIopU6bAzMwMmpqacHNzQ0REREGdxncxqSAiUUkkEixfvhy6uro4e/Y
s+vbtyzksiIioxDh79iyGDRuGixcv4uTJk/j8+TNatWqFpKSkbPe5cOECevTogX79+uHGjRvw9PSEp6cn7ty5I5SZM2cOlixZglWrVuHSpUvQ1tZG69at8fHjx8I4rUwkUqlUKsqRRRITEwNLS0s8efIEFhYWhXrsVgEHC/V4Be2Ef0exQ6AS5OTJk/Dw8EBqaiomTpwojBBFRERUlOT1t2R8fDxMTExw9uxZ/PDDD1mW8fLyQlJSEo4cOSKsq1+/PpydnbFq1SpIpVKYm5tj7NixGDduHAAgISEB5cqVw6ZNm9C9e3fFTi4PeKeCiIqEli1bYu3atQCAP/74Q3hMRERUFL179w6JiYnCIm/z3YSEBACAoaFhtmVCQ0Ph5uYms65169YIDQ0FAERGRiIuLk6mjL6+PurVqyeUKWxMKoioyPD19cWUKVMAAEOGDMGJEydEjoiIiChrjo6O0NfXF5aAgIDv7pOeno7Ro0ejYcOGqFatWrbl4uLiUK5cOZl15cqVQ1xcnLD967rsyhQ2DilLREXKtGnTEBUVhatXr6JKlSpih0NERJSl8PBwlC9fXniurq7+3X2GDRuGO3fu4Pz58wUZmiiYVBBRkSKRSLB27VokJyfDwMBA7HCIiIiypKurCz09PbnLDx8+HEeOHMG5c+e+2xfD1NQUz58/l1n3/PlzmJqaCtu/rjMzM5Mp4+zsLHdMAPDx40doaGjkap+ssPkTERU5ampqMgnFiRMnhDaoRERExYlUKsXw4cOxf/9+nD59GjY2Nt/dx9XVFUFBQTLrTp48CVdXVwCAjY0NTE1NZcokJibi0qVLQpmcpKenY+bMmShfvjx0dHTw77//AgAmT56M9evX5+b0BEwqiKhIW79+Pdzd3fHjjz/i8+fPYodDRESUK8OGDcPWrVuxfft26OrqIi4uDnFxcfjw4YNQxtvbG/7+/sLzUaNG4fjx45g/fz7u37+PadOm4erVqxg+fDiAL3f1R48ejd9++w2HDh3C7du34e3tDXNzc3h6en43pt9++w2bNm3CnDlzoKamJqyvVq0a1q1bp9B5MqkgoiKtZs2a0NLSwqlTpzBw4ECUslGwiYiomFu5ciUSEhLQtGlTmJmZCcuuXbuEMtHR0YiNjRWeN2jQANu3b8eaNWvg5OSEPXv24MCBAzKdu3/++WeMGDECAwcORJ06dfD+/XscP35crqZMW7ZswZo1a9CrVy8oKysL652cnHD//n2FzpN9KoioSKtVqxZ2796N9u3bY9OmTbCxsRFGiCIiIirq5LkYFhwcnGld165d0bVr12z3kUgkmDFjBmbMmJHrmJ4+fQo7O7tM69PT0xVuFcA7FURU5Hl4eGD58uUAgKlTp2LLli0iR0RERFR8OTo6IiQkJNP6PXv2oGbNmgrVyTsVRFQsDB48GJGRkZgzZw769esHCwsLNG/eXOywiIiIip0pU6bAx8cHT58+RXp6Ovbt24cHDx5gy5YtMrN45wbvVBBRsREQEAAvLy+kpqbi5MmTYodDRERULHXs2BGHDx/GqVOnoK2tjSlTpuDevXs4fPgwWrZsqVCdvFNBRMWGkpISNm3ahI4dO6J79+5ih0NERFRsNW7cOF8v0PFOBREVKxoaGujRowckEgkAICUlBUlJSSJHRUREVHxcuXIFly5dyrT+0qVLuHr1qkJ1MqkgomIrISEBHh4e6NatG1JTU8UOh4iIqFgYNmwYnjx5kmn906dPMWzYMIXqZFJBRMXWo0ePcOHCBQQGBmL48OGcw4KIiEgO4eHhqFWrVqb1NWvWRHh4uEJ1MqkgomLLxcUF27dvh0QiwerVqzFnzhyxQyIiIiry1NXV8fz580zrY2NjoaKiWJdrJhVEVKx5enpi4cKFAIBffvkFO3fuFDkiIiKioq1Vq1bw9/dHQkKCsO7t27eYOHGiwqM/MakgomJv1KhRGD16NADAx8cnywl9iIiI6It58+bhyZMnsLKyQrNmzdCsWTP
Y2NggLi4O8+fPV6hOJhVEVCLMmzcPnTp1QkpKCry9vfH582exQyIiIiqSypcvj1u3bmHOnDlwdHSEi4sLFi9ejNu3b8PS0lKhOjlPBRGVCMrKyti6dSt8fHwwefJkqKqqih0SERFRkaWtrY2BAwfmW31MKoioxNDS0sJff/0ldhhERERFXkREBM6cOYMXL14gPT1dZtuUKVNyXR+TCiIqsUJCQrBx40asXbsWysrKYodDRERUJKxduxZDhgyBkZERTE1NhQllAUAikRReUnHmzBk0a9ZMkV2JiArF27dv0a5dOyQmJkJPTw+LFi0SOyQiIqIi4bfffsPvv/+OCRMm5FudCnXUdnd3R8WKFfHbb79lORsfEZHYDAwMsGbNGgDA4sWLsXjxYpEjIiIiKhrevHmDrl275mudCiUVT58+xfDhw7Fnzx7Y2tqidevW2L17N1JSUvI1OCKivPDy8sLs2bMBAGPGjMH+/ftFjoiIiEh8Xbt2xYkTJ/K1ToWaPxkZGWHMmDEYM2YMrl+/jo0bN2Lo0KEYOnQoevbsiX79+sHJySlfAyUiUsT48eMRGRmJVatWoWfPnjhz5gzq168vdlhERESisbOzw+TJk3Hx4kVUr14904iJI0eOzHWdEqlUKs1rYM+ePcOaNWswa9YsqKio4OPHj3B1dcWqVatQtWrVvFafr2JiYmBpaYknT57AwsKiUI/dKuBgoR6voJ3w7yh2CERySU1NhaenJ44ePQojIyNcuXIF1tbWYodFRETFkJi/JfOLjY1NttskEgn+/fffXNep8OhPnz9/xsGDB7FhwwacPHkStWvXxrJly9CjRw/Ex8fj119/RdeuXREeHq7oIYiI8oWKigp27tyJJk2awM7ODqampmKHREREJJrIyMh8r1OhpGLEiBHYsWMHpFIpfvrpJ8yZMwfVqlUTtmtra2PevHkwNzfPt0CJiPJCR0cHQUFB0NPTg5KSQt3JiIiISpSUlBRERkaiYsWKUFHJ20wTCv1lDQ8Px9KlS/Hs2TMsWrRIJqH4ysjICGfOnMlTcERE+cnAwEBIKNLT07F9+/ZME/4QERGVdMnJyejXrx+0tLRQtWpVREdHA/hy42DWrFkK1alQUjF16lR07doV6urqMutTU1Nx7tw5AF+aGzRp0kShoIiIClqfPn3Qq1cv+Pv7ix0KERFRofL398fNmzcRHBwMDQ0NYb2bmxt27dqlUJ0KJRXNmjXD69evM61PSEjgpHhEVCy0aNECADBnzhysWrVK5GiIiIgKz4EDB7Bs2TI0atRIZjbtqlWr4vHjxwrVqVBSIZVKZQL46tWrV9DW1lYoECKiwuTt7Y0ZM2YAAIYNG4ajR4+KHBEREVHhiI+Ph4mJSab1SUlJWf7Gl0euemR07twZwJehpnx9fWWaP6WlpeHWrVto0KCBQoEQERW2X3/9FVFRUdiwYQO8vLxw9uxZuLi4iB0WERFRgapduzaOHj2KESNGAICQSKxbtw6urq4K1ZmrpEJfXx/AlzsVurq60NTUFLapqamhfv36GDBggEKBEBEVNolEglWrVuHJkyc4efIk2rVrh4sXL8LKykrs0IiIiArMH3/8gTZt2iA8PBypqalYvHgxwsPDceHCBZw9e1ahOnOVVGzcuBEAYG1tjXHjxrGpExEVe6qqqtizZw8aN26M+/fv49atW0wqiIioRGvUqBFu3ryJgIAAVK9eHSdOnECtWrUQGhqK6tWrK1SnQgPSTp06VaGDEREVRXp6ejh69CgiIyPRuHFjscMhIiIqMJ8/f8agQYMwefJkrF27Nt/qlTupqFWrFoKCglCmTBnUrFkzx04c169fz5fgiIgKi4WFBSwsLITnr169gqGhocId1oiIiIoiVVVV7N27F5MnT87XeuVOKjp27Ch0zPb09MzXIIiIipLbt2/Dw8MDffr0EUaIIiIiKik8PT1x4MABjBkzJt/qlDupyNjkic2fiKgku3LlCmJiYjBz5kxYW1u
jb9++YodERESUbypVqoQZM2bgn3/+gYuLS6Z+0iNHjsx1nQr1qSAiKsn69u2Lx48f448//sCgQYNgaWmJli1bih0WERFRvli/fj0MDAxw7do1XLt2TWabRCIp2KSiTJkycrctzmq2bSKi4uS3335DVFQUtm/fji5duuD8+fOoUaOG2GERERHlWWRkZL7XKXdSsWjRonw/OBFRUSWRSLBhwwY8e/YMwcHB8PDwwKVLl1C+fHmxQyMiIsoXKSkpiIyMRMWKFaGikrcGTHLv7ePjk6cDEREVN+rq6ti3bx8aNmyIe/fuYdy4cdixY4fYYREREeVJcnIyRowYgc2bNwMAHj58CFtbW4wYMQLly5fHL7/8kus6leQtmJiYKPM4pyW3li9fDmtra2hoaKBevXq4fPlytmX37duH2rVrw8DAANra2nB2dsaff/6Z62MSEcmjTJkyCAwMRI8ePbBq1SqxwyEiIsozf39/3Lx5E8HBwdDQ0BDWu7m5YdeuXQrVmas+FbGxsTAxMYGBgUGW/SukUikkEgnS0tLkDmDXrl3w8/PDqlWrUK9ePSxatAitW7fGgwcPYGJikqm8oaEhJk2aBHt7e6ipqeHIkSPo06cPTExM0Lp1a7mPS0QkL2tra2zfvl3sMIiIiPLFgQMHsGvXLtSvX1/mN33VqlXx+PFjheqUO6k4ffo0DA0NAQBnzpxR6GBZWbBgAQYMGIA+ffoAAFatWoWjR49iw4YNWd56adq0qczzUaNGYfPmzTh//jyTCiIqFIsWLUJycjImTpwodihERES5Fh8fn+XF+6SkJIUnfZU7qWjSpEmWj/MiJSUF165dg7+/v7BOSUkJbm5uCA0N/e7+UqkUp0+fxoMHDzB79ux8iYmIKCchISHCZEFWVlbo1auXyBERERHlTu3atXH06FGMGDECAIREYt26dXB1dVWoToW7eb958wbr16/HvXv3AACOjo7o06ePcDdDHi9fvkRaWhrKlSsns75cuXK4f/9+tvslJCSgfPny+PTpE5SVlbFixYpsx5D/9OkTPn36JDx/9+6d3PEREX2rcePGGDduHObNm4c+ffqgfPnyme6gEhERFWV//PEH2rRpg/DwcKSmpmLx4sUIDw/HhQsXcPbsWYXqlLujdkbnzp2DtbU1lixZgjdv3uDNmzdYsmQJbGxscO7cOYUCyQ1dXV2EhYXhypUr+P333+Hn54fg4OAsywYEBEBfX19YHB0dCzw+IirZZs+eja5du+Lz58/o1KmTcHGFiIioOGjUqBHCwsKQmpqK6tWr48SJEzAxMUFoaChcXFwUqlMilUqlud2pevXqcHV1xcqVK6GsrAwASEtLw9ChQ3HhwgXcvn1brnpSUlKgpaWFPXv2wNPTU1jv4+ODt2/f4uDBg3LV079/fzx58gR///13pm3f3ql4+vQpHB0d8eTJE1hYWMhVf35pFSDf+RQXJ/w7ih0CkWg+fvyIFi1a4MKFC7CyssLFixdhamoqdlhERFTAYmJiYGlpKcpvybzw8/PDzJkzoa2tjXPnzqFBgwZ5npsiI4XuVDx69Ahjx44VEgoAUFZWhp+fHx49eiR3PWpqanBxcUFQUJCwLj09HUFBQblqz5Weni6TOGSkrq4OPT09YdHV1ZW7XiKi7GhoaODgwYOoVKkS/vvvP7Rr1w4pKSlih0VERJSlpUuX4v379wCAZs2a4fXr1/lav0JJRa1atbK83X/v3j04OTnlqi4/Pz+sXbsWmzdvxr179zBkyBAkJSUJo0F5e3vLdOQOCAjAyZMn8e+//+LevXuYP38+/vzzT/Tu3VuRUyEiUpiRkRECAwNhYmICb29vqKmpiR0SEREVMefOnUP79u1hbm4OiUSCAwcO5Fje19cXEokk01K1alWhzLRp0zJtt7e3z7Her10Xzp49C6lUitDQUJw7dy7LRRFy3/O4deuW8HjkyJEYNWoUHj16hPr16wMALl68iOXLl2PWrFm5CsDLywvx8fG
YMmUK4uLi4OzsjOPHjwudt6Ojo6Gk9L/cJykpCUOHDkVMTAw0NTVhb2+PrVu3wsvLK1fHJSLKD3Z2doiIiICenp7YoRARURGUlJQEJycn9O3bF507d/5u+cWLF8v8nk5NTYWTkxO6du0qU65q1ao4deqU8Px7TZnmzp2LwYMHIyAgABKJBJ06dcqyXG7nnBP2k7dPhZKSEiQSCb5XXNFACouY7eDYp4Ko5Hv79i1OnDiBbt26iR0KEREVgLz8lpRIJNi/f79MX+LvOXDgADp37ozIyEhYWVkB+HKn4sCBAwgLC8vV8QHg/fv30NPTy3aiaQDQ19fPdb1y36mIjIzMdeVERKVJYmIiGjZsiPDwcEgkkkxXlYiIqOR49+4dEhMThefq6upQV1fP9+OsX78ebm5uQkLxVUREBMzNzaGhoQFXV1cEBASgQoUK2dbztaO2jo4Ozpw5AxsbG3E6altZWcm9EBGVRrq6umjevDkA4KeffsI///wjckRERFRQHB0dZaYtCAgIyPdjPHv2DMeOHUP//v1l1terVw+bNm3C8ePHsXLlSkRGRqJx48Y5zseWsaN28+bN872jdp7Sk/DwcERHR2ca8aRDhw55CoqIqDiSSCRYtGgRoqOjcejQIXTs2BGhoaGoVKmS2KEREVE+Cw8PR/ny5YXnBXGXYvPmzTAwMMjUXKpNmzbC4xo1aqBevXqwsrLC7t270a9fvyzr+tpRu1WrVkJH7TJlymRZ9ocffsh1rAolFf/++y86deqE27dvy/Sz+DrFd1HuU0FEVJCUlZWxfft2NGvWDFeuXEGbNm0QGhoKY2NjsUMjIqJ8pKurW6CDdEilUmzYsAE//fTTd0cXNDAwQOXKlXOc2qGgO2orNKTsqFGjYGNjgxcvXkBLSwt3797FuXPnULt27WxntiYiKi20tbVx+PBh2NjY4PHjx+jQoQM+fPggdlhERFSMnD17Fo8ePcr2zkNG79+/x+PHj2FmZpZtGU9PT8TFxSExMRFSqRQPHjzAmzdvMi2KNotS6E5FaGgoTp8+DSMjIygpKUFJSQmNGjVCQEAARo4ciRs3bigUDBFRSVGuXDkEBgaiQYMGePbsGWJjY2Frayt2WEREVMjev38vcwchMjISYWFhMDQ0RIUKFeDv74+nT59iy5YtMvutX78e9erVQ7Vq1TLVOW7cOLRv3x5WVlZ49uwZpk6dCmVlZfTo0eO78RRUR22FakpLSxNmpjYyMsKzZ89QpUoVWFlZ4cGDB/kWHBFRcWZvb49jx46hQoUKOV49IiKikuvq1ato1qyZ8NzPzw8A4OPjg02bNiE2NhbR0dEy+yQkJGDv3r1YvHhxlnXGxMSgR48eePXqFYyNjdGoUSNcvHgxx6a2iYmJQnOtmjVrIjk5OduyijTrUiipqFatGm7evAkbGxvUq1cPc+bMgZqaGtasWcMrcUREGdSrV0/meVxcHExNTUWKhoiIClvTpk1znOdt06ZNmdbp6+vn+KN/586duY6jTJkyiI2NhYmJCQwMDIS+0BlJpVKF+1QolFT8+uuvSEpKAgDMmDED7dq1Q+PGjVG2bFns2rVLkSqJiEq87du3o1+/fti5cyc6duTkkUREVHhOnz4NQ0NDAMCZM2fyvX6FkorWrVsLj+3s7HD//n28fv0aZcqUyTLrISKiL53uPn78iB49eiA4OBh169YVOyQiIiolmjRpkuXj/JLn3hlPnjwBAFhaWuY5GCKikmz58uWIjo7G8ePH0b59e1y8eBE2NjZih0VERKXArVu35C5bo0aNXNevUFKRmpqK6dOnY8mSJcLMfDo6OhgxYgSmTp0KVVVVRaolIirRVFRUsHv3bvzwww8ICwuDh4cHLly4kO3kQ0RERPnF2dlZmF/uey2LCm2eihEjRmDNmjWYM2cObty4gRs3bmDOnDlYv349Ro4cqUiVRESlgq6uLo4ePQoLCwvcv38fnTp1wqdPn8QOi4iISrjIyEj8+++/iIy
MxN69e2FjY4MVK1YIv+VXrFiBihUrYu/evQrVr9Cdiu3bt2Pnzp2Zpgi3tLREjx49sHLlSoWCISIqDczNzREYGIhGjRrh7Nmz2LBhA4YMGSJ2WEREVIJZWVkJj7t27YolS5bAw8NDWPf1t/zkyZPh6emZ6/oVSirU1dVhbW2dab2Njc13pxEnIiKgevXq2Lt3L86ePYtBgwaJHQ4REZUit2/fzrJPn42NDcLDwxWqU6HmT8OHD8fMmTNlbtl/+vQJv//+O4YPH65QIEREpY2bmxtmzpwJJSWFvoqJiIgU4uDggICAAKSkpAjrUlJSEBAQAAcHB4XqlPtORefOnWWenzp1ChYWFnBycgIA3Lx5EykpKWjRooVCgRARlWYfPnxA37594e3tLdO0lIiIKL+tWrUK7du3h4WFhTDS061btyCRSHD48GGF6pQ7qdDX15d53qVLF5nnHFKWiEhxixYtws6dO3HkyBGcO3cONWvWFDskIiIqoerWrYt///0X27Ztw/379wEAXl5e6NmzJ7S1tRWqU+6kYuPGjQodgIiIvm/s2LE4deoUTp8+jbZt2+LixYuoUKGC2GEREVEJpa2tjYEDB+ZbfXlqyBsfH4/z58/j/PnziI+Pz6+YiIhKHTU1NezduxdVq1ZFbGws2rZti4SEBLHDIiIikotCSUVSUhL69u0LMzMz/PDDD/jhhx9gbm6Ofv36ITk5Ob9jJCIqFQwMDBAYGAgzMzPcuXMHXbp0kelER0REVFQplFT4+fnh7NmzOHz4MN6+fYu3b9/i4MGDOHv2LMaOHZvfMRIRlRoVKlTA0aNHoa2tjaCgIIwYMULskIiIiL5LoaRi7969WL9+Pdq0aQM9PT3o6enBw8MDa9euxZ49e/I7RiKiUqVmzZrYvXs3ypUrB29vb7HDISIi+i6Fkork5GSUK1cu03oTExM2fyIiygceHh54/PgxGjZsKHYoRERUwtja2uLVq1eZ1r99+xa2trYK1alQUuHq6oqpU6fi48ePwroPHz5g+vTpcHV1VSgQIiKSlXFYv5s3byI4OFi8YIiIqMSIiopCWlpapvWfPn3C06dPFapT7iFlM1q0aBHc3d0zTX6noaGBv//+W6FAiIgoa9evX0fTpk0hkUjwzz//oFq1amKHRERExdChQ4eEx3///bfMPHRpaWkICgqCtbW1QnUrlFRUr14dERERMhNm9OjRA7169YKmpqZCgRARUdYcHR3h7OyMkJAQeHh44OLFizA3Nxc7LCIiKmY8PT0BABKJBD4+PjLbVFVVYW1tjfnz5ytUd66Tis+fP8Pe3h5HjhzBgAEDFDooERHJT0NDAwcOHECDBg3w4MEDtGvXDufOnYOOjo7YoRERUTGSnp4OALCxscGVK1dgZGSUb3Xnuk+FqqqqTF8KIiIqeIaGhggMDISxsTFu3LiBbt26ITU1VeywiIioGIqMjMzXhAJQsPnTsGHDMHv2bKxbtw4qKgpVQUREuWRra4sjR46gadOmOHbsGIYNG4ZVq1ZBIpGIHRoRERVxS5YswcCBA6GhoYElS5bkWHbkyJG5rl+hjODKlSsICgrCiRMnUL16dZkRSgBg3759ilRLRETfUbduXezYsQOdOnVCZGQkPn36BA0NDbHDIiKiIm7hwoXo1asXNDQ0sHDhwmzLSSSSwksqDAwM0KVLF0V2JSKiPOrYsSNOnDiBJk2aQFVVVexwiIioGIiMjMzycX7JVVKRnp6OuXPn4uHDh0hJSUHz5s0xbdo0jvhERFTI3NzchMdSqRRPnz6FhYWFiBEREVFplquk4vfff8e0adPg5uYGTU1NLFmyBPHx8diwYUNBxUdERDlITU3FqFGjsGPHDly4cAH29vZih0REREWcn59fluslEgk0NDRgZ2eHjh07wtDQUO46c5VUbNmyBStWrMCgQYMAAKdOnULbtm2xbt06KCkpNDk3ERHlwefPn3Ht2jW8efMGHh4eCA0NRbly5cQOi4iIirA
bN27g+vXrSEtLQ5UqVQAADx8+hLKyMuzt7bFixQqMHTsW58+fh6Ojo1x15ioTiI6OhoeHh/Dczc0NEokEz549y001RESUTzQ1NXHo0CHY2toiMjISHTp0QHJysthhERFREdaxY0e4ubnh2bNnuHbtGq5du4aYmBi0bNkSPXr0wNOnT/HDDz9gzJgxcteZq6QiNTU10ygjqqqq+Pz5c26qISKifGRiYoJjx47B0NAQly9fRs+ePZGWliZ2WEREVETNnTsXM2fOhJ6enrBOX18f06ZNw5w5c6ClpYUpU6bg2rVrcteZq+ZPUqkUvr6+UFdXF9Z9/PgRgwcPlhlWlkPKEhEVrsqVK+PQoUNo0aIFDh48CD8/PyxevFjssIiIqAhKSEjAixcvMjVtio+PR2JiIoAvo72mpKTIXWeu7lT4+PjAxMQE+vr6wtK7d2+Ym5vLrCMiosLXsGFD/PnnnwCAZcuW4ebNmyJHRERERVHHjh3Rt29f7N+/HzExMYiJicH+/fvRr18/eHp6AgAuX76MypUry11nru5UbNy4MVcBExFR4eratSsWLVoEW1tbODk5iR0OEREVQatXr8aYMWPQvXt3pKamAgBUVFTg4+MjTIxnb2+PdevWyV2nQpPfERFR0TVq1CiZ51KpFBKJRKRoiIioqNHR0cHatWuxcOFC/PvvvwAAW1tb6OjoCGWcnZ1zVSfHgSUiKsEiIyPRpEkTPH78WOxQiIioiNHR0YGhoSEMDQ1lEgpFMKkgIirBhg8fjpCQELRp0wYvX74UOxwiIioC0tPTMWPGDOjr68PKygpWVlYwMDDAzJkzkZ6erlCdTCqIiEqwdevWoUKFCoiIiICnpyc+fvwodkhERCSySZMmYdmyZZg1axZu3LiBGzdu4I8//sDSpUsxefJkhepkUkFEVIKZmZkhMDAQ+vr6+Oeff+Dt7a3wVSgiIioZNm/ejHXr1mHIkCGoUaMGatSogaFDh2Lt2rXYtGmTQnUyqSAiKuGqVq2K/fv3Q1VVFX/99Rd++eUXsUMiIiIRvX79Gvb29pnW29vb4/Xr1wrVyaSCiKgUaNasGTZs2ADgy0yqil6JIiKi4s/JyQnLli3LtH7ZsmUKD0fOIWWJiEqJ3r17IyoqCgcPHoS7u7vY4RARkUjmzJmDtm3b4tSpU3B1dQUAhIaG4smTJwgMDFSoTt6pICIqRSZNmoSQkBCYmpqKHQoREYmkSZMmePjwITp16oS3b9/i7du36Ny5Mx48eIDGjRsrVCfvVBARlSISiQQaGhrC8507d6J+/fqwtrYWLygiIip05ubm+P3332XWxcTEYODAgVizZk2u6+OdCiKiUmrt2rXo0aMHPDw88ObNG7HDISIikb169Qrr169XaF8mFUREpVSbNm1Qvnx53Lt3D507d8anT5/EDomIiIopJhVERKWUhYUFjh49Cl1dXQQHB6N///54+PAh/P390aNHD/j7+yMiIkLsMImIirVz586hffv2MDc3h0QiwYEDB3IsHxwcDIlEkmmJi4uTKbd8+XJYW1tDQ0MD9erVw+XLlwvwLL6PSQURUSnm5OSEv/76C8rKyti6dSvs7e0xd+5c7N69G3PnzoW9vT2HnyUiyoOkpCQ4OTlh+fLludrvwYMHiI2NFRYTExNh265du+Dn54epU6fi+vXrcHJyQuvWrfHixYv8Dl9u7KhNRFTKtW7dGjNmzMCkSZMglUqRlpYms71fv35o1KgR7OzsRIqQiKj4atOmDdq0aZPr/UxMTGBgYJDltgULFmDAgAHo06cPAGDVqlU4evQoNmzYkOMEp507d87xmG/fvs11nF/xTgUREeHdu3eQSCRZbpNIJAp33CMiKqnevXuHxMREYcnvfmnOzs4wMzNDy5Yt8c8//wjrU1JScO3aNbi5uQnrlJSU4ObmhtDQ0Bzr1NfXz3GxsrKCt7e3QvHyTgURESEqKgoSiQRSqTTTNqlUiqioqMIPioioCHN0dJR5PnX
qVEybNi3P9ZqZmWHVqlWoXbs2Pn36hHXr1qFp06a4dOkSatWqhZcvXyItLQ3lypWT2a9cuXK4f/9+jnVv3Lgxz/Flh0kFERHB2to62zsVAGBlZVWI0RARFX3h4eEoX7688FxdXT1f6q1SpQqqVKkiPG/QoAEeP36MhQsX4s8//8yXYxQENn8iIiL07ds3y7sUAJCeno6goKDvXgEjIipNdHV1oaenJyz5lVRkpW7dunj06BEAwMjICMrKynj+/LlMmefPn8PU1LTAYvgeJhVERIRKlSph/fr1UFJSgrKysvCvRCKBmpoarl69CicnJ8ycORMpKSlih0tEVKqEhYXBzMwMAKCmpgYXFxcEBQUJ279e/HF1dRUrRDZ/IiKiL3x9fdGoUSOsX78eUVFRsLa2Rr9+/aCqqoqhQ4ciMDAQU6ZMwa5du7B27VpR/3gRERUX79+/F+4yAEBkZCTCwsJgaGiIChUqwN/fH0+fPsWWLVsAAIsWLYKNjQ2qVq2Kjx8/Yt26dTh9+jROnDgh1OHn5wcfHx/Url0bdevWxaJFi5CUlCSMBiUGJhVERCSws7NDQEBApvVHjhzBzp07MWrUKNy9exd//vknkwoiIjlcvXoVzZo1E577+fkBAHx8fLBp0ybExsYiOjpa2J6SkoKxY8fi6dOn0NLSQo0aNXDq1CmZOry8vBAfH48pU6YgLi4Ozs7OOH78eKbO24VJIs2uEW0hWr58OebOnYu4uDg4OTlh6dKlqFu3bpZl165diy1btuDOnTsAABcXF/zxxx/Zlv9WTEwMLC0t8eTJE1hYWOTbOcijVcDBQj1eQTvh31HsEIiokL169QozZ87E9OnToa+vDwBITk6GlpaWyJERERUOMX9LFmWi96nI7YyAwcHB6NGjB86cOYPQ0FBYWlqiVatWePr0aSFHTkRU+pQtWxaLFi0SEgqpVIr27duja9euiI2NFTk6IiISi+hJRcYZAR0dHbFq1SpoaWlhw4YNWZbftm0bhg4dCmdnZ9jb22PdunVC5xQiIipc169fx9mzZ7Fnzx44ODhg7dq1SE9PFzssIiIqZKImFXmZEfCr5ORkfP78GYaGhgUVJhERZcPFxQVXr16Fi4sLEhISMHDgQDRr1gwPHjwQOzQiIipEoiYVOc0IGBcXJ1cdEyZMgLm5uUxiktGnT59kplB/9+5dnuMmIqL/cXZ2xsWLFzF//nxoaWnh3LlzcHJywu+//47Pnz+LHR4RERUC0Zs/5cWsWbOwc+dO7N+/HxoaGlmWCQgIgL6+vrB8O6U6ERHlnYqKCvz8/HDnzh20bt0anz59wo4dO7KdUI+IiEoWUZOKvMwIOG/ePMyaNQsnTpxAjRo1si3n7++PhIQEYQkPD8+X2ImIKDMbGxscO3YMW7duxbp166CmpgYA+Pz5M96/fy9ydEREVFBETSoUnRFwzpw5mDlzJo4fP47atWvneAx1dXWZKdR1dXXzLX4iIspMIpGgV69eqF+/vrBu3rx5cHR0xNGjR0WMjIiICorozZ/8/Pywdu1abN68Gffu3cOQIUNkZgT09vaGv7+/UH727NmYPHkyNmzYAGtra8TFxSEuLo5XwIiIiqjU1FRs3boVT548Qbt27dCjR49Md6iJiKh4Ez2p8PLywrx58zBlyhQ4OzsjLCxMZkbA6OhombHPV65ciZSUFPz4448wMzMTlnnz5ol1CkRElAMVFRVcvnwZ48aNg5KSEnbu3AkHBwds3LiRfS6IiEqIIjGjdmHijNr5hzNqE1FuXbt2DQMGDMCNGzcAAM2bN8e6detgY2MjcmRERPLhjNpZE/1OBRERlR4uLi64fPky5syZA01NTZw/fx6fPn0SOywiIsojJhVERFSoVFRUMH78eNy5cwebN2+Gvb29sO3p06ciRkZERIpiUkFERKKwtbVF9+7dheeXLl2CjY0N/Pz8OPgGEVExw6SCiIiKhMDAQHz+/BkLFy5EtWrVcPz4cbFDIiIiOTG
pICKiImH69OkIDAyElZUV/vvvP7Rp0wa9evVCfHy82KEREdF3MKkgIqIio02bNrhz5w5Gjx4NJSUlbN++Hfb29ti1a5fYoRERUQ6YVBARUZGio6ODhQsX4uLFi6hRowZev36Nd+/eiR0WERHlgEkFEREVSXXq1MHVq1exefNm9OvXT1j/6NEjpKamihgZERF9i0kFEREVWaqqqvD29oZEIgEAvHv3Ds2aNUO9evVw/fp1kaMjIqKvmFQQEVGxcefOHSQlJeH69euoW7cufv75ZyQnJ4sdFhFRqcekgoiIig1XV1fcu3cPXl5eSEtLw9y5c1G9enWcOnVK7NCIiEo1JhVERFSslCtXDjt37sSRI0dgaWmJf//9Fy1btoSPjw8+f/4sdnhERKUSkwoiIiqW2rZti7t372LkyJGQSCT4+PEjVFVVxQ6LiKhUUhE7ACIiIkXp6upi8eLF6NmzJ6ysrIT1z58/x4cPH2BtbS1ecEREpQjvVBARUbFXr149mJqaCs9HjBiBqlWrYsGCBRx+loioEDCpICKiEiU5ORnPnz9HcnIyxo4dC1dXV9y8eVPssIiISjQmFUREVKJoaWnhzJkzWLNmDfT19XH16lW4uLjgl19+wYcPH8QOj4ioRGJSQUREJY6SkhIGDBiAe/fuoUuXLkhLS8Ps2bNRvXp1hIeHix0eEVGJw6SCiIhKLDMzM+zZswcHDhxA+fLlkZKSAktLS7HDIiIqcZhUEBFRidexY0eEh4fj8OHD0NXVBQCkp6fjxIkTkEqlIkdHRFT8MakgIqJSQU9PD05OTsLzDRs2oHXr1mjfvj2io6NFjIyIqPhjUkFERKVSYmIi1NTUcPToUTg6OmLJkiVIS0sTOywiomKJSQUREZVKfn5+CAsLQ8OGDZGUlIRRo0ahYcOGuH37ttihEREVO0wqiIio1HJwcMC5c+ewcuVK6Onp4dKlS6hVqxZWrVoldmhERMUKkwoiIirVlJSUMHjwYISHh6NTp05IS0tDzZo1xQ6LiKhYYVJBREQEoHz58ti3bx/CwsJQr149Yf3Ro0fx5s0bESMjIir6mFQQERFlUKNGDeHx/fv30blzZzg4OGDPnj0cfpaIKBtMKoiIiLKRlJQEGxsbPH/+HF27doWnpydiYmLEDouIqMhhUkFERJQNFxcXhIWFYfLkyVBVVcWhQ4fg6OiI5cuXIz09XezwiIiKDCYVREREOdDQ0MCMGTNw48YN1K9fH+/evcPw4cPRsmVLNociIvp/TCqIiIjkULVqVZw/fx5Lly6Fjo4OWrZsCYlEInZYRERFgorYARARERUXysrKGD58ODw9PVGuXDlh/dWrV/Hx40c0atRIxOiIiMTDOxVERES5ZGFhAVVVVQBASkoKfHx80LhxYwwZMgQJCQkiR0dEVPiYVBAREeVBSkoKGjZsCABYtWoVHB0dsX//fpGjIiIqXEwqiIiI8kBHRwdr1qxBcHAwKlWqhGfPnqFz587o3Lkznj17JnZ4RCSyc+fOoX379jA3N4dEIsGBAwdyLL9v3z60bNkSxsbG0NPTg6urK/7++2+ZMtOmTYNEIpFZ7O3tC/Asvo9JBRERUT5o0qQJbt26hUmTJkFFRQX79++Hg4MDHj58KHZoRAqLiIhAgwYNULlyZdSpUwd3797NVCYqKgpNmzaFvr4+nJ2dZbadPn0adevWhaOjI6pWrYqff/5ZGI45KioKysrKcHZ2FpbHjx8XxmkVqqSkJDg5OWH58uVylT937hxatmyJwMBAXLt2Dc2aNUP79u1x48YNmXJVq1ZFbGyssJw/f74gwpcbO2oTERHlEw0NDfz222/w8vJC//79oaOjg0qVKokdFpHCBg0ahIEDB8LX1xd79uyBr68vrly5IlNGT08Pv/32GxISEjBp0iSZbWXKlMHOnTtha2uLjx8/ws3NDVu2bIGvry8AQFdXF2FhYYV0NuJo06YN2rRpI3f5RYsWyTz/448/cPDgQRw+fBg
1a9YU1quoqMDU1DS/wswz3qkgIiLKZ9WrV8eFCxewe/duYdjZhIQEzJkzB58+fRI5OiL5vHjxAlevXkXv3r0BAF26dMGTJ0/w6NEjmXKGhoZo1KgRtLW1M9VRs2ZN2NraAviSdDs7OyMqKqrAYy8M7969Q2JiorAU1P/t9PR0vHv3DoaGhjLrIyIiYG5uDltbW/Tq1QvR0dEFcnx5MakgIiIqAMrKyihbtqzw3N/fHxMmTECtWrVw4cIFESMjks+TJ09gZmYGFZUvDVskEgkqVKig8I/XuLg47NmzB+3atRPWJSUloU6dOqhVqxZmzJiBtLS0fIm9MDg6OkJfX19YAgICCuQ48+bNw/v379GtWzdhXb169bBp0yYcP34cK1euRGRkJBo3box3794VSAzyYFJBRERUCJo0aQJjY2OEh4ejUaNGGDZsGBITE8UOi6hQJCYmon379vj5559Ru3ZtAICZmRmePn2KK1eu4NSpUwgJCcH8+fNFjlR+4eHhSEhIEBZ/f/98P8b27dsxffp07N69GyYmJsL6Nm3aoGvXrqhRowZat26NwMBAvH37Frt37873GOTFpIKIiKgQeHl54d69e/D19YVUKsWKFSvg6OiIQ4cOiR0aUZYsLS0RGxuL1NRUAIBUKkV0dDQqVKiQq3revXsHd3d3dOzYEX5+fsJ6dXV14YeyoaEh+vbti5CQkPw7gQKmq6sLPT09YVFXV8/X+nfu3In+/ftj9+7dcHNzy7GsgYEBKleunKlpWmFiUkFERFRIypYti40bN+LUqVOoWLEinj59io4dO2Lp0qVih0aUiYmJCWrVqoWtW7cCAPbu3QsLCwvY2dnJXcf79+/h7u4Od3d3/PrrrzLbXrx4gc+fPwMAPn36hH379sl0RC7NduzYgT59+mDHjh1o27btd8u/f/8ejx8/hpmZWSFElzUmFURERIWsRYsWuHXrFiZMmAATExN4eXmJHRJRllavXo3Vq1ejcuXKmDVrFjZu3AgA6N+/v3CXLTk5GRYWFujatSvCw8NhYWEhNAVavHgxLl++jH379gnDxv7+++8AgPPnz6NmzZpwcnJCrVq1YGpqmmn0qJLg/fv3CAsLE0a5ioyMRFhYmNA3xd/fH97e3kL57du3w9vbG/Pnz0e9evUQFxeHuLg4JCQkCGXGjRuHs2fPIioqChcuXECnTp2grKyMHj16FOq5ZSSRSqVS0Y4ugpiYGFhaWuLJkyewsLAo1GO3CjhYqMcraCf8O4odApHCIiIi4OPjg5cvX0JfXx+bNm1C1apVZcpERUXB19cXN27cgI2Njcywhzlt+0oqlaJFixa4fv063r59W7AnRMXWu3fvoKurKzyfMWMGvLy8UKVKFRGjIqLs5Pa3ZHBwMJo1a5ZpvY+PDzZt2gRfX19ERUUhODgYANC0aVOcPXs22/IA0L17d5w7dw6vXr2CsbExGjVqhN9//x0VK1bM07nlBZOKQsSkgqjoaN68Oby9vYWx12fPnp1p7PXXr18LHfEmTZokkzjktO2rBQsW4N69e/jrr7+YVJBc9u/fj86dO0NdXR2TJ0/G+PHjoaamJnZYRJSBmL8lizI2fyKiUic/xl7PaRsA3L17FwcOHMAvv/yS/ydAJVbNmjXh7u6OT58+4ddff4WLiwsuXbokdlhERN/FpIKISp38Hnv9W58/f8aAAQOwevVqKCsr50udVDpYW1sjMDAQ27Ztg5GREe7cuQNXV1eMHDlS1PHniYi+h0kFEVE+mz59Ojp37gwHBwexQ6FiSCKRoGfPnrh37x68vb0hlUqxdOlSuUaAISISC5MKIip18mvs9eycPXsWS5cuhbW1NRo1aoTExERYW1sjPj4+X+qn0sHIyAibN2/GiRMnYGNjg4kTJ4odEhFRtlTEDoCIqLBlHHvd19dXobHXc5Jx8qaoqCg4OzsjKioqX+qm0qdly5a4f/++TIftzZs3Iz09Hb6+vpBIJCJGR8WN+8n8n/VZTMd
bBogdAv0/3qkgolIpr2Ov57SNKL9lTChiY2MxcuRI9O3bF25ubqLOoEtE9BXvVBBRqVSlShWEhoZmWr9u3TrhsZaWFmJiYrLcP6dtGVlbW3M4WcpXxsbG+PXXXzF16lScPn0a1atXx9SpUzF27FioqqqKHR4RlVK8U0FERFSMqKioYPz48bhz5w7c3Nzw8eNH+Pv7o06dOrh69arY4RFRKcWkgoiIirWIiAg0aNAAlStXRp06dXD37t1MZaKiotC0aVPo6+vD2dk50/b169ejUqVKqFixIgYMGIDPnz/LtU1Mtra2OHHiBDZv3gxDQ0PcvHkTDRo0wLNnz8QOjYhKISYVRERUrA0aNAgDBw7Ew4cPMWHCBPj6+mYqo6enh99++w3bt2/PtC0yMhKTJ09GSEgIHj16hOfPn2PNmjXf3VYUSCQSeHt74/79++jVqxdGjBgBc3NzscMiolKISQURERVb+TE7+p49e9ChQweYmppCIpFg8ODB2LFjx3e3FSXGxsbYunUr5s6dK6wLDw+Hj48PhzImokLBpIKIiIqt/JgdPTo6GlZWVsJza2trYf+cthVFSkpf/qxLpVIMHjwYW7Zsgb29PTZv3gypVCpydERUknH0JyIqEVoFHBQ7hHx1wr+j2CFQMSaRSDB//nwMGDAAN2/ehK+vL7Zu3YpVq1ahYsWKYodHRCUQ71QQEVGxlR+zo1eoUAH//fef8DwqKkrYP6dtRV2dOnVw5coVBAQEQENDA6dOnUL16tUxd+5c4fUiIsovTCqIiKjYyjg7OgCFZkfv0qULDh06hLi4OEilUqxatQrdu3f/7rbiQFVVFb/88gtu376NZs2a4cOHD/j555+xefNmsUMjohKGSQURERVreZ0d3dbWFtOnT0fDhg1hZ2cHY2NjDBo06LvbihM7OzsEBQVhw4YNcHNzg4+Pj9ghEVEJI5GWsp5bMTExsLS0xJMnT2BhYVGox2abb6KCw/9fRPKRSqWQSCQAgI8fP6Jz584YM2YMWrZsKXJkVBjcT/qLHUK+Ot4yoNCPKeZvyaKMdyqIiIhKka8JBQAsXrwYx44dQ6tWreDj44OXL1+KGBkRFWeiJxXLly+HtbU1NDQ0UK9ePVy+fDnbsnfv3kWXLl1gbW0NiUSCRYsWFV6gREREJczQoUMxcuRISCQSbNmyBQ4ODti2bRuHnyWiXBM1qdi1axf8/PwwdepUXL9+HU5OTmjdujVevHiRZfnk5GTY2tpi1qxZMDU1LeRoiYiIShZdXV0sXrwYFy5cQLVq1fDy5Uv07t0bHh4eiIqKEjs8IipGRE0qFixYgAEDBqBPnz5wdHTEqlWroKWlhQ0bNmRZvk6dOpg7dy66d+8OdXX1Qo6WiIioZKpfvz6uXbuG3377Derq6jh+/DhGjx4tdlhEVIyIllSkpKTg2rVrcHNz+18wSkpwc3NDaGhovh3n06dPSExMFJZ3797lW91EREQlhZqaGiZNmoSbN2/C3d0dCxYsELaxORQRfY9oM2q/fPkSaWlpKFeunMz6cuXK4f79+/l2nICAAEyfPj3f6iMiooLF0WnEVaVKFRw7dkxm3fDhw6Grq4upU6dCU1NTpMiIqCgTvaN2QfP390dCQoKwhIeHix0SERFRsXH37l2sWLECs2fPRvXq1REUFCR2SERUBImWVBgZGUFZWRnPnz+XWf/8+fN87YStrq4OPT09YdHV1c23uomIiEq6qlWr4uDBgyhfvjweP34MNzc39O3bF69fvxY7NCIqQkRLKtTU1ODi4iJzxSM9PR1BQUFwdXUVKywiIiL6RocOHRAeHo5hw4ZBIpFg48aNcHBwwM6dO9nfgogAiNz8yc/PD2vXrsXmzZtx7949DBkyBElJSejTpw8AwNvbG/7+/2tbm5KSgrCwMISFhSElJQVPnz5FWFgYHj16JNYpEBERlQp6enpYtmwZzp8/D0dHR7x48QIjRoxAQkKC2KERUREgWkdtAPDy8kJ8fDy
mTJmCuLg4ODs74/jx40Ln7ejoaCgp/S/vefbsGWrWrCk8nzdvHubNm4cmTZogODi4sMMnIiIqdRo0aIDr169j9uzZqFSpEgwMDAB8GSFKKpXK/N0motJD1KQC+DKixPD/a+/Ow6Iq+/+BvwcI0ACFQDBkHBFBXGJRDFwCe0xQU0wrMc19AJfHkhJygwxLfrnFYxoY6FMXrrmSWT2EWZSU7BngQoFDxYCmBbiwzfn+wdX5NYErM3ME36/r4qpzn8+Z87mtu/jMue/7LFrU6rl/FgoKhYKPWYmIiCRmZmaG6Ohorbb9+/djw4YNeP/99zFw4ECJMiMiqfDrBCIiImoTjUaDVatW4fvvv4e3tzdWrlyJGzduSJ0WERkQiwoiIiJqEyMjI6Snp+OZZ55BY2Mj3nzzTXh4eOCrr76SOjUiMhAWFURERNRmjo6OOHjwIA4cOIDu3bvj3LlzCAgIgFKpxJUrV6ROj4j0jEUFERER6cykSZNQVFSEsLAwAEBSUhJycnIkzoqI9E3yhdpERETUsXTt2hUJCQmYNm0a0tLSMGrUKPFcfX09TE1NJcyOiPSBTyqIiIhIL0aMGIE33nhDPC4vL0evXr2wZcsWaDQaCTMjIl1jUUFEREQGsWXLFvz2229YtGgRhg8fjsLCQqlTIiIdYVFBREREBvHWW2/h3XffhaWlJTIzM+Hl5YXo6GjU1dVJnRoRtRGLCiIiIjIIIyMjLFy4EEVFRRg/fjwaGhoQGxsLDw8PZGRkSJ0eEbUBiwoiIiIyqB49euDIkSPYt28f7O3tcfbsWRw9elTqtIioDVhUEBERkcHJZDI899xzKC4uxmuvvYaYmBjxXE1NjYSZEdG9YFFBREREkrG2tsbatWvRuXNnAEBTUxOeeuopTJo0Cb/99pvE2RHRnWJRQWQA58+fx9ChQ+Hq6gofH5+b7niSnJyMPn36oHfv3lAqlWhoaAAAnDhxAp06dYKnp6f4c/36dfG606dPIyAgAO7u7nB3d8fBgwcN0i8iIl3LyspCTk4ODh06BHd3dyQkJHD7WaJ2gEUFkQGEhYUhNDQU586dQ1RUFGbNmtUiprS0FKtWrUJGRgZKSkpQWVmJbdu2iefd3NyQn58v/nTq1AkAcO3aNQQHB2PNmjUoLi7Gjz/+iBEjRhiqa0REOuXr64vc3FwMGTIE1dXVmD9/Pvz9/VFcXCx1akR0CywqiPSsqqoK2dnZmD59OgBg8uTJKC8vR0lJiVbc/v37MWHCBDg4OEAmkyE8PBy7d+++7efv2rULvr6+GD58OADA2NgYdnZ2uu8IEZGBDBw4ECdPnkR8fDwefvhhfPPNN/D09MQbb7whPsElovsLiwoiPSsvL0f37t1hYmICoHlxolwuh0ql0opTqVTo2bOneKxQKLRifvrpJ3h7e8PHxwdbt24V24uKimBmZoann34anp6emDFjBi5evKjnXhER6ZexsTEWL16MoqIijBs3DvX19fj4449hZMRfXah9+frrrzF+/Hg8+uijkMlkOHz48G2vOXHiBLy9vWFmZgYXFxf897//bRGzZcsWKBQKmJub4/HHH8epU6d0n/xd4Mgkage8vb3xyy+/IDc3F4cOHUJCQgL27dsHAGhsbMQXX3yBxMRE5OXlwdHREfPnz5c4YyIi3ZDL5fj444+xZ88eJCUlwdjYGABw/fp1VFdXS5wd0e1dvXoVHh4e2LJlyx3Fl5aWYty4cRg5ciTy8/Px8ssvY968efj888/FmL179yIiIgIxMTHIzc2Fh4cHAgMDUVVVpa9u3BaLCiI9c3JyQkVFBRobGwEAgiBApVJBLpdrxcnlcly4cEE8LisrE2OsrKzQpUsXAM37u0+dOlV8UZRcLsfIkSPh6OgImUyG6dOn47vvvjNE14iIDEImk2HKlCnw8PAQ22JjY9GvXz8cOXJEwsyIbm/MmDFYs2YNnnnmmTuKT0hIQK9evbBhwwa4u7tj0aJFePbZZ7Fp0yYxZuP
GjVAqlZg9ezb69euHhIQEdO7cGdu3b9dXN26LRQWRnnXr1g3e3t5ISUkBABw4cAA9evSAi4uLVtzkyZORmpoKtVoNQRCQkJCAkJAQAEBFRYW4+0lNTQ2OHj0KLy8vAMDzzz+PrKws8Ru7Y8eOaf2Pl4ioo6mvr8ehQ4fw66+/YuLEiXj22WdRUVEhdVr0gKmpqUF1dbX4U1dXp5PPzczMxKhRo7TaAgMDkZmZCaD53/+cnBytGCMjI4waNUqMkQKLCiIDSExMRGJiIlxdXREXF4cdO3YAAObNm4fU1FQAgLOzM1avXo1hw4bBxcUFdnZ2CAsLA9BciAwcOBAeHh7w9fXFU089hdmzZwNoflKxfPlyDB06FI899hiOHz+OhIQEaTpKRGQApqamyM3NRVRUFIyNjXHgwAG4u7vj/fff5/azZDD9+vVDly5dxJ+1a9fq5HPVajXs7e212uzt7VFdXY3r16/j0qVLaGpqajVGrVbrJId7YSLZnYkeIG5ubq1+e5CUlKR1rFQqoVQqW8QtWrQIixYtuunnv/jii3jxxRfbnigRUTvRqVMnxMXFISQkBEqlEtnZ2QgNDUVKSgp27NgBZ2dnqVOkDq6oqAiOjo7isZmZmYTZSI9PKoiIiKjd8vT0RGZmJjZu3IjOnTsjJydHXMxNpE+WlpawsrISf3RVVDg4OKCyslKrrbKyElZWVujUqRNsbW1hbGzcaoyDg4NOcrgXLCqIiIioXTMxMcGSJUtQWFiInTt3am3PXVpaKmFmRHfPz88P6enpWm1paWnw8/MD0Dz9b9CgQVoxGo0G6enpYowUWFQQERFRh6BQKBAcHCwef/nll3BxccHixYtRU1MjYWb0IKutrUV+fj7y8/MBNBe6+fn54ruoli1bhhkzZojx4eHh+PnnnxEZGYkzZ85g69at2LdvH5YsWSLGRERE4P3338cHH3yA4uJizJ8/H1evXhXXW0qBRQURERF1SOnp6dBoNNi8eTP69++Po0ePSp0SPYCys7Ph5eUl7toYEREBLy8vREdHA2je4fHvL7vt1asXPvnkE6SlpcHDwwMbNmxAUlISAgMDxZgpU6Zg/fr1iI6OhqenJ/Lz8/HZZ5+1WLxtSFyoTXQPgtKWSZ2CTn32lG52rCAiup+sWbMGTzzxBMLDw1FaWorx48djypQpiI+Pl/SXL3qwBAQEQBCEm55v7W3ZAQEByMvLu+Xn3m4TF0PjkwoiIiLqsEaPHo3Tp0/j1VdfhZGREfbu3Qt3d3fs2bNH6tSIOhQWFURERNShPfzww1i3bh2ysrLg5eWFK1euSJ0SUYfDooKIiIgeCN7e3jh16hT27duHKVOmiO2FhYVoaGiQMDOi9o9FBRERET0wTExM8Nxzz0EmkwEAfv/9d4wcORKDBw9GVlaWxNkRtV8sKoiIiOiBVVxcjKamJvzwww/w9fXFkiVLUFtbK3VaRO0OiwqS3Pnz5zF06FC4urrCx8cHhYWFrcYlJyejT58+6N27N5RKpfioOjMzE56envD09ET//v0RFhaGurq6215HREQ0fPhwnDlzBtOmTYNGo8E777yDAQMG4NNPP5U6NaJ2hUUFSS4sLAyhoaE4d+4coqKiMGvWrBYxpaWlWLVqFTIyMlBSUoLKykps27YNAODh4YGsrCzk5+fj9OnTqKqqwtatW297HREREQDY2dkhJSUFx44dQ8+ePXHhwgWMHTsW06dPR1NTk9TpEbULLCpIUlVVVcjOzsb06dMBAJMnT0Z5eTlKSkq04vbv348JEybAwcEBMpkM4eHh2L17NwCgc+fOeOihhwAA9fX1uH79ujhX9lbXERER/d2YMWPw448/YsmSJTAyMoK5uTmMjY2lTouoXWBRQZIqLy9H9+7dYWLS/B5GmUwGuVyu9WZJAFCpVOjZs6d4rFAotGLKysrg4eEBW1tbdOnSBQsWLLij64g6urZOLzx+/DiGDBmCfv36oX///oiMjIRGowHQPO6MjY3
F6Yeenp746aefDNY3In2wsLDAxo0b8d1332HdunVie3l5Of/9JroFFhXUISgUChQUFECtVqOurg4HDx6UOiWi+0JbpxdaW1tjz549KCoqQk5ODk6ePIkPP/xQvNbS0hL5+fniT+/evQ3VNSK98vHxgbW1NQBAEASEhYVh4MCBWLduHRobGyXOjuj+w6KCJOXk5ISKigrxP9CCIEClUkEul2vFyeVyXLhwQTwuKytrEQM0f8MUEhKCnTt33tV1RB2RLqYXenl5wdnZGQBgbm4OT09PlJWVGbQfRFKrra3FjRs3cP36dURGRmLIkCHIzc2VOi2i+wqLCpJUt27d4O3tjZSUFADAgQMH0KNHD7i4uGjFTZ48GampqVCr1RAEAQkJCQgJCQEAlJSUiFM16uvrcejQITz22GO3vY6oo9PV9MK/qNVq7N+/H08//bTYdvXqVfj4+MDb2xtvvPEGF7VSh2RpaYn09HRs374d1tbWyMvLg4+PD5YuXYpr165JnR7RfYFFBUkuMTERiYmJcHV1RVxcHHbs2AEAmDdvHlJTUwEAzs7OWL16NYYNGwYXFxfY2dkhLCwMQPOcby8vL3h4eMDLywv29vZYtWrVba8jojtXXV2N8ePHIzIyEoMHDwYAdO/eHb/++iuysrLwxRdfICMjAxs2bJA4UyL9kMlkmD17NoqLixESEgKNRoP169djwIABOHPmjNTpEUnOROoEiNzc3JCZmdmiPSkpSetYqVRCqVS2iAsNDUVoaOhNP/9m1xF1dH+fXmhiYnLL6YV/X4D6z2mCNTU1CAoKQnBwMCIiIsR2MzMzdOvWDQBgY2ODOXPmYNeuXYiMjNRzz4ikY29vj927d2P69OmYP38+TExMoFAopE6LSHJ8UkFE1EHpYnphbW0tgoKCEBQUhJUrV2pdV1VVJU49/GuDBC8vLwP0jEh648aNQ1FREVJTU2Fubg4AaGxsRGpqKgRBkDg7IsNjUUFE1IG1dXphfHw8Tp06hYMHD4rbxr755psAgG+++Uaceujt7Q0HBwesWLFCmo4SScDCwgJ9+/YVjzdv3ozg4GCMGTOGGxrQA4fTn4iIOrC2Ti9csWLFTQuFSZMmYdKkSbpJlKgDEAQBZmZm+Pzzz9G/f3/ExsZi8eLF4mYJRB0Zn1QQERER6UBERAR++OEH+Pv749q1a3jllVfg6+uL/Px8AM0vo1y2bBmmTp2KZcuW4fz589ImTKRDLJ1J56r+M0rqFHSq2+IvpE6BiIjaCVdXVxw/fhzbt2/Hq6++ipycHAwePBjTpk1DSkoKZDIZBEGATCbD22+/jeTk5FZfSknU3vBJBREREZEOGRkZYd68eSguLsazzz4LAEhJSYFGo0FTU5PWX+fOndvihZRE7RGLCiIiItKp8+fPY+jQoXB1dYWPjw8KCwtbjUtOTkafPn3Qu3dvKJVKcTexW53TaDR49dVXMWDAAPTt2xdz585FfX29Qfp1t7p3746PPvoIc+bMgUwmazVGJpMhOTnZwJkR6R6LCiIiItKpsLAwhIaG4ty5c4iKimp1ek9paSlWrVqFjIwMlJSUoLKyEtu2bbvtueTkZOTm5iI3NxfFxcUwMjJCfHy8Ibt312pqam66zawgCNwpijoErqkgImpHuGaJ7ndVVVXIzs7G//73PwDN70FZtGgRSkpKtN6Rsn//fkyYMAEODg4AgPDwcLz11ltYuHDhLc8VFBRg1KhRMDU1BQCMGTMGr7/+OpYuXWrgnt45hUJxyycVfHkedQR8UkFEREQ6U15eju7du4vbqMpkMsjlcqhUKq04lUqFnj17iscKhUKMudW5QYMGITU1FdXV1WhoaMC+ffvu+2/658yZc8snFXPnzjVwRkS6x6KCiIiI2o1Zs2YhKCgI/v7+8Pf3h6urqyTvgbibdSNjx47FI488AplMBmNjYxgZGcHY2BgymQy2trYIDAxssaYEaC44nnzySXTt2tUAPSJqGxYVREREpDNOTk6
oqKhAY2MjgOZfjFUqFeRyuVacXC7HhQsXxOOysjIx5lbnZDIZXn/9deTl5eHkyZPo168f+vfvr+9utXC360YqKysxcuRIPPnkk3j++eehVCpha2uLgoKCFutG/rJp0yb07t3bQD0iahsWFURERKQz3bp1g7e3N1JSUgAABw4cQI8ePbTWUwDNay1SU1OhVqshCAISEhIQEhJy23M3btzAlStXAACXLl1CXFwcIiMjDdjD/79uZPr06WK+5eXlLbaG/fvaEJlMhldeeQXXrl3D7t274ezsjEmTJonnwsPDsXv3bvHawsJCHD58GK+99ppB+0Z0r1hUEBERkU4lJiYiMTERrq6uiIuLw44dOwAA8+bNQ2pqKgDA2dkZq1evxrBhw+Di4gI7OzuEhYXd9tyff/6JoUOHon///hgxYgTCw8Mxfvx4g/ZP3+tGGhoaoFQqkZiYCGNjY313h0gnuPsTERER6ZSbmxsyMzNbtCclJWkdK5VKKJXKVj/jZufs7e1RXFysm0TvU6tXr8akSZPg7u5+3y9CJ/oLn1QQERER3QV9rxv56quvsHnzZigUCgwfPhzV1dVQKBS4ePGivrtGdM9YVBARERHdBX2vG8nIyMCFCxdQVlaGb775BlZWVigrK4OdnZ1hO0p0F1hUEBEREd0lfa4bIWqPuKaCiIiI6C7pc93I3ykUCvzxxx/3nCeRofBJBRERERERtQmfVBAREdEtjV57ROoUdOp/y4KlToGow+GTCiIiIiIiapP7oqjYsmULFAoFzM3N8fjjj+PUqVO3jP/oo4/Qt29fmJubY+DAgTh27JiBMiUiIiIion+SvKjYu3cvIiIiEBMTg9zcXHh4eCAwMBBVVVWtxp88eRJTp07F3LlzkZeXh4kTJ2LixIn48ccfDZw5EREREREB98Gaio0bN0KpVGL27NkAgISEBHzyySfYvn07XnvttRbx8fHxCAoKwtKlSwEAsbGxSEtLw7vvvouEhASD5k5EREQdR9V/Rkmdgs50W/yF1CnQA0bSoqK+vh45OTlYtmyZ2GZkZIRRo0a1uk0bAGRmZiIiIkKrLTAwEIcPH241vq6uDnV1deLxn3/+CQCoqKhoY/Z378aflwx+T3365ZdfWm2/9McNA2eiX/Wt9PPGxWoJMtGfm/2zbE84vtonjq/2geOr/WltbAEcX7rw1++QGo3G4Pe+n0laVFy6dAlNTU2wt7fXare3t8eZM2davUatVrcar1arW41fu3YtVq9e3aJ9yJAh95g1/cXp/0mdgYHEOEmdgd45YavUKdA/cHx1HBxf958HYnw9AGMLkHZ8VVZWQi6XS3b/+43k05/0bdmyZVpPNhobG1FcXAwnJycYGUm+pITuUU1NDfr164eioiJYWlpKnQ5Rh8LxRaQ/HF/tn0ajQWVlJby8vKRO5b4iaVFha2sLY2NjVFZWarVXVlbCwcGh1WscHBzuKt7MzAxmZmZabcOGDWtD1nQ/qK5ufnzr6OgIKysribMh6lg4voj0h+OrY+ATipYk/are1NQUgwYNQnp6utim0WiQnp4OPz+/Vq/x8/PTigeAtLS0m8YTEREREZF+ST79KSIiAjNnzsTgwYMxZMgQvPPOO7h69aq4G9SMGTPg6OiItWvXAgBeeukl+Pv7Y8OGDRg3bhz27NmD7OxsbNu2TcpuEBERERE9sCQvKqZMmYKLFy8iOjoaarUanp6e+Oyzz8TF2CqVSmvtw9ChQ7Fr1y6sXLkSy5cvR58+fXD48GEMGDBAqi6QBMzMzBATE9NiahsRtR3HF5H+cHxRRyUTBEGQOgkiIiIiImq/uP0RERERERG1CYsKIiIiIiJqExYVRERERETUJiwqSGcCAgLw8ssvS3b/WbNmYeLEifdNPkREREQPChYV1GEdPHgQsbGxUqdBpFeGLJ63bduGgIAAWFlZQSaT4Y8//mgRc/nyZUybNg1WVlbo2rUr5s6di9raWoPkR9RW7XE8/fDDDxgxYgT
Mzc3h5OSEt99+2yD5E/0TiwrqsGxsbGBpaSl1GkQdxrVr1xAUFITly5ffNGbatGkoLCxEWloajh49iq+//hqhoaEGzJKofdDFeKqursbo0aPRs2dP5OTkYN26dXj99df57i6ShkCkI/7+/sLChQuFhQsXClZWVsIjjzwirFy5UtBoNIIgCMKHH34oDBo0SLCwsBDs7e2FqVOnCpWVleL1ly9fFl544QXB1tZWMDc3F1xcXITt27eL51UqlfDcc88JXbp0EaytrYUJEyYIpaWl4vmZM2cKwcHBWvm89NJL4nHPnj2FN998U5g9e7ZgYWEhODk5CYmJiVp9uN09iO4nM2fOFABo/ZSWlgonTpwQfHx8BFNTU8HBwUGIiooSGhoaxOtuN1Zv58svvxQACFeuXNFqLyoqEgAIWVlZYtunn34qyGQy4ddff9VJn4n0pT2Op61btwrW1tZCXV2dGBMVFSW4ubm14U+C6N7wSQXp1AcffAATExOcOnUK8fHx2LhxI5KSkgAADQ0NiI2NRUFBAQ4fPoyysjLMmjVLvHbVqlUoKirCp59+iuLiYrz33nuwtbUVrw0MDISlpSUyMjLw7bffwsLCAkFBQaivr7/j/DZs2IDBgwcjLy8PCxYswPz583H27Fmd3oPIUOLj4+Hn5welUomKigpUVFTgoYcewtixY+Hj44OCggK89957SE5Oxpo1a7SuvdVYvVeZmZno2rUrBg8eLLaNGjUKRkZG+P7779v02UT61h7HU2ZmJp544gmYmpqKMYGBgTh79iyuXLnSpvsT3S3J36hNHYuTkxM2bdoEmUwGNzc3nD59Gps2bYJSqcScOXPEOGdnZ/znP/+Bj48PamtrYWFhAZVKBS8vL/E/oAqFQozfu3cvNBoNkpKSIJPJAAA7duxA165dceLECYwePfqO8hs7diwWLFgAAIiKisKmTZvw5Zdfws3NTWf3IDKULl26wNTUFJ07d4aDgwMAYMWKFXBycsK7774LmUyGvn374rfffkNUVBSio6NhZNT8XdKtxuq9UqvV6Natm1abiYkJbGxsoFar772jRAbQHseTWq1Gr169tGLs7e3Fc9bW1vd8f6K7xScVpFO+vr7iL+QA4Ofnh/Pnz6OpqQk5OTkYP3485HI5LC0t4e/vDwBQqVQAgPnz52PPnj3w9PREZGQkTp48KX5OQUEBSkpKYGlpCQsLC1hYWMDGxgY3btzATz/9dMf5PfbYY+Lfy2QyODg4oKqqSqf3IJJScXEx/Pz8tMbhsGHDUFtbi19++UVsu9VYfeutt8Qx8FfBT/Qg4ngiunN8UkEGcePGDQQGBiIwMBA7d+6EnZ0dVCoVAgMDxalFY8aMwYULF3Ds2DGkpaXhX//6FxYuXIj169ejtrYWgwYNws6dO1t8tp2d3R3n8dBDD2kdy2QyaDQaANDZPYjau/DwcDz//PPi8aOPPnpH1/29SP9LY2MjLl++LH7zS/Sg0ed4cnBwQGVlpVbMX8ccc2RoLCpIp/45b/q7775Dnz59cObMGfz++++Ii4uDk5MTACA7O7vF9XZ2dpg5cyZmzpyJESNGYOnSpVi/fj28vb2xd+9edOvWDVZWVnrJ3RD3INI1U1NTNDU1icfu7u44cOAABEEQvzn99ttvYWlpiR49eohxNxurxsbGsLGxgY2NzV3n4ufnhz/++AM5OTkYNGgQAOD48ePQaDR4/PHH76V7RAbV3saTn58fVqxYgYaGBvFLs7S0NLi5uXHqExkcpz+RTqlUKkRERODs2bPYvXs3Nm/ejJdeeglyuRympqbYvHkzfv75Z6SmprZ4h0R0dDSOHDmCkpISFBYW4ujRo3B3dwfQvK2era0tgoODkZGRgdLSUpw4cQKLFy/WegTdFoa4B5GuKRQKfP/99ygrK8OlS5ewYMEClJeX49///jfOnDmDI0eOICYmBhEREeL8b+DmY/VW1Go18vPzUVJSAgA4ffo08vPzcfnyZQDNv4AFBQV
BqVTi1KlT+Pbbb7Fo0SKEhITc8bezRFJqb+PphRdegKmpKebOnYvCwkLs3bsX8fHxiIiI0NOfENEtSL39FHUc/v7+woIFC4Tw8HDByspKsLa2FpYvXy5uq7dr1y5BoVAIZmZmgp+fn5CamioAEPLy8gRBEITY2FjB3d1d6NSpk2BjYyMEBwcLP//8s/j5FRUVwowZMwRbW1vBzMxMcHZ2FpRKpfDnn38KgnBnW8pu2rRJK2cPDw8hJibmju9BdL85e/as4OvrK3Tq1OmutsC81Vi9mZiYmBZbbgIQduzYIcb8/vvvwtSpUwULCwvByspKmD17tlBTU6Ov7hPpVHscTwUFBcLw4cMFMzMzwdHRUYiLi9PpnwnRnZIJgiBIUs0QEZEkAgIC4OnpiXfeeUfqVIjaPY4nomac/kRERERERG3CooKIiIiIiNqE05+IiIiIiKhN+KSCiIiIiIjahEUFERERERG1CYsKIiIiIiJqExYVRERERETUJiwqiIiIiIioTVhUEBERERFRm7CoICIiIiKiNmFRQUREREREbcKigoiIiIiI2uT/AMHJRbcVfSP4AAAAAElFTkSuQmCC", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Top-100 ablation — full prediction shift:\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - "
\n", - "
Input Sentence:
\n", - "
Fact: the capital of the state containing Dallas is
\n", - " \n", - "
\n", - "
Original Top 5 Tokens
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
TokenProbabilityDistribution
Austin0.414\n", - "
\n", - "
\n", - " 41.4%\n", - "
\n", - "
Texas0.056\n", - "
\n", - "
\n", - " 5.6%\n", - "
\n", - "
the0.056\n", - "
\n", - "
\n", - " 5.6%\n", - "
\n", - "
not0.056\n", - "
\n", - "
\n", - " 5.6%\n", - "
\n", - "
Fort0.044\n", - "
\n", - "
\n", - " 4.4%\n", - "
\n", - "
\n", - " \n", - "
New Top 5 Tokens
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
TokenProbabilityDistribution
Texas0.125\n", - "
\n", - "
\n", - " 12.5%\n", - "
\n", - "
the0.110\n", - "
\n", - "
\n", - " 11.0%\n", - "
\n", - "
not0.059\n", - "
\n", - "
\n", - " 5.9%\n", - "
\n", - "
called0.036\n", - "
\n", - "
\n", - " 3.6%\n", - "
\n", - "
a0.031\n", - "
\n", - "
\n", - " 3.1%\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - "
Key Tokens
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
TokenOriginalNewChange
▁Austin0.41410.0090\n", - "
\n", - "
\n", - " -97.8%\n", - "
\n", - "
▁Dallas0.03000.0038\n", - "
\n", - "
\n", - " -87.4%\n", - "
\n", - "
▁Texas0.05590.1245\n", - "
\n", - "
\n", - " +122.7%\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ + "from IPython.display import display, Markdown\n", + "\n", "# Progressive ablation: zero out increasing numbers of custom-target features\n", "probs_base = torch.softmax(original_logits.squeeze(0)[-1].float(), dim=-1)\n", "groups = {\"baseline\": {\n", @@ -2195,7 +641,7 @@ "\n", "# Show the full top-k comparison for the strongest ablation\n", "strongest_n = max(ablation_results.keys())\n", - "print(f\"\\nTop-{strongest_n} ablation — full prediction shift:\")\n", + "display(Markdown(f\"#### Top-{strongest_n} ablation — full prediction shift\"))\n", "display_topk(prompt, original_logits, ablation_results[strongest_n])" ] }, @@ -2210,320 +656,12 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAxYAAAHqCAYAAACZcdjsAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjgsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvwVt1zgAAAAlwSFlzAAAPYQAAD2EBqD+naQAApRFJREFUeJzs3XlcTfn/B/DXbd8XlFJRpJJIhMm+ZB1kGGMwiOzbEIasGTOyjX0n2yCNLNnGMsiaLRKiLCVL2RUVUef3h6/z62pRt+W0vJ6Px3m493M+53Pe597jdN73nM/nyARBEEBERERERJQHSlIHQERERERExR8TCyIiIiIiyjMmFkRERERElGdMLIiIiIiIKM+YWBARERERUZ4xsSAiIiIiojxjYkFERERERHnGxIKIiIiIiPKMiQUREREREeUZEwsq8aKjoyGTycSJiiZvb2/xO3J3d8/RMu7u7uIy3t7eBRqfpaWluK6goKACXVdRpch3VNo1a9ZM/Mw2btxYIOtQZN9Mf0yMjo7+Zlt52Y6s1lWSFMb3nJXCPA4SfYuK1AFQ7r158wZz587Fvn37cP/+fXz69AmGhoYwMTFBjRo10Lp1a/Tu3VvqMAtNaGgo9uzZA+DzH0UpTniCgoLEP8K1atVC586dCz2GgpL+D9Xo0aNhYGAgWSwF5c2bN1i0aJH4vqT8cS4N3x1JKzo6WjyRNjAwwOjRoyWNJ6dSUlKwYsUKbNy4Effv34cgCDA1NUXt2rUxceJE1KpVS+oQ86ykHteoaGNiUcy8fv0a9erVw927d+XKnz59iqdPn+LatWuIiooqdYnFjBkzAABNmzbNkFiYmpri9OnTBRpDUFCQGEPfvn1LVGLxZbuAz7+MlcST0zdv3shtZ2Z/gAMCAvD+/XsAQI0aNQortDwpDd8dKSb9MdHU1PSb9ZcuXYr4+HgAgI2NjVgeHR0t7meVKlXKNLHI7boKQ8+ePbFz5065sjt37uDOnTto27ZtsUosJk+ejAEDBgAAKlasKJbn5LhGlN+YWBQzixcvFpOKihUrYurUqahcuTKSk5Nx8+Z
N7N27F0pKvMMtPXV1dTRq1EjqMPJNcnIy1NXV+T0XMmdnZ6lDIAklJiZCW1tb6jDyTW6PiXlJpova8ffly5dySYWPjw/q1q2Lx48f4/jx49DX15cwutyrWrUqqlatKnUYRADYx6LYuXjxovh67NixGDBgAFq0aIHvv/8ev/32G86cOYMDBw5kWC4pKQlz585FvXr1oKenB3V1dVStWhWenp54/vy5XN2goCDxfk1LS0tERETg+++/h46ODoyNjTFy5EgkJyfj7du3GDlyJMqXLw9NTU00adIEly9flmvr8uXL+OWXX1CjRg0YGRlBVVUVurq6qFWrFqZPn453797J1f/6Hu6zZ8+iRYsW0NbWhr6+Prp3745nz56J9WUyGfr16ye+P3nyZIb+FN/qYxEaGgp3d3dUrlwZGhoa0NPTQ40aNTB+/Phvfh9f2k7/q9CmTZvkPj/g8x+yIUOGoH79+jA1NYWGhgY0NTVhbW2NgQMH4v79+9l+B5GRkejSpQsMDQ2hpaWFhIQEAMCDBw/QrVs36OvrQ09PDx06dEB4eHi29/u+evUKU6dOhaOjI3R0dKCpqYnq1avD29tb7vv4ct9uelZWVjm+jzi33/3XLly4gJYtW0JHRweGhob4+eef8fDhw2yXUWTdzZo1g5WVldzy6feXL7e4ZXcf+/379zF06FBYW1tDQ0MDOjo6cHR0xLRp0/DmzRu5urndx4GM+8O35Oa7y03sWTly5Ag0NTUhk8mgrKyMdevWifMuXryIHj16wMLCAmpqajA0NISrqyv27t2boZ2v99v169fD0dERGhoaqFChAry8vJCampqjmL5ua926dahZsyY0NDRgbm6OCRMmiFegvkj/HR85cgTTp09H5cqVoaKigrVr14r1Tp06ha5du6JChQriNjVu3Bjr1q1DWlpatnFt3rxZ3Kas4li2bBnatWsHKysr6OnpQVVVFcbGxmjTpg12796dbftpaWlYuHAhbG1toa6ujipVqmDOnDkZ4sptv4fMjimWlpZo3ry5WOfBgweZtpvdum7duoUBAwbIHX8bNmyIjRs3QhAEubqPHz/G4MGDUblyZairq0NTUxMWFhZo1aoVpk+f/s1t+EJbWxuqqqri+65du6Jly5bo06cPNm7ciB9++CHHbeXE06dPMX78eNjb20NLSwuampqws7PDmDFj8OTJkwz1c3tcz6yPRU6Pa9k5deoUGjZsCE1NTZQvXx5Dhw7F69evs/w+c7vfpv//duzYMcydOxdVq1bNdr+lYkCgYuXnn38WAAgABFtbW2H79u1CXFxctss8f/5ccHBwEJf7ejIzMxPu378v1j9x4oQ4z8DAQDA2Ns6wTJcuXYT69etnKC9XrpyQkJAgtrVy5cos1wtAqFOnjvDx40ex/vTp08V5VlZWgoqKSoZl2rRpI9bPru0vu3dUVFSGsi/WrFmT6ToACPr6+t/8Pr5u++upUqVKgiAIwq1bt7KtZ2hoKNy7dy/T70BfX18wMjKSq//69WshNjZWqFChQqZtWVlZie83bNggtnvnzh3B3Nw8yzgcHByEly9fCoIgCH379s025vTtZiYv3729vb2grq6eYRlzc3Ph6dOn4jLpY5w+fbpC627atGm2dU+cOCEIgiBUqlQpQ5kgCEJQUJCgo6OT5fJWVlbCo0ePMt3OnOzjX+8PX/ap7OT0u8tL7H379hUEQRCOHj0qaGhoCAAEFRUVYdu2bWL95cuXC0pKSlm27+XlJRd3+u+iatWqmS7j4+Pzze3/ui17e/tM22rbtq2QlpYmLpP+O/56/QsXLhQEQRDmzZsnyGSyLLepffv2cvt1+jicnJxyFEdmx9bMYsks7qzWMWjQILll0s+LiorKtK30+3n67fiy/6Svm9n0pd2s1rV7925x38ls6tWrl/i5pKSkCFWqVMmyrrq6eo72iy+6desmLlu7dm0hPj4+V8t/LbPPRxAEITw8PNO/oV+mcuXKCdeuXRPrK3J
cz+w4mNPjWlb+++8/QVVVNcNytWvXzvL7zMt+m9X/0a/3Wyr6mFgUM3///Xem//nMzMyEn3/+WQgMDJT7AyUIgvDTTz+J9WrVqiX4+fkJ//77r9C1a1exvHHjxmL99CcxX5bZvXu34O3tLVeuoaEhLFq0SNi5c6fcgXPVqlViW2fPnhX++usvYffu3cJ///0nnDhxQtixY4dQt25dsf4///wj1k9/4gJAaNWqlbB3794M5bdv3xYEQRBOnz4tTJo0SS7W06dPi5MgZJ1Y3Lx5U1BWVpZbdtOmTcKhQ4eERYsWCS4uLt/8Pt6/fy+cPn1a6Nevn9hOu3btxPVfunRJEARBiIuLE37//XfB399fOHTokBAUFCTs27dP+OWXX8Tlhg0bluV3YGBgICxatEg4cuSIsHjxYiEpKUno37+/OF9PT09Yvny5EBgYKDRu3Fhu2fR/gNIf+Js3by7s3r1b2Ldvn9wfod69ewuCIAiRkZHC6dOn5drasWOHuG3pT/Azk9fvvlOnTsL+/fuFpUuXyp38DhgwQFwmq8QiN+sOCwsTduzYIbfu9PvQmzdvBEHI/IQrOTlZ7iSgXr16wq5du4TNmzcLZmZmYnn79u2z3M5v7eNf7w85SSxy8t3lNfa+ffsKx44dEzQ1NQUAgpqamrB7926x7o0bN8SkQklJSZg8ebJw5MgRYfXq1YKhoaHYzrFjx8Rlvj4ZGjlypHDgwAHhxx9/FMtMTEy+uf1ftyWTyYTffvtNOHjwoODp6Sm3ji1btojLfH2i3K9fP2H//v3CP//8I5w6dUoIDQ2VSyp69+4tHDhwQJg9e7agpqYmls+dOzdPcaxYsULw9fUV9u/fLwQFBQlHjx4Vli1bJibb+vr6cslL+rjV1NSEWbNmCQcPHsyQYJ45c0ZcJn25oonFpUuXhCVLlsh9N+n/77x//z7LdT179kzu//WQIUOEQ4cOCX///bdcDL6+vuK6vpTVrFlT2L17t3D06FFh06ZNwq+//ipUr149R/uFIAjC2LFj5WICIHz33XdyyUX6v5uvX7/+ZptZJRbpT8SrVq0q+Pn5CTt27JA7kXZwcBBSU1MFQRAUOq5ndhzM6XEtM6mpqYK1tbW4nJ2dnbBjxw7h77//ljs2fL3vFMZ+S0UfE4tiaPjw4dn+Yubm5iYmF69fv5Y7ed62bZt4YDlx4oTcLxJfTmS+PqkNDw8XBEEQ0tLSBG1tbbH8t99+k4vpS7mnp6dY/vHjR2Hp0qVCw4YNBUNDw0x/vUxfP/2JS7ly5YSkpCRxnp2dnThv7969YvmGDRvE8qZNm2b4vLJKLMaNGyeWmZubC+/evcvyM7906ZLcQfn06dNCZGRkpnF/+SX3a/v37xe+//57wcTEJNNfqWvXri3W/fo7SL+9gvD5wK+npyfOX7BggTjv+fPncr8CfvkDdP36dbFMVVVVOHz4sLgtAQEBcvPevn0rtpfVH5Fvyct3X6FCBeHDhw/ivPnz54vzDAwMxD/CWSUWuV13dle1vsjshCswMFDuD+OTJ0/E+vv37xfnyWQyMRFTZB9XVHbfXV5jr1mzpqClpSUAEDQ1NYVDhw7JtZ/+5M3V1VXu/076k6eff/5ZXCb9yVn6hCYuLk5uW9JfFc1K+ra6desmN69Dhw7ivE6dOonl6b/jLl26ZGhzzJgx4vwaNWrIzUt/PLG3t89THDExMcKwYcMEW1tbMXH7egoLC8s07vHjx8uto0aNGuK8UaNGieVZ7Ru5SSwEIWdJb2brWrp0qVjm4OAgt39MnjxZnPfdd98JgvA5Wf5S1rJlS+HmzZtCSkpKpuvLzsKFC8V2GjduLLdd6ZOLL/8XzczMctRuZp/PtWvX5LY9JCRErH/jxg25eRcvXlTouC4IWR8Hc3Jcy8zly5fllrty5Yo4L/2x4et9pzD2Wyr62Hm7GFq2bBlGjhyJHTt24PTp07hw4YI4WgcABAYGwt/fHz///DMiIyPl7knu2bNnlu3
euHEDtra2cmUGBgaoVq0agM/3Z5YpUwaJiYkAABcXF7FeuXLlxNevXr0SX/fv3x9///13ttvz+vXrTMtdXFygqakpvi9btmym61BUeHi4+LpNmzbZdsz88ccf8eDBA7myvn375ni88vXr18PDwyPbOll9Durq6ujQoYNc2bNnz8R+FgDQsGFD8XW5cuVgZ2eH0NBQuWXSb+/Hjx/Rpk2bTNf38eNHREREoE6dOtnG+y15+e7r168PNTU18X36zp9v3rzBixcvYGxsXCDrzo3bt2+Lr6tUqSI34k36mAVBQERERIaYC3ofz05eYw8LCxNfr127NsP+lH5/+++///Dff/9lGseNGzcyLW/ZsqX4Ov3nAnz+bHR1dTNdLjNfdx5u1KgR9u/fD+DzSECZ6dq1a4ay9J9ZZm3Onz8fABAZGQlBEDL0c8lJHHFxcXB2ds7Qz+ZrWe2/X6+jYcOGuH79utw6ioL0+8eNGzfQuHHjTOt92T+sra3h6uqK//77D8eOHUP16tWhrKyMKlWqwMXFBUOGDMF33333zfXOnj0bAKCiooJ//vkHWlpaaN68Oa5cuYLz58+jbdu2GDZsmPhdt2/fXuFtTL+/aGpqonbt2uL76tWrw8DAQOzHdPv2bVhYWOT6uF4Q0u8nWlpacHJyEt9n1RG/tOy39G3svF1M2draYsqUKTh8+DBevnyJQ4cOwdDQUJx/4cKFXLeZWWfar0fHSD8SUVZDVwr/63D3+PFjuZO70aNH48iRIzh9+jT69OkjlmfVOatMmTJy71VU/j8P/rKO4uLLHzMAaNu2Lfbu3YvTp09j4cKFYnlWn0P58uUznKB8631efatj9bfk9bsvruvOreK8jysrK4uvvb29ERsbq1A7We1r6T+b9J8LUDifjVTDoq5fv148OStfvjx8fX1x8uRJnD59Wu4HnKKw/xaGL/uHTCbDvn37sGrVKvzwww+wtbWFkpISIiMjsWnTpkwHD/nas2fP8PTpUwCf/7aZmJhAT08Phw4dEofQDQ4OFodrl8lkGDVqVAFunbyCPq4rGkdOcL+lL5hYFDMnTpzIMFKLsrIy2rRpg/r164tlX/7z2tjYyJ0AREREQPh8C5zc9O7dO/Tt2zdfY00/gk/ZsmWxcOFCtGrVCo0aNcLjx4/zbT3pk53cHLTs7e3F10eOHBGvxHyR/uQlOjo6w2eW/mrFt2KIiYkRX8+bNw8dO3ZEo0aNcnQCn9lB3sjISC7pO3/+vPj6xYsXcr+UffHlyhPw+dezN2/eZLkvNG3aNNP15/Tzzet3f/HiRXz8+FF8f/bsWfG1vr6+3B+q/Fj310P35nQ77ezsxNf37t1DXFxcpjHLZLIMVwMLQ3bfXV5j/+mnn+Dq6goAuHv3Llq2bCk3wlz6/a1Hjx6Z7muCIGR5xSI/pd+er99bW1tnukxm/+/Sf2bZtWljY5Pp8jmJI/2x4pdffkH//v3RpEkTVKxYES9fvsw01uzWce7cuQzryE+KHn/T7x8NGjTIcv/4cowUBAEaGhoYPHgwdu3ahdu3byMxMVE88f/48SMCAgKyXaeWlpYY78uXL8XPxsjICEePHoW5ublc/cmTJ8PBwSHH2/S19PtLcnIyrl69Kr4PDw+X+1tuZ2en0HE9O4oe19IPXZuUlCReOQCQ5TOhitt+SwWHt0IVM76+vti1axc6dOiA5s2bo0qVKpDJZDhz5gyOHj0q1vtym5KBgQG6dOmCHTt2APh8WXf8+PGwtrbGmzdv8ODBA5w6dQq3b9/O9UHrWypXriy+fvnyJf788084OzsjICAAx44dy7f1pL9NIiwsDLt27YKxsTEMDAyy/aPg7u6OhQsXIjU1FQ8fPkTTpk3x66+/onz58oiMjMT27dtx5syZXMdw+vRpHDhwQPxFzNraGpUrV8atW7cAAH/88Qc8PDwQEhKCP//8U6FtVlJSwo8//ghfX18AwLRp06CmpoYKFSpg3rx5GYavBD6PQ1+3bl1
cunQJycnJaNGiBUaNGgULCws8f/4cUVFROH78ONLS0uRuWylbtixevHgBAFi1ahU6dOgAJSUl1KtXT+52pfTy+t0/fvwYP/30EwYMGCD3AC7g821p2T3DQ5F1lylTBjKZTEwmFy5ciHr16kFJSUnudoSvtW7dGhUqVMCTJ0+QkpKCH374ARMmTMC7d+/g5eUl1mvXrl22t259S1BQkDisZ6VKlXI0PCiQ/XeX19jV1NSwe/dutGzZEhcvXsStW7fQqlUrnDhxAoaGhnB3d8eiRYuQlpYGPz8/6OrqokOHDlBXV8ejR48QHh6OvXv3YtKkSRkeapnfAgIC4OXlhaZNm+L48ePi7UfA5wQpp/r06YNFixZBEASEhYWhX79++Omnn3Djxg0sWbJErJfV9uQkjvT7b0BAAFxcXJCWloYZM2bk6ErN4sWLUaZMGdSsWRM7duyQu2UtN9uaU+mPfU+ePMHmzZtRuXJlaGpqZns7Zffu3TFp0iS8e/cO586dw48//oiePXtCX18fjx8/RkREBA4ePIjOnTtj+vTpePr0KRo2bIiuXbuiRo0aMDU1RVJSktxVisyOe+np6OigTZs2+PfffwEAnTp1wuTJk1G9enWEh4dnOPHObCjY3KhZsyZq166NK1euAPicYM+YMQPKyspyxzQHBwfUqVNHoeN6dhQ9rjk5OaFq1ariLUg9e/aEt7c33r9/jwkTJmS6THHbb6kAFVz3DSoIvXr1yrRDVPqpSZMmwqdPn8Rlnj17lu1ws/iq0112nfGy6tiXVefl9MPjfpmUlZXlRrhIXz+7TtBZdR589eqV2Ik0/dSyZUtBELLvwLZy5Uq5zu3pp5wMN/tFeHh4ph2EPTw8BEEQhFWrVmW6jmbNmuX6O/giq2EJ9fX1BUtLy0w/q8jIyGyHmwUydoDv0aNHpvUePnyY7WeSl+++SpUqmXZwNzMzkxteOatOi7ldtyAIgouLS6bLfJGX4WbTf1aK7OO5HRXqi299d/kR+4sXL4Rq1aqJ5fXq1RM7Vy9btizb4Wa/3s6stl8Qcj+IQE6GeW3VqpU4EIAgZP0dp5eT4WbTdypOH0f6zvlZxREbGys3ataXyd7eXm70vfTxpY87q3V8ORZ96/PMbeftT58+ZXpMqVKlyjfXtWvXrmyHm03//zo2NjbbeioqKsKFCxe+sVcIQnR0tNzxMbMp/fc7Z86cb7aZ3edz8+bNbIebLVu2bI6Gm83uuJ7VcVAQvn1cy0pWw83WqlUr0+8zr/ttVv9Hv95vqejjrVDFjLe3NxYvXozOnTujWrVqKFOmDJSVlWFgYAAXFxf89ddfOHLkiNztT0ZGRrh48SLmz5+P7777Dvr6+lBVVUWFChXw3XffYfLkyXJPIc1P69atw+jRo2Fubg5NTU3Ur18fBw8eRIsWLfJtHYaGhti1axecnZ2hrq6eq2WHDBmCCxcuoHfv3rC0tISamhp0dHTg4OCAgQMH5ridatWqYfPmzahevbrcg5e+GDx4MFauXAk7OztoaGigatWqWLRoEaZNm5areNMzMTHB2bNn0bVrV+jq6oq/xp05c0au/0v6TulVq1ZFWFgYpk2bBicnJ+jo6EBdXR0VK1ZEkyZN8Oeff2LVqlVy61m8eDG6d+8u/vqVU3n57hs1aoSjR4+icePG0NLSgr6+Pn766SecPXsW5cuXL5B1//3332jfvn2uOgUDQNOmTREaGio+uEtNTQ2ampqoUaMGpkyZgitXrmS4xaKwfOu7y4/Yy5YtiyNHjqBixYoAPt/G9v333yMpKQnDhw/H+fPn0atXL1SsWBFqamrQ09ODra0tunXrhs2bN6NLly4Fsu3pjRo1Cps2bULNmjWhrq6OChUqYNy4cQgMDMz1E+zHjRuHEydOoEuXLjAxMYGKigr09fXRsGFDrF69Gvv27cv0GAAAEyZMwMqVK2Fvby/+Ev11HCYmJggKCoKrqyv09PRQtmxZ/PLLLzhx4oRcR/+sLF2
6FH/++SeqVKkCNTU1WFlZYdasWVi9enWutjOnlJWVsXv3bjRp0gRaWlq5WvaHH37A1atXMWjQIPEBjdra2rC2tkaHDh2watUqDBs2DMDnq+8zZ85E69atUbFiRWhqakJFRQUVKlRAly5dcPr0adSrV++b66xUqRJCQ0MxZcoUODg4QENDA2pqaqhcuTL69OmDoKAgrFixQqw/ceLEbz6UMDv29vYICwvD2LFjxeO/hoYGbGxs8OuvvyIsLAw1a9YU6ytyXM+Oose1li1b4ujRo2jQoAHU1dVhZGSEAQMGZDhX+BJHXvfb+fPnY8GCBahatWqh7LdUcGSCUMR7CBJRloRMRp559uwZLC0tkZycDODzk8UdHR2lCI9IMs2aNcPJkycBABs2bCjw262I8ktROK5nFgMA7Nu3D506dQLw+Var58+f5zo5/8LS0lIcbfHEiRNo1qyZwvFS0cErFkTFWMuWLbFmzRpcvXoVDx8+xH///Qc3Nzfxj4+jo6Pcr2FERFS0FYXjemRkJNq2bYudO3ciIiIC9+7dw7Zt2zBkyBCxzi+//KJwUkElFztvExVj4eHhGDx4cKbzjI2NsWXLFsmGLCQiotwrCsd1QRBw+PBhHD58ONP59evXxx9//FGgMVDxxFSTqBgbOnQo6tevj3LlykFFRQW6urqoXbs2pkyZgps3b+ZpqEQiIip8ReG4bmxsjIEDB6J69erQ19eHiooKypYti2bNmmHlypU4ffp0rvttUOnAPhZERERERJRnvGJBRERERER5xsSCiIiIiIjyrNR13v706ROuXr2K8uXLczQDIiIiIsp3aWlpePr0KZycnKCiUnpOt0vPlv7P1atXc/QQHSIiIiKivLh48SLq1q0rdRiFptQlFl+e2nvx4kWYmppKHA0RERERlTSxsbGoV6+eeN5ZWpS6xOLL7U+mpqYwNzeXOBoiIiIiKqlK2233pWtriYiIiIioQDCxICIiIiKiPGNiQUREREREeVbq+ljkVGpqKj5+/Ch1GJQLqqqqUFZWljoMIiKiXON5R/HCc47MMbH4iiAIiIuLw5s3b6QOhRRgYGAAExMTyGQyqUMhIiL6Jp53FF8858iIicVXvvznNjY2hpaWFneWYkIQBCQlJeHZs2cAwKGEiYioWOB5R/HDc46sMbFIJzU1VfzPXbZsWanDoVzS1NQEADx79gzGxsa8RElEREUazzuKL55zZI6dt9P5cm+jlpaWxJGQor58d7xPlYiIijqedxRvPOfIiIlFJngZsvjid0dERMUN/3YVT/zeMmJiQUREREREecbEohTp3bs3Zs2aJXUYAD5n+Xv27Mlx/UOHDqFWrVpIS0sruKCIiIioyLC0tMSiRYvy3E50dDRkMhlCQ0MBAEFBQZDJZHIjce3ZswfW1tZQVlbG6NGjsyyj7LHzdg619gks1PUd8XLLVX13d3ds2rQJwOexlStWrIg+ffpg0qRJUFFRwbVr13Dw4EGsXLkyw7J+fn745ZdfMGTIECxfvjxf4v/C29sbe/bsEf8zfxEbGwtDQ8Mct9O2bVtMnToVW7duRe/evfM1RiIiIsodd3d3vHnzJlc/EubWpUuXoK2tLb6XyWTYvXs3OnfunKd2GzRogNjYWOjr64tlgwcPRr9+/TBq1Cjo6upmWUbZ4xWLEqRt27aIjY3FnTt3MHbsWHh7e2PevHkAgKVLl6Jbt27Q0dHJsJyvry9+++03+Pn54f3794USq4mJCdTV1XO1jLu7O5YsWVJAERERERVfd+7cgZeXF3r06AEvLy/cuXNH6pDyzMjIqEA6tqupqck9f+Ldu3d49uwZ2rRpgwoVKkBXVzfTMvo2JhYliLq6OkxMTFCpUiUMHToUrq6u2Lt3L1JTUxEQEICOHTtmWCYqKgrnzp3DxIkTYWNjg127dsnN9/b2Rq1ateTKFi1aBEtLS/F9UFAQ6tWrB21tbRgYGKBhw4Z48OABNm7ciBkzZuDatWuQyWSQyWT
YuHEjAPlbob5coty1axeaN28OLS0tODo6Ijg4WG69HTt2xOXLl3Hv3r0SeQAlIiJSxIYNG2BnZ4d58+bhn3/+wbx582BnZyf+zZXCyZMnUa9ePairq8PU1BQTJ07Ep0+fxPlv375Fr169oK2tDVNTUyxcuBDNmjWTu+Uo/a1QX847fvjhB8hkMrnzkK9dvHgRTk5O0NDQgLOzM65evSo3P/2tUEFBQWLS0KJFC8hksizL6NuYWJRgmpqaSElJQVhYGOLj4+Hs7JyhzoYNG/D9999DX18fv/zyC3x9fXO1jk+fPqFz585o2rQpwsLCEBwcjEGDBkEmk6F79+4YO3YsqlevjtjYWMTGxqJ79+5ZtjV58mSMGzcOoaGhsLGxQY8ePeQOQhUrVkT58uUxa9asIncAJSIiyk+JiYlZTunvLrhz5w4GDBiAtLQ0pKamyv3r4eGBGzdu5Kjd/PT48WO0b98edevWxbVr17By5Ur4+vrijz/+EOt4enri7Nmz2Lt3L44ePYrTp0/jypUrWbZ56dIlAJ/PW2JjY8X3X3v37h06dOgAe3t7hISEwNvbG+PGjcuy3QYNGiAiIgIAsHPnTsTGxmZZRt/GPhYlkCAIOHbsGA4fPoyRI0fiwYMHUFZWhrGxsVy9tLQ0bNy4EUuXLgUA/Pzzzxg7diyioqJgZWWVo3UlJCQgPj4eHTp0QJUqVQAA1apVE+fr6OhARUUFJiYm32xr3Lhx+P777wEAM2bMQPXq1XH37l3Y2dmJdcqUKYMNGzZAEIQMy3t4eKBu3bpQUmK+TERExVtmty5/0b59exw4cAAAsH79+iwHNklLS0P79u0RExMjlllaWuLFixcZ6mb2d1VRK1asgIWFBZYtWwaZTAY7Ozs8efIEEyZMwLRp05CYmIhNmzZh27ZtaNmyJYDPCUOFChWybNPIyAgAYGBgkO05xbZt25CWlgZfX19oaGigevXqePToEYYOHZppfTU1NfH8qEyZMmLbmZXRt/EMrATZv38/dHR0oKGhgXbt2qF79+7w9vZGcnIy1NXVM4y3fPToUSQmJqJ9+/YAgHLlyqFVq1ZYv359jtdZpkwZuLu7o02bNujYsSMWL16M2NhYheKvWbOm+NrU1BTA5ydappeQkJDl8ulvtSIiIioNoqOjs51fWH0n07t16xZcXFzkzjsaNmyId+/e4dGjR7h//z4+fvyIevXqifP19fVha2ubL+uuWbMmNDQ0xDIXF5c8t0s5w8SiBGnevDlCQ0Nx584dJCcnY9OmTdDW1ka5cuWQlJSElJQUufq+vr549eoVNDU1oaKiAhUVFRw8eBCbNm0Sf/1QUlLK8CvG10+Y3LBhA4KDg9GgQQP4+/vDxsYG58+fz3X8qqqq4usvB6Ovf4XJLrFITU3F5cuXc71eIiKioubdu3dZTjt37hTrWVpaQllZOdM2lJWV0bdvX7my6OjoTNskyg9MLEoQbW1tWFtbo2LFilBR+f+73L50vg4PDxfLXr58icDAQGzfvh2hoaHidPXqVbx+/RpHjhwB8PnSY1xcnFxy8fXQsQDg5OQELy8vnDt3Dg4ODti2bRuAz5cYU1NT82X73r9/j8TExGyfdBkUFIShQ4dmuKeUiIioONHW1s5ySv9rfP/+/bO8jUkQBAwePDhH7eanatWqITg4WC6us2fPQldXF+bm5qhcuTJUVVXl+knEx8cjMjIy23ZVVVW/eU5RrVo1hIWFyV2pUeTHzsLk4+ODunXrQldXF8bGxujcubPYxyM7b968wfDhw2Fqagp1dXXY2Njg4MGDhRBx1phYlAJGRkaoXbs2zpw5I5b9/fffKFu2LH766Sc4ODiIk6OjI9q3by924m7WrBmeP3+OuXPn4t69e1i+fDn+/fdfsZ2oqCh4eXkhODgYDx48wJEjR3Dnzh2xn4WlpSWioqIQGhqKFy9e4MOHDwpvx/nz5+UOpl+TyWRQVlYWR6I4efKkwusiIiIqDqpWrQpfX18
oKSlBWVlZ7l9fX19YW1sX2Lrj4+PlfpwMDQ3Fw4cPMWzYMDx8+BAjR47E7du3ERgYiOnTp8PT0xNKSkrQ1dVF3759MX78eJw4cQI3b96Eh4cHlJSUsv3x0NLSEseOHUNcXBxev36daZ2ePXtCJpNh4MCBCA8Px8GDBzF//vyC+gjyxcmTJzF8+HCcP38eR48excePH9G6detsO9WnpKSgVatWiI6ORkBAACIiIrB27VqYmZkVYuQZMbEoJQYMGICtW7eK79evXy8O2fa1rl27Yu/evXjx4gWqVauGFStWYPny5XB0dMTFixflRlfQ0tLC7du30bVrV9jY2GDQoEEYPny4+AtJ165d0bZtWzRv3hxGRkbw8/NTeBv8/PzQu3fvLA+g69evR2hoKNq0aQM7Ozs0bNhQXDY/O6UREREVJe7u7oiIiMD48ePx008/Yfz48YiIiIC7u3uBrjcoKAhOTk5y04wZM2BmZoaDBw/i4sWLcHR0xJAhQ+Dh4YEpU6aIyy5YsAAuLi7o0KEDXF1d0bBhQ1SrVi3bHxD/+usvHD16FBYWFnBycsq0jo6ODvbt24fr16/DyckJkydPxpw5c/J92/PToUOH4O7ujurVq8PR0REbN25ETEwMQkJCslxm/fr1ePXqFfbs2YOGDRvC0tISTZs2haOjYyFGnpFMKGVnXI8ePYKFhQUePnwIc3NzuXnv378XR0TKbscujpKTk2Frawt/f/9i2YnpxYsXsLW1xeXLl2FlZYW7d+/C19cX0dHRsLS0hIeHB6ytrcXv0MjICOXKlQPw+Xt1cXFBr169MGLEiBL33RIRUfFUks87cisxMRFmZmb466+/4OHhIXU4OZLd95fd+ea33L17F1WrVsX169fh4OCQaZ327dujTJky0NLSQmBgIIyMjNCzZ09MmDAhyz43hYHDzZYSmpqa2Lx5c6ZDzBUH0dHRWLFihTgMrrW1NXx8fLKsn36Yvi1btoiXaJcuXYqZM2eiV69ekv7HIyIiKs2uXr2K27dvo169eoiPj8fvv/8OAHBzc5M4svz19u1buYFn1NXVoa6unmX9tLQ0jB49Gg0bNswyqQCA+/fv4/jx4+jVqxcOHjyIu3fvYtiwYfj48SOmT5+er9uQG7wVqhRp1qxZpk/fLg6cnZ2zfbhedvr164cNGzbA3NwcMTEx6Nu3L2rXro1///2Xt0gRERFJZP78+XB0dISrqysSExNx+vRp8W6DksLe3h76+vrilN2PogAwfPhw3LhxA9u3b8+2XlpaGoyNjbFmzRrUqVMH3bt3x+TJk7Fq1ar8DD/XeMWCSjxlZWW4u7uje/fuWLp0KWbNmoWwsDC0b98eLVq0wL59+6ClpSV1mERERKWGk5NTtn0ISorw8HC5DtXZXa0YMWIE9u/fj1OnTn3z9ilTU1OoqqrK3X1RrVo1xMXFISUlBWpqankPXgG8YkGlhqamJn777Tfcv38fY8eOhZqaGrS0tJhUEBERUYHQ1dWFnp6eOGWWWAiCgBEjRmD37t04fvy4eNt3dho2bIi7d+/KPe8rMjISpqamkiUVABMLKoXKlCmD+fPnIzIyEosXLxbLnzx5gtGjR2d42jcRERFRQRk+fDi2bNmCbdu2QVdXF3FxcYiLi0NycrJYp0+fPvDy8hLfDx06FK9evcKvv/6KyMhIHDhwALNmzcLw4cOl2AQREwsqtSpVqoTKlSuL7729vbF48WJUqVIFv//+O59ESkREhSL9r85UfOTX97Zy5UrEx8ejWbNmMDU1FSd/f3+xTkxMDGJjY8X3FhYWOHz4MC5duoSaNWti1KhR+PXXXzFx4sR8iUlR7GNB9D89evTA1atXcfnyZUyfPh0rVqyAt7c3PDw8oKqqKnV4RERUwqipqUFJSQlPnjyBkZER1NTUsn1AHBUNgiAgJSUFz58/h5KSUp5vPcrJQDJBQUEZylxcXIrcU8WZWBD9T/PmzXHhwgXs2LEDkyZNwv379zF06FA
sXLgQc+fOLXFD4BERkbSUlJRgZWWF2NhYPHnyROpwKJe0tLRQsWJFKCnxBqAvmFgQpaOkpITu3bvjhx9+wOrVq/H7778jMjISp0+fZmJBRET5Tk1NDRUrVsSnT5+QmpoqdTiUQ8rKylBRUeEVpq8wsShFevfujWrVqmHSpEn53ra3tzf27NmD0NBQAIC7uzvevHmDPXv25LntlJQU2NjYICAgAM7OznluLyfU1NQwcuRI9O3bFwsXLsTIkSPFeTdv3oRMJoO9vX2hxEJERCWbTCaDqqoqb7ulYo+JRQ49W+JaqOszHvVfruq7u7tj06ZNAABVVVVUrFgRffr0waRJk6CiooJr167h4MGDWLlypbhMs2bNcPLkSQCfT6TLlSuH2rVro1+/fujSpUv+bUweqampYdy4cZgwYQKOHTtWqOvW09OTe4KlIAgYPnw4Tp8+jX79+mHGjBly41MTERERlVa8KawEadu2LWJjY3Hnzh2MHTsW3t7emDdvHgBg6dKl6NatG3R0dOSWGThwIGJjY3Hv3j3s3LkT9vb2+PnnnzFo0CApNiFLvXr1wpkzZ3Dz5k1J40hKSkLZsmWRlpYGX19fVK1aFZMmTcKbN28kjYuIiIhIakwsShB1dXWYmJigUqVKGDp0KFxdXbF3716kpqYiICAAHTt2zLCMlpYWTExMYG5uju+++w5z5szB6tWrsXbtWvz33/9fNZkwYQJsbGygpaWFypUrY+rUqfj48WOOYzt06BAaNWoEAwMDlC1bFh06dMC9e/fE+SkpKRgxYgRMTU2hoaGBSpUqyT323tDQEA0bNvzmI+4Lmra2Nnbu3Ilz586hUaNGSE5Oho+PD6pUqYKFCxfiw4cPksZHREREJBUmFiWYpqYmUlJSEBYWhvj4+Bz3T+jbty8MDQ2xa9cusUxXVxcbN25EeHg4Fi9ejLVr12LhwoU5jiUxMRGenp64fPkyjh07BiUlJfzwww/iGNBLlizB3r178c8//yAiIgJbt26FpaWlXBv16tXD6dOnc7zOguTi4oJTp05h7969sLe3x6tXr+Dp6Sl54kNEREQkFfaxKIEEQcCxY8dw+PBhjBw5Eg8ePICysjKMjY1ztLySkhJsbGwQHR0tlk2ZMkV8bWlpiXHjxmH79u347bffctRm165d5d6vX78eRkZGCA8Ph4ODA2JiYlC1alU0atQIMpkMlSpVytBGhQoV8ODBgxytrzDIZDJ07NgR7dq1w6ZNm7B9+3b06tVLnP/8+XMYGRlJGCERERFR4eEVixJk//790NHRgYaGBtq1a4fu3bvD29sbycnJUFdXz9WQaIIgyNX39/dHw4YNYWJiAh0dHUyZMgUxMTE5bu/OnTvo0aMHKleuDD09PfFqxJc23N3dERoaCltbW4waNQpHjhzJ0IampiaSkpJyvM7CoqKiAg8PDxw9ehQqKp9z9Q8fPqBu3bpo1aoVrly5InGERERERAWPiUUJ0rx5c4SGhuLOnTtITk7Gpk2boK2tjXLlyiEpKQkpKSk5aic1NRV37tyBlZUVACA4OBi9evVC+/btsX//fly9ehWTJ0/OcXsA0LFjR7x69Qpr167FhQsXcOHCBQAQ26hduzaioqIwc+ZMJCcn46effsKPP/4o18arV6+KzRWA8+fP48mTJ/jvv/9Qp04d9OzZE1FRUVKHRURERFRgmFiUINra2rC2tkbFihXFX84BoFatWgCA8PDwHLWzadMmvH79Wrx96dy5c6hUqRImT54MZ2dnVK1aNVe3JL18+RIRERGYMmUKWrZsiWrVquH169cZ6unp6aF79+5Yu3Yt/P39sXPnTrx69Uqcf+PGDTg5OeV4vVJq2rQpIiIixFuj/Pz8YGtri9GjR+P58+cSR0dERESU/5hYlAJGRkaoXbs2zpw5k2FeUlIS4uLi8OjRI5w/fx4TJkzAkCFDMHToUDRv3hwAULVqVcTExGD79u24d+8elixZgt27d+d4/YaGhihbtiz
WrFmDu3fv4vjx4/D09JSrs2DBAvj5+eH27duIjIzEjh07YGJiAgMDA7HO6dOn0bp1a8U+BAlYWVlhy5YtuHLlClq3bo2PHz9i8eLFsLa2xuPHj6UOj4iIiChfMbEoJQYMGICtW7dmKF+7di1MTU1RpUoVdOnSBeHh4fD398eKFSvEOp06dcKYMWMwYsQI1KpVC+fOncPUqVNzvG4lJSVs374dISEhcHBwwJgxY8Tna3yhq6uLuXPnwtnZGXXr1kV0dDQOHjwIJaXPu2hwcDDi4+Mz3B5VHDg5OeHw4cM4evQonJyc0LhxYz5Uj4iIiEocmSAIgtRBFKZHjx7BwsICDx8+hLm5udy89+/fIyoqClZWVtDQ0JAowoKRnJwMW1tb+Pv7w8XFRepwcq179+5wdHTEpEmTsq1X1L/DtLQ0xMfHw9DQEAAQGxuLTp06YfLkyXBzc8tVB3siIiIqmrI73yzJeMWilNDU1MTmzZvx4sULqUPJtZSUFNSoUQNjxoyROpQ8U1JSEpMKAJg7dy4uX76MH374AY0bN8bZs2cljI6IiIhIcUwsSpFmzZpl+vTtok5NTQ1TpkyBpqam1KHkO29vb0yaNAmampo4e/YsGjVqhM6dO+PWrVtSh0ZERESUK0wsiCSkr6+PP//8E3fu3MGAAQOgpKSEwMBAODg4YNSoUVKHR0RERJRjTCyIigAzMzOsXbsWN27cgJubG9LS0qCqqip1WEREREQ5pvLtKkRUWKpVq4Y9e/bg7NmzsLOzE8tDQkJw5swZDB06FGpqahJGSERERJQ5XrEgKoIaNmyIsmXLAgAEQcC4ceMwevRo2NnZwc/PD2lpaRJHSERERCSPiQVREScIAnr06AETExNERUWhZ8+eqFevHo4dOyZ1aEREREQiJhZERZySkhIGDRqEu3fvYubMmdDV1UVISAhcXV3Rtm1bXLt2TeoQiYiIiIpGYrF8+XJYWlpCQ0MD9evXx8WLF7Osu3HjRshkMrmpKD4IjSi/aWtrY8qUKbh37x5GjRoFVVVVHD58GOfPn5c6NCIiIiLpEwt/f394enpi+vTpuHLlChwdHdGmTRs8e/Ysy2X09PQQGxsrTg8ePCjEiIuv3r17Y9asWVKH8U2rVq0qls/bKCxGRkZYvHgxbt++jTFjxsDDw0Ocd/nyZbx8+VLC6IiIiKi0kgmCIEgZQP369VG3bl0sW7YMAJCWlgYLCwuMHDkSEydOzFB/48aNGD16NN68eaPQ+rJ7xPr79+8RFRUFKyurDFdB2h71Umh9ijrUyidX9d3d3bFp0yYAgKqqKipWrIg+ffpg0qRJUFFRwbVr19CiRQs8ePAAL168gJWVVbbtbdiwAe7u7oqGnycpKSmwsrLC9u3b0bhx41wtm913WNJ9+PABdnZ2eP36NSZOnIhff/21RD5UkIiIqKjL7nyzJJP0ikVKSop4r/gXSkpKcHV1RXBwcJbLvXv3DpUqVYKFhQXc3Nxw8+bNLOt++PABCQkJ4vT27dt83YaipG3btoiNjcWdO3cwduxYeHt7Y968eQCApUuXolu3btDR0YGFhYXcFZ+xY8eievXqcmXdu3eXbDvU1NTQs2dPLFmyRLIYiqMnT55AX18f8fHx8PLyQtWqVbF+/XqkpqZKHRoRERGVApImFi9evEBqairKly8vV16+fHnExcVluoytrS3Wr1+PwMBAbNmyBWlpaWjQoAEePXqUaX0fHx/o6+uLk729fb5vR1Ghrq4OExMTVKpUCUOHDoWrqyv27t2L1NRUBAQEiLcXKSsrw8TERJx0dHSgoqIivjc2NsaiRYtgZWUFTU1NODo6IiAgAMDnEYpcXV3Rpk0bfLnY9erVK5ibm2PatGkAgNTUVHh4eIjL29raYvHixXKxBgUFoV69etDW1oaBgQEaNmwod0tbx44dsXfvXiQnJxfGR1ciWFlZ4cqVK9i8eTMqVqyIx48fw8PDA46Ojti/fz8kvjhJREREJZzkfSx
yy8XFBX369EGtWrXQtGlT7Nq1C0ZGRli9enWm9b28vBAfHy9O4eHhhRyxdDQ1NZGSkoKwsDDEx8fD2dk5R8v5+Phg8+bNWLVqFW7evIkxY8bgl19+wcmTJyGTybBp0yZcunRJvKIwZMgQmJmZiYlFWloazM3NsWPHDoSHh2PatGmYNGkS/vnnHwDAp0+f0LlzZzRt2hRhYWEIDg7GoEGDIJPJxBicnZ3x6dMnXLhwIZ8/lZJNSUkJvXv3RkREBP766y8YGhri5s2b6NixI0JCQqQOj4iIiEowSZ+8Xa5cOSgrK+Pp06dy5U+fPoWJiUmO2lBVVYWTkxPu3r2b6Xx1dXWoq6uL7xMSEhQPuJgQBAHHjh3D4cOHMXLkSDx48ADKysowNjb+5rIfPnzArFmz8N9//8HFxQUAULlyZZw5cwarV69G06ZNYWZmhtWrV6NPnz6Ii4vDwYMHcfXqVaiofN6dVFVVMWPGDLFNKysrBAcH459//sFPP/2EhIQExMfHo0OHDqhSpQqAz0+cTk9LSwv6+vrsmK8gDQ0NeHp6on///pg9ezbu3r0rl1jGx8dDX19fwgiJiIiopJH0ioWamhrq1Kkj96CvtLQ0HDt2TDyp/ZbU1FRcv34dpqamBRVmsbF//37o6OhAQ0MD7dq1Q/fu3eHt7Y3k5GSoq6vLXRHIyt27d5GUlIRWrVpBR0dHnDZv3ox79+6J9bp164YffvgBs2fPxvz581G1alW5dpYvX446derAyMgIOjo6WLNmDWJiYgAAZcqUgbu7O9q0aYOOHTti8eLFiI2NzRCLpqYmkpKS8viplG4GBgaYPXs2duzYIZbFxcWJt8tldcshERERUW5JfiuUp6cn1q5di02bNuHWrVsYOnQoEhMT0a9fPwBAnz594OX1/yMy/f777zhy5Aju37+PK1eu4JdffsGDBw8wYMAAqTahyGjevDlCQ0Nx584dJCcnY9OmTdDW1ka5cuWQlJSElJSUb7bx7t07AMCBAwcQGhoqTuHh4WI/CwBISkpCSEgIlJWVcefOHbk2tm/fjnHjxsHDwwNHjhxBaGgo+vXrJ7f+DRs2IDg4GA0aNIC/vz9sbGwyPI/h1atXMDIyystHQv+TPqnctWsX4uPjsWrVKlhbW2P69OklelADIiIiKhySJxbdu3fH/PnzMW3aNNSqVQuhoaE4dOiQ2KE7JiZG7tfs169fY+DAgahWrRrat2+PhIQEnDt3rkR3ys4pbW1tWFtbo2LFiuJtSQBQq1YtAMhR/xJ7e3uoq6sjJiYG1tbWcpOFhYVYb+zYsVBSUsK///6LJUuW4Pjx4+K8s2fPokGDBhg2bBicnJxgbW0td7XjCycnJ3h5eeHcuXNwcHDAtm3bxHn37t3D+/fv4eTkpMhHQdkYNmwYTp48ifr16yMxMRG///47qlSpgmXLluUo+SQiIiLKjKR9LL4YMWIERowYkem8oKAgufcLFy7EwoULCyGqksPIyAi1a9fGmTNnxCQjK7q6uhg3bhzGjBmDtLQ0NGrUCPHx8Th79iz09PTQt29fHDhwAOvXr0dwcDBq166N8ePHo2/fvggLC4OhoSGqVq2KzZs34/Dhw7CyssLff/+NS5cuic/OiIqKwpo1a9CpUydUqFABERERuHPnDvr06SPGcfr0aVSuXFnsg0H5q0mTJggODsauXbvg5eWFO3fuYOTIkVi5ciVCQ0OhqqoqdYhERERUzEh+xYIKx4ABA7B169Yc1Z05cyamTp0KHx8fVKtWDW3btsWBAwdgZWWF58+fw8PDA97e3qhduzYAYMaMGShfvjyGDBkCABg8eDC6dOmC7t27o379+nj58iWGDRsmtq+lpYXbt2+ja9eusLGxwaBBgzB8+HAMHjxYrOPn54eBAwfm4ydAX5PJZOjatStu3ryJFStWoHz58mjdujWTCiIiIlKI5E/eLmyKPnm7uEtOToatrS38/f1z3DFeKjdv3kSLFi0QGRmZ65GLSvJ
3WNDevXuH1NRU8TO/fPkypk+fjtmzZ6NGjRoSR0dERFR88MnbVKJpampi8+bNePHihdShfFNsbCw2b97M4VALmY6OjtxnPmXKFBw8eBCOjo5wd3cXR/UiIiIiygwTi1KkWbNm4tO3i7IvT/YmaS1btgw//fQTBEHApk2bYGNjg/Hjx+P169dSh0ZERFRi+Pj4oG7dutDV1YWxsTE6d+6MiIiIHC+/fft2yGQydO7cueCCzCEmFkSUKWtra/j7++PixYto1qwZPnz4gPnz56Ny5cpYs2aN1OERERGVCCdPnsTw4cNx/vx5HD16FB8/fkTr1q2RmJj4zWWjo6Mxbtw4NG7cuBAi/TYmFkSUrbp16+L48eM4cOAAHBwc8ObNG6SlpUkdFhERUYlw6NAhuLu7o3r16nB0dMTGjRsRExODkJCQbJdLTU1Fr169MGPGDFSuXLmQos0eEwsi+iaZTIb27dsjNDQUfn5+8PDwEOcdOnQIBw8eRCkbB4KIiKhAxMfHAwDKlCmTbb3ff/8dxsbGcn+TpVYknmNR1PDX2OKL313BUlZWxs8//yy+T0lJwbBhwxAVFYVmzZph7ty5qFu3roQREhERFR1v375FQkKC+F5dXR3q6upZ1k9LS8Po0aPRsGFDODg4ZFnvzJkz8PX1RWhoaH6Gm2dMLNJRU1ODkpISnjx5AiMjI6ipqUEmk0kdFuWAIAhISUnB8+fPoaSkBDU1NalDKhU+fvyIrl27YunSpQgKCkK9evXQrVs3zJo1C9bW1lKHR0REJCl7e3u599OnT4e3t3eW9YcPH44bN27gzJkzWdZ5+/YtevfujbVr16JcuXL5FWq+4HMsvpKSkoLY2FgkJSVJEB3llZaWFkxNTZlYFLKYmBhMmzYNmzdvhiAIUFFRweDBgzF16lSUL19e6vCIiIgK1ZfzzfDwcJiZmYnl2V2xGDFiBAIDA3Hq1ClYWVll2XZoaCicnJygrKwsln25Y0NJSQkRERGoUqVKPm1J7jCxyIQgCPj06RNSU1MLOTrKC2VlZaioqPAqk4TCwsIwceJE/PvvvwCA48ePo3nz5hJHRUREVLhy84A8QRAwcuRI7N69G0FBQahatWq29d+/f4+7d+/KlU2ZMgVv377F4sWLYWNjI9kPrLwVKhMymQyqqqpQVVWVOhSiYqVmzZo4ePAgTpw4gYMHD8olFZcvX4ajoyP/XxEREaUzfPhwbNu2DYGBgdDV1UVcXBwAQF9fH5qamgCAPn36wMzMDD4+PtDQ0MjQ/8LAwAAAsu2XURg4KhQR5bvmzZtj3rx54vunT5+iWbNmcHBwwM6dOzmCFBER0f+sXLkS8fHxaNasGUxNTcXJ399frBMTE4PY2FgJo8wZXrEgogJ3+/ZtaGlpITIyEj/++CPq16+PuXPnokmTJlKHRkREJKmc/NgWFBSU7fyNGzfmTzB5xCsWRFTgmjZtirt372LatGnQ1tbGhQsX0LRpU3Ts2BE3btyQOjwiIiLKB0wsiKhQ6OnpYcaMGbh79y6GDBkCZWVl7N+/H3Xr1sWrV6+kDo+IiIjyiIkFERUqExMTrFy5EuHh4ejatSuGDBki93TR5ORkCaMjIiIiRTGxICJJ2NjYICAgAH/99ZdYFhISAgsLCyxYsADv37+XMDoiIiLKLSYWRCQpJaX/PwytXr0aL1++xNixY2Fra4u///5bfOgPERERFW1MLIioyFixYgV8fX1hZmaGmJgY9OnTB7Vr18ahQ4c4RC0REVERx8SCiIoMFRUV9O/fH5GRkfDx8YG+vj6uXbuGdu3aoVevXlKHR0RERNlgYkFERY6WlhYmTpyIe/fuwdPTE2pqamjcuLHUYREREVE2mFgQUZFVtmxZ/PXXX4iMjMSAAQPE8oCAAIwaNQrPnz+XMDoiIiJKj4kFERV5lSpVgqqqKgDg06dPmDBhApYuXYoqVargzz//RGJiosQREhERERMLIipWVFRUsGbNGtSuXRtv377FlCl
TULVqVaxZswafPn2SOjwiIqJSi4kFERU7LVu2xKVLl7Bt2zZYWVkhNjYWgwcPRo0aNXDixAmpwyMiIiqVmFgQUbGkpKSEHj164NatW1i0aBHKli2L27dv87kXREREEmFiQUTFmrq6On799Vfcu3cPvr6+aNmypThvx44duHXrloTRERERlR5MLIioRNDX10f//v3F98+ePUP//v3h4OCAgQMH4smTJxJGR0REVPIxsSCiEunDhw9wdXVFWloa1q1bB2tra0yePBnx8fFSh0ZERFQiMbEgohLJwsICu3fvxpkzZ9CgQQMkJydj1qxZqFKlChYtWoQPHz5IHSIREVGJwsSCiEq0hg0b4syZM9i9ezfs7Ozw8uVL/Pbbb3j06JHUoREREZUoKlIHQERU0GQyGTp37owOHTpgw4YNiIuLQ5UqVcT5N27cgIODg4QREhERFX9MLIio1FBRUcHAgQPlyq5cuYI6deqgVatWmDNnDpycnCSKjoiIqHjjrVBEVKqFhIRAVVUVR48eRe3atfHLL78gOjpa6rCIiIiKHSYWRFSqDRw4ELdv30bPnj0BAFu3boWtrS08PT3x8uVLiaMjIiIqPphYEFGpV7lyZWzduhUhISFwdXVFSkoKFi5cCBcXFz7Jm4iIKIeYWBAR/U/t2rVx9OhRHD58GLVq1cKvv/4KJaXPh0lBEPDp0yeJIyQiIiq6mFgQEX2ldevWCAkJweDBg8WyHTt2wNHREXv37oUgCBJGR0REVDQxsSAiyoSSkhJUVP5/4LwFCxYgPDwcbm5uaNKkCYKDgyWMjoiIqOhhYkFElAOHDh3CxIkToaGhIT7Nu2vXroiIiJA6NCIioiKBiQURUQ4YGBjAx8cHd+7cQf/+/aGkpIRdu3ahevXqmDdvntThERERSY6JBRFRLpibm8PX1xdhYWHo2LEjUlNT+VA9IiIiMLEo8u7cuYMGDRrAxsYGdevWxc2bN7OsKwgCWrRoAQMDA7Hs3bt3aNOmDcqVKydX/jV3d3fIZDK8efMm/4InKsGqV6+OvXv3IjQ0FK6urmL50qVLsXTpUqSkpEgYHRERUeFjYlHEDR48GIMGDUJkZCQmTJgAd3f3LOsuXLgQVapUkStTVVXFhAkT8N9//2W53K5du6CqqppfIROVKo6OjuLr58+fY9KkSRg1ahTs7e3h7+/P52AQEVGpwcSiCHv27BkuX76MX375BQDQtWtXPHz4EHfv3s1Q9+bNm9izZw8mTpwoV66urp7hKkZ6T58+xaxZs7BgwYJ8j5+otDEwMMC8efNQvnx53Lt3Dz///DPq16+P48ePSx0aERFRgWNiUYQ9fPgQpqam4pCXMpkMFStWRExMjFy9jx8/YuDAgVi9ejWUlZVztY6BAwdi7ty50NXVzbe4iUorVVVVDBkyBHfv3sXvv/8OHR0dXL58GS1btkS7du1w584dqUMkIiIqMEwsSoAZM2agS5cuqFatWq6WW7duHSpWrIgWLVoUUGREpZOOjg6mTp2Ke/fuYcSIEVBRUcF///0HmUwmdWhEREQFholFEWZhYYHY2Fh8+vQJwOfO2TExMahYsaJcvZMnT2Lp0qWwtLREo0aNkJCQAEtLSzx//jzb9k+cOIHAwEBYWlrC0tISAFCzZk1cvXq1QLaHqLQxNjbG0qVLcevWLaxbtw7W1tbiPD8/P7x69UrC6IiIiPKXyrerkFSMjY1Ru3ZtbNmyBe7u7ti5cyfMzc3lTk4A4PTp0+Lr6Oho1KpVC9HR0d9sf+vWrXLvZTIZwsLCsh09iohyz9raWu7/bWhoKHr16gV9fX14eXlh5MiR0NTUlDBCIiKivOMViyJu9erVWL16NWxsbDB79mxs2LABADBgwADs3bs3R23UrFkTLi4uSEhIgLm5OXr37l2QIRPRN3z48AEODg548+YNJkyYABsbG2zYsAGpqalSh0ZERKQwmSAIgtRBFKZHjx7BwsICDx8+hLm5udThEFEplZqaiq1
bt2LKlCl4+PAhAMDBwQGzZ89G+/bt2R+DiKgYy835po+PD3bt2oXbt29DU1MTDRo0wJw5c2Bra5vlMmvXrsXmzZtx48YNAECdOnUwa9Ys1KtXL1+3I7d4xYKISALKysro06cPIiMjMW/ePBgaGuLGjRvo168fkpKSpA6PiIgKycmTJzF8+HCcP38eR48excePH9G6dWskJiZmuUxQUBB69OiBEydOIDg4GBYWFmjdujUeP35ciJFnxCsWRERFwOvXrzF79mxUqVIFgwYNAvB5wIbo6GhYWVlJHB0REeVGXs43nz9/DmNjY5w8eRJNmjTJ0TKpqakwNDTEsmXL0KdPH0VCzhe8YkFEVAQYGhpizpw5YlIBAAEBAbCxscHw4cPx9OlTCaMjIiJFvH37FgkJCeL04cOHby4THx8PAChTpkyO15OUlISPHz/mapmCwMSCiKiIOnnyJD59+oQVK1bA2toaM2bMwLt376QOi4iIcsje3h76+vri5OPjk239tLQ0jB49Gg0bNoSDg0OO1zNhwgRUqFABrq6ueQ05T3grVCFr7RNY6OssSEe83KQOgahECwoKwm+//YZLly4B+DwM9fTp0zFw4ECoqqpKHB0REWXmy/lmeHg4zMzMxHJ1dXWoq6tnudzQoUPx77//4syZMzk+T509ezbmzp2LoKAg1KxZM8+x5wWvWBARFWHNmjXDhQsX8M8//8Da2hrPnj3D8OHD0aNHD6lDIyKib9DV1YWenp44ZZdUjBgxAvv378eJEydynFTMnz8fs2fPxpEjRyRPKgAmFkRERZ5MJkO3bt0QHh6O5cuXw9jYWK4vRlpamoTRERFRXgiCgBEjRmD37t04fvx4jgfsmDt3LmbOnIlDhw7B2dm5gKPMGSYWRETFhKqqKoYNG4aoqCi0bt1aLPfx8UGHDh1w/fp1CaMjIiJFDB8+HFu2bMG2bdugq6uLuLg4xMXFITk5WazTp08feHl5ie/nzJmDqVOnYv369bC0tBSXkbofHhMLIqJiRktLS3ydnJyMBQsW4MCBA3B0dES/fv3EB+4REVHRt3LlSsTHx6NZs2YwNTUVJ39/f7FOTEwMYmNj5ZZJSUnBjz/+KLfM/PnzpdgEkYqkayciojzR1NREcHAwJk+ejICAAGzcuBHbt2/HqFGjMHHiRBgaGkodIhERZSMn4ygFBQXJvY+Oji6YYPKoSFyxWL58OSwtLaGhoYH69evj4sWLOVpu+/btkMlk6Ny5c8EGSERUhNnY2GDHjh04f/48mjRpgvfv32Pu3LmoUqUK9u/fL3V4RERUSkieWPj7+8PT0xPTp0/HlStX4OjoiDZt2uDZs2fZLhcdHY1x48ahcePGhRQpEVHRVr9+fQQFBWHfvn2oXr063r59C1tbW6nDIiKiUkLyxGLBggUYOHAg+vXrB3t7e6xatQpaWlpYv359lsukpqaiV69emDFjBipXrlyI0RIRFW0ymQwdOnTAtWvXcOrUKVStWlWc9/vvv+Pff//N0WV3IiKi3JI0sUhJSUFISIjcUwKVlJTg6uqK4ODgLJf7/fffYWxsDA8Pj8IIk4io2FFWVoaLi4v4/tq1a/D29kb79u3RsmVLXL58WcLoiIioJJI0sXjx4gVSU1NRvnx5ufLy5csjLi4u02XOnDkDX19frF27Nkfr+PDhAxISEsTp7du3eY6biKi4sbCwgKenJ9TU1HDixAnUrVsXP//8M+7duyd1aEREVEJIfitUbrx9+xa9e/fG2rVrUa5cuRwt4+PjA319fXGyt7cv4CiJiIqeMmXKYP78+YiMjETv3r0hk8ng7++PatWqYdSoUXj9+rXUIRIRUTEnaWJRrlw5KCsr4+nTp3LlT58+hYmJSYb69+7dQ3R0NDp27AgVFRWoqKhg8+bN2Lt3L1RUVDL95c3Lywvx8fHiFB4eXmDbQ0RU1FWqVAmbN2/G1atX0bZtW3z8+BF+fn5QVlaWOjQiIirmJH2OhZqaGurUqYN
jx46JQ8ampaXh2LFjGDFiRIb6dnZ2GZ4sO2XKFLx9+xaLFy+GhYVFhmXU1dWhrq4uvk9ISMjfjSAiKoYcHR3x77//4vjx43j58iX09PQAfB5PPSAgAD/88ANUVFRw584drF+/HtHR0bC0tET//v3lOoQTERF9IfkD8jw9PdG3b184OzujXr16WLRoERITE9GvXz8Anx9hbmZmBh8fH2hoaMDBwUFueQMDAwDIUE5ERN/WokULufe7du3CTz/9BFtbW7i6umLlypWQyWQQBAEymQxz586Fr68v3N3dpQmYiIiKLMkTi+7du+P58+eYNm0a4uLiUKtWLRw6dEjs0B0TEwMlpWLVFYSIqNhKSUlBuXLlEBERgYiIiEzreHh4oFGjRrC2ti7k6IiIqCiTCaVsQPNHjx7BwsICDx8+hLm5eaGvv7VPYKGvsyAd8XKTOgQiymcJCQlo164dzp07l+l8ZWVljB8/Hj4+PoUcGRFR8SD1+aZUeCmAiIjk6OnpoWLFilleLRYEAdHR0YUbFBERFXlMLIiIKANLS0vIZLJM58lkMlhaWhZuQEREVOQxsSAiogz69++PrO6UFQQBHh4ehRwREREVdUwsiIgog6pVq8LX1xdKSkpQVlaW+/ePP/6AlpaW1CESEVERI/moUEREVDS5u7ujUaNG8PX1FZ9jUb16dQwfPhy7d+/GyZMnoampKXWYRERURDCxICKiLFlbW8uN/nTv3j2oqKjg0qVLGDRoEDZv3pxlXwwiIipdeCsUERHlWJUqVbBjxw4oKytjy5YtmD9/vtQhERFREcHEgoiIcqVFixZYvHgxAGDChAk4ePCgxBEREVFRwMSCiIhybdiwYRg0aBAEQUCPHj1w69YtqUMiIiKJMbEgIqJck8lkWLp0KRo3boyEhATMmTNH6pCIiEhiTCyIiEghampq2LlzJ7y8vLBmzRqpwyEiIolxVCgiIlKYkZERZs2aJXUYRERUBPCKBRER5YtPnz7B09MTGzZskDoUIiKSAK9YEBFRvvDz88PChQuhpqYGW1tbNGjQQOqQiIioEPGKBRER5YtevXqha9euSElJQZcuXfDw4UOpQyIiokLExIKIiPKFkpISNm3aBEdHRzx9+hSdO3dGUlKS1GEREVEhYWJBRET5RltbG4GBgTAyMsKVK1fQv39/CIIgdVhERFQImFgQEVG+qlSpEnbu3AkVFRX4+/vDx8dH6pCIiKgQMLEgIqJ817hxYyxfvhxaWlqwtbWVOhwiIioEHBWKiIgKxKBBg/D999/DzMxM6lCIiKgQ8IoFEREVmPRJxaNHj/Dy5UsJoyEiooLExIKIiArchQsX4OzsjG7duuHjx49Sh0NERP/z/v37fGuLiQURERU4bW1tJCYm4sSJE/D09JQ6HCKiUi0tLQ0zZ86EmZkZdHR0cP/+fQDA1KlT4evrq3C7TCyIiKjAOTg4YOvWrZDJZFi2bBnWrFkjdUhERKXWH3/8gY0bN2Lu3LlQU1MTyx0cHLBu3TqF22ViQUREhaJTp06YOXMmAGD48OE4deqUxBEREZVOmzdvxpo1a9CrVy8oKyuL5Y6Ojrh9+7bC7TKxICKiQjNp0iR0794dnz59QteuXfHgwQOpQyIikpSPjw/q1q0LXV1dGBsbo3PnzoiIiPjmcjt27ICdnR00NDRQo0YNHDx4MMfrfPz4MaytrTOUp6Wl5akfHBMLIiIqNDKZDOvXr4eTkxNevHiByZMnSx0SEZGkTp48ieHDh+P8+fM4evQoPn78iNatWyMxMTHLZc6dO4cePXrAw8MDV69eRefOndG5c2fcuHEjR+u0t7fH6dOnM5QHBATAyclJ4W3hcyyIiKhQaWlpITAwEDNnzsSCBQukDoeISFKHDh2Se79x40YYGxsjJCQETZo0yXSZxYsXo23bthg/fjwAYObMmTh69CiWLVuGVatWfXOd06ZNQ9++ffH48WOkpaVh165diIiIwObNm7F//36Ft4VXLIi
IqNBZWFhgzZo10NHRkToUIqIiJT4+HgBQpkyZLOsEBwfD1dVVrqxNmzYIDg7O0Trc3Nywb98+/Pfff9DW1sa0adNw69Yt7Nu3D61atVI4dl6xICIiSQmCAB8fH9jZ2aFLly5Sh0NElG/evn2LhIQE8b26ujrU1dWzrJ+WlobRo0ejYcOGcHBwyLJeXFwcypcvL1dWvnx5xMXF5Ti2xo0b4+jRozmunxO8YkFERJL6+++/MXnyZPTu3RvXrl2TOhwionxjb28PfX19cfLx8cm2/vDhw3Hjxg1s3769QOO6dOkSLly4kKH8woULuHz5ssLtMrEgIiJJ9ezZE61atUJSUhLc3Nzw/PlzqUMiIsoX4eHhiI+PFycvL68s644YMQL79+/HiRMnYG5unm27JiYmePr0qVzZ06dPYWJikqO4hg8fjocPH2Yof/z4MYYPH56jNjLDxIKIiCSloqKC7du3w9raGg8ePMCPP/6IlJQUqcMiIsozXV1d6OnpiVNmt0EJgoARI0Zg9+7dOH78OKysrL7ZrouLC44dOyZXdvToUbi4uOQorvDwcNSuXTtDuZOTE8LDw3PURmaYWBARkeTKlCmDvXv3QldXF6dOncKvv/4qdUhERIVi+PDh2LJlC7Zt2wZdXV3ExcUhLi4OycnJYp0+ffrIXe349ddfcejQIfz111+4ffs2vL29cfnyZYwYMSJH61RXV89wxQMAYmNjoaKieBdsJhZERFQkVKtWDX5+fpDJZFi1ahVWrlwpdUhERAVu5cqViI+PR7NmzWBqaipO/v7+Yp2YmBjExsaK7xs0aIBt27ZhzZo1cHR0REBAAPbs2ZNth+/0WrduDS8vL3EEKgB48+YNJk2axFGhiIioZPj+++8xe/ZsTJ48GaqqqlKHQ0RU4ARB+GadoKCgDGXdunVDt27dFFrn/Pnz0aRJE1SqVEl8IF5oaCjKly+Pv//+W6E2ASYWRERUxIwfPx4dO3ZEtWrVpA6FiKhEMjMzQ1hYGLZu3Ypr165BU1MT/fr1Q48ePfL0ow4TCyIiKlJkMplcUvHy5UuoqalBV1dXwqiIiEoWbW1tDBo0KF/bZGJBRERF1s2bN9GxY0fUrFkTu3btgpISuwYSEeWHO3fu4MSJE3j27BnS0tLk5k2bNk2hNplYEBFRkfXu3Ts8efIEUVFRmD59OmbOnCl1SERExd7atWsxdOhQlCtXDiYmJpDJZOI8mUymcGKh0E8/J06cUGhlREREuVG/fn2sWbMGAPDHH3/gn3/+kTgiIqLi748//sCff/6JuLg4hIaG4urVq+J05coVhdtVKLFo27YtqlSpgj/++CPTp/YRERHllz59+mDcuHEAAHd3d1y9elXiiIiIirfXr18rPKJUdhRKLB4/fowRI0YgICAAlStXRps2bfDPP//wSalERFQgZs+ejbZt2yI5ORlubm6ZPtiJiIhyplu3bjhy5Ei+t6tQH4ty5cphzJgxGDNmDK5cuYINGzZg2LBhGDZsGHr27AkPDw84Ojrmd6xERFRKKSsrw8/PD9999x0iIiIwduxYbNmyReqwiIiKJWtra0ydOhXnz59HjRo1MgwxO2rUKIXalQk5eSrHNzx58gRr1qzB7NmzoaKigvfv38PFxQWrVq1C9erV89p8vnr06BEsLCzw8OFDmJubF/r6W/sEFvo6C9IRLzepQyCiUiQyMhJeXl5Ys2YNypYtK3U4RESZkvp881usrKyynCeTyXD//n2F2lV4VKiPHz8iMDAQ69evx9GjR+Hs7Ixly5ahR48eeP78OaZMmYJu3bohPDxc0VUQERHJsbGxwc6dO6UOg4ioWIuKiiqQdhVKLEaOHAk/Pz8IgoDevXtj7ty5cHBwEOdra2tj/vz5qFChQr4FSkRE9LU1a9agcuXKcHV1lToUIqJiJyUlBVFRUahSpQpUVPL+FAqFOm+Hh4dj6dKlePLkCRYtWiSXVHxRrlw5DktLREQFZuvWrRg8eDB++uk
n3LlzR+pwiIiKjaSkJHh4eEBLSwvVq1dHTEwMgM8XD2bPnq1wuwolFtOnT0e3bt2grq4uV/7p0yecOnUKAKCiooKmTZsqHBgREVF2unbtiu+++w6vX7+Gm5sb4uPjpQ6JiKhY8PLywrVr1xAUFAQNDQ2x3NXVFf7+/gq3q1Bi0bx5c7x69SpDeXx8PJo3b65wMERERDmloaGBXbt2wczMDLdu3UKvXr2QmpoqdVhEREXenj17sGzZMjRq1EjuqdvVq1fHvXv3FG5XocRCEAS5IL54+fIltLW1FQ6GiIgoN0xNTbFnzx5oaGjgwIEDmDx5stQhEREVec+fP4exsXGG8sTExEzP8XMqV700unTpAuDzMFTu7u5yt0KlpqYiLCwMDRo0UDgYIiKi3HJ2dsb69evRs2dPzJkzBzVq1ECvXr2kDouIqMhydnbGgQMHMHLkSAAQk4l169bBxcVF4XZzlVjo6+sD+HzFQldXF5qamuI8NTU1fPfddxg4cKDCwRARESmiR48eCAsLw5w5cxAbGyt1OERERdqsWbPQrl07hIeH49OnT1i8eDHCw8Nx7tw5nDx5UuF2c5VYbNiwAQBgaWmJcePG8bYnIiIqMv744w907NiRV86JiL6hUaNGuHbtGnx8fFCjRg0cOXIEtWvXRnBwMGrUqKFwuwoNWDt9+nSFV0hERFQQlJWV5ZKKd+/eQUVFRW7EEyKi0u7jx48YPHgwpk6dirVr1+Zr2zlOLGrXro1jx47B0NAQTk5O2XbsuHLlSr4ER0REpIjo6Gi4ubnB0dERmzZtylNnRCKikkRVVRU7d+7E1KlT873tHCcWbm5uYmftzp0753sgRERE+eX+/fu4efMmwsLC4OjoiLFjx0odEhFRkdG5c2fs2bMHY8aMydd2c5xYpL/9ibdCERFRUdaiRQssXLgQo0aNwm+//QZ7e3u0a9dO6rCIiIqEqlWr4vfff8fZs2dRp06dDP2mR40apVC7CvWxICIiKupGjBiBsLAwrFu3Dj///DMuXLgAOzs7qcMiIpKcr68vDAwMEBISgpCQELl5Mpms4BMLQ0PDHN+jmtlTuYmIiAqTTCbD8uXLcevWLZw9exZubm44f/48DA0NpQ6NiEhSUVFRBdJujhOLRYsWFUgAREREBUVNTQ07d+5E3bp1ERkZiVGjRuHvv/+WOiwioiIhJSUFUVFRqFKlClRU8n4jU45b6Nu3b55XRkREVNjKly+PwMBAjB49Gj4+PlKHQ0QkuaSkJIwcORKbNm0CAERGRqJy5coYOXIkzMzMMHHiRIXaVcppxYSEBLnX2U25tXz5clhaWkJDQwP169fHxYsXs6y7a9cuODs7w8DAANra2qhVqxZ/fSIiomw5OTkhKCgI5ubmUodCRCQ5Ly8vXLt2DUFBQXLP+nF1dYW/v7/C7eaqj0VsbCyMjY1hYGCQaX8LQRAgk8mQmpqa4wD8/f3h6emJVatWoX79+li0aBHatGmDiIgIGBsbZ6hfpkwZTJ48GXZ2dlBTU8P+/fvRr18/GBsbo02bNjleLxERlS7p/27t3LkTZmZm+O677ySMiIhIGnv27IG/vz++++47uWNj9erVce/ePYXbzXFicfz4cZQpUwYAcOLECYVX+LUFCxZg4MCB6NevHwBg1apVOHDgANavX5/pZZhmzZrJvf/111+xadMmnDlzhokFERF90z///IPu3bvDxMQEly5d4lUMIip1nj9/nukP+ImJiXl6oGiOE4umTZtm+jovUlJSEBISAi8vL7FMSUkJrq6uCA4O/ubygiDg+PHjiIiIwJw5c/IlJiIiKtnat2+PGjVq4Pr16+jcuTNOnz4NTU1NqcMiIio0zs7OOHDgAEaOHAng/6/orlu3Di4uLgq3q3D379evX8PX1xe3bt0CANjb26Nfv37iVY2cePHiBVJTU1G+fHm58vLly+P27dtZLhcfHw8zMzN8+PABysrKWLFiBVq1apVp3Q8fPuDDhw/i+7d
v3+Y4PiIiKnl0dHQQGBiIunXrIiQkBB4eHti6dWuefqUjIipOZs2ahXbt2iE8PByfPn3C4sWLER4ejnPnzuHkyZMKt5vjztvpnTp1CpaWlliyZAlev36N169fY8mSJbCyssKpU6cUDiandHV1ERoaikuXLuHPP/+Ep6cngoKCMq3r4+MDfX19cbK3ty/w+IiIqGizsrJCQEAAVFRU4Ofnx6veRFSqNGrUCKGhofj06RNq1KiBI0eOwNjYGMHBwahTp47C7coEQRByu1CNGjXg4uKClStXQllZGQCQmpqKYcOG4dy5c7h+/XqO2klJSYGWlhYCAgLQuXNnsbxv37548+YNAgMDc9TOgAED8PDhQxw+fDjDvK+vWDx+/Bj29vZ4+PChJPfVtvbJ2TYVF0e83KQOgYhIYStXrsSwYcMgk8mwd+9edOjQQeqQiKgEePToESwsLCQ738yMp6cnZs6cCW1tbZw6dQoNGjTIl2dXpKfQFYu7d+9i7NixYlIBAMrKyvD09MTdu3dz3I6amhrq1KmDY8eOiWVpaWk4duxYru7vSktLk0se0lNXV4eenp446erq5rhdIiIq2YYOHYohQ4ZAEARcuHBB6nCIiArM0qVL8e7dOwBA8+bN8erVq3xfh0JpSu3atXHr1i3Y2trKld+6dQuOjo65asvT0xN9+/aFs7Mz6tWrh0WLFiExMVEcJapPnz4wMzMTH2rk4+MDZ2dnVKlSBR8+fMDBgwfx999/Y+XKlYpsChERlXJLlizB999/z6sVRFSifenG0Lp1awiCgODgYBgaGmZat0mTJgqtI8eJRVhYmPh61KhR+PXXX3H37l1xDPDz589j+fLlmD17dq4C6N69O54/f45p06YhLi4OtWrVwqFDh8QO3TExMVBS+v8LK4mJiRg2bBgePXoETU1N2NnZYcuWLejevXuu1ktERAQAqqqqcklFSkoKlJSU8v0WASIiKc2bNw9DhgyBj48PZDIZfvjhh0zr5faZdHLL5rSPhZKSEmQyGb5VPS/BFAap73ljHwsioqLr2bNn+PHHH+Hk5ITFixdLHQ4RFVNSn29m5927d9DT08vyYdQAoK+vr1DbOe5jERUVhfv37yMqKirb6f79+woFQkREJLULFy7g9OnTWLJkCdatWyd1OERUSpw6dQodO3ZEhQoVIJPJsGfPnm8us3XrVjg6OkJLSwumpqbo378/Xr58mWV9T09PJCYmQkdHBydOnICVlZXcyKnpJ0XlOLGoVKlSjiciIqLiqGPHjpgxYwYAYNiwYThz5ozEERFRaZCYmAhHR0csX748R/XPnj2LPn36wMPDAzdv3sSOHTtw8eJFDBw4MMtl0nfebtGiRdHpvP1FeHg4YmJikJKSIlfeqVOnPAVFREQklSlTpuD69esICAhA165dcenSJVSsWFHqsIioBGvXrh3atWuX4/rBwcGwtLTEqFGjAHx+Ns/gwYOzfSZPkeq8nd79+/fxww8/4Pr163L9Lr48tbQo97EgIiLKjpKSEjZu3Ig7d+7g2rVrcHNzw5kzZ6CtrS11aERUzLx9+xYJCQnie3V1dairq+e5XRcXF0yaNAkHDx5Eu3bt8OzZMwQEBKB9+/ZZLlMYnbcVeo7Fr7/+CisrKzx79gxaWlq4efMmTp06BWdn5yyfgE1ERFRcaGtrIzAwEEZGRggNDRV/FSQiyg17e3u5vgtfHp+QVw0bNsTWrVvRvXt3qKmpwcTEBPr6+tneStW5c2fExcUhISEBgiAgIiICr1+/zjDl5RYpha5YBAcH4/jx4yhXrhyUlJSgpKSERo0awcfHB6NGjcLVq1cVDoiIiKgoqFSpEnbt2oWhQ4di/PjxUodDRMVQeHg4zMzMxPf5cbXiS7u//vorpk2bhjZt2iA2Nhbjx4/HkCFD4Ovrm+2y6Ttv5/ew2gq1lpqaKj7Buly5cnjy5AlsbW1RqVIlRERE5GuAREREUmnUqBFCQ0OhrKwsdShEVAzp6up
CT08v39v18fFBw4YNxR89atasCW1tbTRu3Bh//PEHTE1NMyyTkJAgxuLk5ISkpKQs21c0ZoUSCwcHB1y7dg1WVlaoX78+5s6dCzU1NaxZswaVK1dWKBAiIqKiKH1Scfz4cRgZGaFGjRoSRkREpV1SUlKGqw1fjlVZPXPO0NAQsbGxMDY2hoGBgdg3Oj1BEPLUx0KhxGLKlClITEwEAPz+++/o0KEDGjdujLJly8Lf31+hQIiIiIqyPXv24Mcff4SFhQUuXbqEcuXKSR0SEZUQ7969w927d8X3UVFRCA0NRZkyZVCxYkV4eXnh8ePH2Lx5M4DPQ2MPHDgQK1euFG+FGj16NOrVq4cKFSpkuo7jx4+jTJkyAIATJ04UyHYolFi0adNGfG1tbY3bt2/j1atXMDQ0zDT7ISIiKu4aN26MSpUq4f79++jWrRuOHDkCVVVVqcMiohLg8uXLaN68ufje09MTANC3b19s3LgRsbGxiImJEee7u7vj7du3WLZsGcaOHQsDAwO0aNEi2+FmmzZtmunr/CQTsrpekkMPHz4EAFhYWORLQAVN6kest/YJLPR1FqQjXm5Sh0BEVGhu3ryJ7777Du/evcOwYcNy/DArIipdpD7fzExYWFiO69asWVOhdSh0xeLTp0+YMWMGlixZIj7BT0dHByNHjsT06dP5Cw4REZVI1atXx7Zt2+Dm5oYVK1agRo0aGDJkiNRhERF9U61atcTnz33rDqNCfY7FyJEjsWbNGsydOxdXr17F1atXMXfuXPj6+nKsbyIiKtE6duyIWbNmAfj89/DkyZMSR0RE9G1RUVG4f/8+oqKisHPnTlhZWWHFihXiufyKFStQpUoV7Ny5U+F1KHTFYtu2bdi+fbvco8dr1qwJCwsL9OjRAytXrlQ4ICIioqJuwoQJCAsLg5+fH3bv3l1g9ysTEeWXSpUqia+7deuGJUuWyD2p+8u5/NSpU9G5c2eF1qFQYqGurg5LS8sM5VZWVlBTU1MoECIiouJCJpPB19cXbdq0QZ8+faQOh4goV65fvw4rK6sM5VZWVggPD1e4XYVuhRoxYgRmzpyJDx8+iGUfPnzAn3/+iREjRigcDBERUXGhqamJvn37ivcqp6WlIS0tTeKoiIi+rVq1avDx8UFKSopYlpKSAh8fH1SrVk3hdnN8xaJLly5y7//77z+Ym5vD0dERAHDt2jWkpKSgZcuWCgdDRERUHCUkJOCXX35B7dq14e3tLXU4RETZWrVqFTp27Ahzc3NxBKiwsDDIZDLs27dP4XZznFjo6+vLve/atavc++Iy3CwREVF+O3LkCPbt24d9+/bBwcEBP/74o9QhERFlqV69erh//z62bt2K27dvAwC6d++Onj17QltbW+F2c5xYbNiwQeGVEBERFaY7d+6gb9++ePHiBfT19bFx40ZUr15drs7x48cxceJEvHv3DjKZDN9//z1mz54NJSX5u4Td3d2xadMmvH79GgYGBgCAv//+G/Pnz0dqairKly+PDRs2wNPTEwsWLEDfvn1hbW2NWrVqFdLWEhHlnra2NgYNGpSvbSrUx+KL58+f48yZMzhz5gyeP3+eXzERERHlyeDBgzFo0CBERkZiwoQJcHd3z1DH0NAQ27dvR3h4OEJCQnDu3Dls3rxZrs6uXbsyPJvp9u3bGD9+PA4dOoQbN26gX79+GDp0KObMmYM2bdogKSkJbm5uePbsWUFuIhFRkaNQYpGYmIj+/fvD1NQUTZo0QZMmTVChQgV4eHggKSkpv2MkIiLKsWfPnuHy5cv45ZdfAHy+dffhw4e4e/euXD0nJydUrlwZAKChoYFatWohOjpanP/06VPMmjULCxYskFvuxo0bqFmzJkxNTQEA7du3x7///ov4+Hhs374dNjY2iImJQdeuXeU6RhIRlXQKJRaenp44efIk9u3bhzdv3uDNmzcIDAzEyZMnMXbs2PyOkYiIKMcePnwIU1NTqKh8vttXJpOhYsWKiImJyXK
ZuLg4BAQEoEOHDmLZwIEDMXfuXOjq6srVdXR0xJUrVxAZGQkA2LJlCwRBwIMHD2BgYIC9e/dCX18fZ86cwZgxYwpgC4mIiiaFEoudO3fC19cX7dq1g56eHvT09NC+fXusXbsWAQEB+R0jERFRgUlISEDHjh3x22+/wdnZGQCwbt06VKxYES1atMhQv2rVqli1ahX69OkDZ2dnvHz5EgYGBmIiY2trCz8/P1hYWGR6CxYRUUmlUGKRlJSE8uXLZyg3NjbmrVBERCQpCwsLxMbG4tOnTwAAQRAQExODihUrZqj79u1btG3bFm5ubvD09BTLT5w4gcDAQFhaWooPhK1ZsyauXr0KAPjxxx9x/vx5XL58GUOHDkVycjKsra3F5du1a4fIyEjUrVu3ALeUiEgxlStXxsuXLzOUv3nzRrxFVBEKJRYuLi6YPn063r9/L5YlJydjxowZcHFxUTgYIiKivDI2Nkbt2rWxZcsWAJ+vspubm8ud+APAu3fv0LZtW7Rt2xZTpkyRm7d161Y8fPgQ0dHRYr+LsLAwODk5AQBiY2MBAKmpqZgwYQKGDx8OLS0tuTY0NDTE1yEhIbh3716+bicRkaKio6ORmpqaofzDhw94/Pixwu3meLjZ9BYtWoS2bdtmeECehoYGDh8+rHAwRERE+WH16tVwd3fHrFmzoKenJw6ZPmDAAHTq1AmdOnXC4sWLcfHiRSQmJmLXrl0AgG7dumHy5MnfbL9///548OABPnz4gO+//x6zZs3Ksu7hw4fRuXNnVK5cGcHBwdDT08ufjSQiyqW9e/eKrw8fPiz3nLrU1FQcO3ZMvEqrCJkgCIIiCyYlJck9VKNatWro1asXNDU1FQ6mMDx69AgWFhZ4+PAhzM3NC339rX0CC32dBemIl5vUIRARFWlPnjxB3bp18eTJE3Ts2BF79uzJ8KwMIipZpD7fzMqXY49MJsPXKYCqqiosLS3x119/yQ1kkRu5vmLx8eNH2NnZYf/+/Rg4cKBCKyUiIiotKlSogD179qBx48bYt28fpk6dij///FPqsIioFEpLSwMAWFlZ4dKlSyhXrly+tp/rn0xUVVXl+lYQERFR9urWrQtfX18AwKxZs+Dn5ydxRERUmkVFReV7UgEo2Mdi+PDhmDNnDtatWycOr0dERERZ69WrF8LCwjB37lz0798fNjY2qFOnjtRhEVEpsWTJEgwaNAgaGhpYsmRJtnVHjRql0DoUygouXbqEY8eO4ciRI6hRowa0tbXl5n/pBEdERET/b9asWbhx4wYOHjyI1atXY82aNVKHRESlxMKFC9GrVy9oaGhg4cKFWdaTyWSFm1gYGBiga9euCq2QiIgot9oe9ZI6hHzzcaANbM2TsXLFSqlDIaJSJCoqKtPX+SlXiUVaWhrmzZuHyMhIpKSkoEWLFvD29i7yI0EREREVFaraGrD8sT6UlZUBQByZRSaTSRkWEVGe5Sqx+PPPP+Ht7Q1XV1doampiyZIleP78OdavX19Q8REREZVYHz58wJAhQ+Do6IjRo0dLHQ4RlRKenp6ZlstkMmhoaMDa2hpubm4oU6ZMrtrNVWKxefNmrFixAoMHDwYA/Pfff/j++++xbt06jslNRESUSzt37sTGjRuhpKQEe3t7tG7dWuqQiKgUuHr1Kq5cuYLU1FTY2toCACIjI6GsrAw7OzusWLECY8eOxZkzZ2Bvb5/jdnOVDcTExKB9+/bie1dXV8hkMjx58iQ3zRARERGAHj16oH///khLS0P37t0RGRkpdUhEVAq4ubnB1dUVT548QUhICEJCQvDo0SO0atUKPXr0wOPHj9GkSROMGTMmV+3mKrH49OkTNDQ05MpUVVXx8ePHXK2UiIiIPt92sGLFCjRo0ABv3rxBp06dEB8fL3VYRFTCzZs3DzNnzoSenp5Ypq+vD29vb8ydOxdaWlqYNm0aQkJCctVurm6FEgQB7u7uUFdXF8vev3+PIUOGyA05y+FmiYiIckZdXR0
7d+5E3bp1ERERgR49emDfvn1i524iovwWHx+PZ8+eZbjN6fnz50hISADweRTYlJSUXLWbqysWffv2hbGxMfT19cXpl19+QYUKFeTKiIiIKOdMTEwQGBgITU1N/Pvvv5g0aZLUIRFRCebm5ob+/ftj9+7dePToER49eoTdu3fDw8MDnTt3BgBcvHgRNjY2uWo3V1csNmzYkKvGiYiIKGdq166NDRs2YOjQoWjVqpXU4RBRCbZ69WqMGTMGP//8Mz59+gQAUFFRQd++fcWH59nZ2WHdunW5alehB+QRERFR/uvevTtat24NQ0NDqUMhohJMR0cHa9euxcKFC3H//n0AQOXKlaGjoyPWqVWrVq7bZWJBRERUhKRPKiIjI6Gjo4MKFSpIGBERlVQ6OjrisyrSJxWK4sMniIiIiqCTJ0+iXr166Ny5M5KTk6UOh4hKkLS0NPz+++/Q19dHpUqVUKlSJRgYGGDmzJlIS0tTuF0mFkREREWQhYUFlJWVcenSJQwaNAiCIEgdEhGVEJMnT8ayZcswe/ZsXL16FVevXsWsWbOwdOlSTJ06VeF2eSsUERFREVS5cmXs2LEDrVu3xpYtW1CzZk2MHz9e6rCIqATYtGkT1q1bh06dOollNWvWhJmZGYYNG4Y///xToXZ5xYKIiKiIatGiBRYvXgwAmDBhAg4ePChxRERUErx69Qp2dnYZyu3s7PDq1SuF22ViQUREVIQNGzZMvBWqR48euH37ttQhEVEx5+joiGXLlmUoX7ZsGRwdHRVul4kFERFRESaTybB06VI0btwYCQkJ8PHxkTokIspnp06dQseOHVGhQgXIZDLs2bPnm8t8+PABkydPRqVKlaCurg5LS0usX78+R+ubO3cu1q9fD3t7e3h4eMDDwwP29vbYuHEj5s2bp/B2MLEgIiIq4tTU1LBz505MnDgRa9askTocIspniYmJcHR0xPLly3O8zE8//YRjx47B19cXERER8PPzg62tbY6Wbdq0KSIjI/HDDz/gzZs3ePPmDbp06YKIiAg0btxY0c1g520iIqLiwMjIiFcriEqodu3aoV27djmuf+jQIZw8eRL3798Xn0NhaWmZq3VWqFAhQyftR48eYdCgQQr/gMErFkRERMVMamoqxo8fjw0bNkgdChFJYO/evXB2dsbcuXNhZmYGGxsbjBs3Ls/PvHn58iV8fX0VXp5XLIiIiIqZbdu2Yf78+VBTU4OtrS0aNGggdUhElIm3b98iISFBfK+urg51dfU8t3v//n2cOXMGGhoa2L17N168eIFhw4bh5cuXkv7gwCsWRERExUyvXr3QpUsXpKSkoEuXLnj48KHUIRFRJuzt7aGvry9O+XU7Y1paGmQyGbZu3Yp69eqhffv2WLBgATZt2pTnqxZ5wSsWRERExYySkhI2bdqEu3fvIiwsDJ07d8bp06ehpaUldWhElE54eDjMzMzE9/lxtQIATE1NYWZmBn19fbGsWrVqEAQBjx49QtWqVfNlPbnFxIKIiKgY0tHRQWBgIOrWrYsrV66gf//+8PPzg0wmkzo0IvofXV1d6Onp5Xu7DRs2xI4dO/Du3Tvo6OgAACIjI6GkpARzc/Msl+vSpUu27b558yZPcfFWKCIiomLK0tISO3fuhIqKCvz9/TlqFFEx9e7dO4SGhiI0NBQAEBUVhdDQUMTExAAAvLy80KdPH7F+z549UbZsWfTr1w/h4eE4deoUxo8fj/79+0NTUzPL9aS/LSuzqVKlSnLryS1esSAiIirGmjRpguXLl2P06NGwtraWOhwiUsDly5fRvHlz8b2npycAoG/fvti4cSNiY2PFJAP4fMXy6NGjGDlyJJydnVG2bFn89NNP+OOPP7JdT0F37GZiQUREVMwNGjQI7dq1g4WFhdShEJECmjVrBkEQspy/cePGDGV2dnY4evRoAUaVe7wVioiIqARIn1Q8fvwYL1++lDAaIiqNmFgQERGVIJcuXULdunXRrVs3fPz4Uep
wiKgUYWJBRERUgmhqauLt27c4ceKEeJ82EVFhYGJBRERUgjg4OGDLli0AgGXLlmHNmjUSR0REpUWRSCyWL18OS0tLaGhooH79+rh48WKWddeuXYvGjRvD0NAQhoaGcHV1zbY+ERFRaePm5iaODjN8+HCcOnVK4oiIqDSQPLHw9/eHp6cnpk+fjitXrsDR0RFt2rTBs2fPMq0fFBSEHj164MSJEwgODoaFhQVat26Nx48fF3LkRERERdekSZPQvXt3fPr0CV27dsWDBw+kDomISjjJE4sFCxZg4MCB6NevH+zt7bFq1SpoaWlh/fr1mdbfunUrhg0bhlq1asHOzg7r1q1DWloajh07VsiRExERFV0ymQzr16+Hk5MTXrx4AS8vL6lDIqISTtLEIiUlBSEhIXB1dRXLlJSU4OrqiuDg4By1kZSUhI8fP6JMmTIFFSYREVGxpKWlhcDAQHh4eGD16tVSh0NEJZykD8h78eIFUlNTUb58ebny8uXL4/bt2zlqY8KECahQoYJccpLehw8f8OHDB/H927dvFQ+YiIiomLGwsMC6deukDoOISgHJb4XKi9mzZ2P79u3YvXs3NDQ0Mq3j4+MDfX19cbK3ty/kKImIiIoGQRAwZ84c7Nq1S+pQiKgEkjSxKFeuHJSVlfH06VO58qdPn8LExCTbZefPn4/Zs2fjyJEjqFmzZpb1vLy8EB8fL07h4eH5EjsREVFxs3XrVkycOBG9e/fGtWvXpA6HiEoYSRMLNTU11KlTR67j9ZeO2C4uLlkuN3fuXMycOROHDh2Cs7NztutQV1eHnp6eOOnq6uZb/ERERMXJzz//DFdXVyQlJcHNzQ3Pnz+XOiQiKkEkvxXK09MTa9euxaZNm3Dr1i0MHToUiYmJ6NevHwCgT58+ciNZzJkzB1OnTsX69ethaWmJuLg4xMXF4d27d1JtAhERUbGgoqICf39/WFtb48GDB/jxxx+RkpIidVhEVEJInlh0794d8+fPx7Rp01CrVi2Ehobi0KFDYofumJgYxMbGivVXrlyJlJQU/PjjjzA1NRWn+fPnS7UJRERExUaZMmWwd+9e6Orq4tSpUxg5ciQEQZA6LCIqASQdFeqLESNGYMSIEZnOCwoKknsfHR1d8AERERGVYNWqVYOfnx86duyINWvWwNHREcOGDZM6LCIq5iS/YkFERESF7/vvv4ePjw9UVFSgrKwsdThEVAIUiSsWREREVPh+++03dOjQAdWrV5c6FCIqAXjFgoiIqJSSyWRyScWrV6/4IFkiUhgTCyIiIkJ4eDjq1auH3r17Iy0tTepwiKgYYmJBREREePv2LR49eoTAwEBMnz5d6nCIqBhiYkFERESoX78+1qxZAwD4448/4O/vL3FERFTcMLEgIiIiAJ8fSjt27FgAQL9+/XDlyhWJIyKi4oSJBREREYnmzJmDtm3bIjk5GZ07d8bTp0+lDomIigkmFkRERCRSVlaGn58fbGxs8PDhQ4wZM0bqkIiomGBiQURERHIMDAywd+9euLm5YcmSJVKHQ0TFBB+QR0RERBnY2tpiz549UodBRMUIr1gQERHRN/n6+uLo0aNSh0FERRivWBAREVG2/Pz8MGDAABgaGuLChQuoWrWq1CERURHEKxZERESUrR9++AHfffcdXr9+DTc3NyQkJEgdEhEVQUwsiIiIKFsaGhrYtWsXzMzMcOvWLfTs2ROpqalSh0VERQwTCyIiIvomU1NT7NmzBxoaGjhw4ACmTJkidUhEVMQwsSAiIqIccXZ2xvr16wEAs2fPxrZt2ySOiIiKEiYWRERElGM9evSAl5cXAODhw4cSR0NERQlHhSIiIqJc+eOPP9C+fXs0atRI6lCIqAjhFQsiIiLKFSUlJbmkIjExEe/fv5cwIiIqCphYEBERkcJiYmLQqFEjDBo0CIIgSB0OEUmIiQUREREp7O7du7h+/Tr+/vtvLFiwQOpwiEhCTCyIiIhIYS1atMDChQsBAL/99hv
+/fdfiSMiKn5OnTqFjh07okKFCpDJZNizZ0+Olz179ixUVFRQq1atAosvp5hYEBERUZ6MGDECAwYMQFpaGnr06IGIiAipQyIqVhITE+Ho6Ijly5fnark3b96gT58+aNmyZQFFljscFYqIiIjyRCaTYfny5bh9+zbOnDmDTp064cKFCzAwMJA6NKJioV27dmjXrl2ulxsyZAh69uwJZWXlXF3lKCi8YkFERER5pqamhp07d8LCwgKRkZEYMWKE1CERSe7t27dISEgQpw8fPuRb2xs2bMD9+/cxffr0fGszr5hYEBERUb4wNjbG3r170bBhQ8yaNUvqcIgkZ29vD319fXHy8fHJl3bv3LmDiRMnYsuWLVBRKTo3IBWdSIiIiKjYq1WrFk6fPg2ZTCZ1KESSCw8Ph5mZmfheXV09z22mpqaiZ8+emDFjBmxsbPLcXn5iYkFERET5Kn1SsXv3bpiYmMDFxUXCiIikoaurCz09vXxt8+3bt7h8+TKuXr0q3nKYlpYGQRCgoqKCI0eOoEWLFvm6zpxiYkFEREQFIiAgAN26dYOJiQkuXboEc3NzqUMiKvb09PRw/fp1ubIVK1bg+PHjCAgIgJWVlUSRMbEgIiKiAtK2bVvUqFED169fR+fOnXH69GloampKHRZRkfPu3TvcvXtXfB8VFYXQ0FCUKVMGFStWhJeXFx4/fozNmzdDSUkJDg4OcssbGxtDQ0MjQ3lhY+dtIiIiKhA6OjoIDAxE2bJlERISAg8PDwiCIHVYREXO5cuX4eTkBCcnJwCAp6cnnJycMG3aNABAbGwsYmJipAwxR5hYEBERUYGxsrJCQEAAVFRU4Ofnhzlz5kgdElGR06xZMwiCkGHauHEjAGDjxo0ICgrKcnlvb2+EhoYWSqzZYWJBREREBapZs2ZYunQpAGDSpEnYt2+fxBERUUFgYkFEREQFbsiQIRgyZAgEQcD58+elDoeICgA7bxMREVGhWLJkCdq1a4dOnTpJHQoRFQBesSAiIqJCoaqqKpdUpKSk4NOnTxJGRET5iYkFERERFboXL16gdevWGDt2rNShEFE+4a1QREREVOjOnz+PkydP4uTJk6hRowYGDBggdUhElEe8YkFERESFrkOHDvj9998BAMOGDcOZM2ckjoiI8oqJBREREUliypQp6NatGz5+/IguXboUiweAEVHWmFgQERGRJGQyGTZs2IBatWrh+fPncHNzQ2JiotRhEZGCmFgQERGRZLS1tbFnzx4YGRkhNDQUI0eOlDokIlIQEwsiIiKSVKVKlbBr1y7Y29tj/PjxUodDRAriqFBEREQkuUaNGiEsLAzKyspSh0JECuIVCyIiIioS0icVJ06cwPXr1yWMhohyi4kFERERFSl79+5F69at0alTJ7x48ULqcIgoh5hYEBERUZHSqFEjVKpUCdHR0eJwtERU9DGxICIioiKlTJkyCAwMhK6uLoKCgjB69GipQyKiHGBiQUREREVO9erVsXXrVshkMqxYsQKrVq2SOiQi+gYmFkRERFQkdezYEbNmzQIAjBw5EidPnpQ4IiLKDhMLIiIiKrImTJiAHj164NOnT9i5c6fU4RBRNvgcCyIiIiqyZDIZfH190apVK7i7u0sdDhFlg4kFERERFWmampro16+f+D4tLQ0AoKTEGy+IihL+jyQiIqJi4+3bt+jSpQtmzJghdShE9BVesSAiIqJi48iRIwgMDERgYCAcHBzQrVs3qUMiov/hFQsiIiIqNrp27QpPT08AQN++fXH16lWJIyKiL5hYEBERUbEyZ84ctGnTBsnJyXBzc8PTp0+lDomIwMSCiIiIihkVFRVs374dNjY2ePjwIbp27YqUlBSpwyIq9ZhYEBERUbFjYGCAvXv3Ql9fH2fPnsXo0aOlDomo1GNiQURERMWSra0t/Pz8YG5uzmdcEBUBHBWKiIiIiq127drhzp070NDQkDoUolKPVyyIiIioWEufVFy9ehV3796VMBqi0kv
yxGL58uWwtLSEhoYG6tevj4sXL2ZZ9+bNm+jatSssLS0hk8mwaNGiwguUiIiIirQjR46gYcOGcHNzQ0JCgtThEJU6kiYW/v7+8PT0xPTp03HlyhU4OjqiTZs2ePbsWab1k5KSULlyZcyePRsmJiaFHC0REREVZQ4ODjA0NER4eDh69eqF1NRUqUMiKlUkTSwWLFiAgQMHol+/frC3t8eqVaugpaWF9evXZ1q/bt26mDdvHn7++Weoq6sXcrRERERUlFWoUAF79uyBuro69u/fj6lTp0odElGpIllikZKSgpCQELi6uv5/MEpKcHV1RXBwcL6t58OHD0hISBCnt2/f5lvbREREVLTUrVsXvr6+AAAfHx/4+flJHBFR6SFZYvHixQukpqaifPnycuXly5dHXFxcvq3Hx8cH+vr64mRvb59vbRMREVHR06tXL0yYMAEA0L9/f1y+fFniiIhKB8k7bxc0Ly8vxMfHi1N4eLjUIREREVEB+/PPP/H999/j/fv3WLlypdThEJUKkiUW5cqVg7KyMp4+fSpX/vTp03ztmK2urg49PT1x0tXVzbe2iYiIqGhSVlbG1q1bMXfuXKxZs0bqcIiyderUKXTs2BEVKlSATCbDnj17sq2/a9cutGrVCkZGRtDT04OLiwsOHz5cOMFmQ7LEQk1NDXXq1MGxY8fEsrS0NBw7dgwuLi5ShUVEREQlhL6+PsaPHw9lZWUAgCAIEARB4qiIMkpMTISjoyOWL1+eo/qnTp1Cq1atcPDgQYSEhKB58+bo2LEjrl69WsCRZk/SJ297enqib9++cHZ2Rr169bBo0SIkJiaiX79+AIA+ffrAzMwMPj4+AD53+P5yK1NKSgoeP36M0NBQ6OjowNraWrLtICIioqLtw4cPGDZsGBwcHDBmzBipwyGS065dO7Rr1y7H9b9+ltusWbMQGBiIffv2wcnJKZ+jyzlJE4vu3bvj+fPnmDZtGuLi4lCrVi0c+r/27j0qqnLvA/h3AzJhMApHBAPGCbkczOSiqCAeqWNiWmB5UikvoA4gWp7FOkpYai49aV4y02PgAalOgJQSsix7X7wdyahEhRS8ADEOFgOZKZCFIPv9g+W8TaCYzMwG5vtZa1buvZ+99++hHuM3z+2zz3QTujUaDSws/r9T5fvvv9f7YW3atAmbNm3C+PHjcfToUVOHT0RERD1ETk4Odu3aBQsLCwwdOhRhYWFSh0RkMK2trWhoaICDg4OkcUiaWADA4sWLsXjx4g6v/T5ZUCqV7MIkIiKiP2zmzJk4ePAgdu3ahRkzZuDrr7+Gl5eX1GFRL9fQ0KC3C7xMJjPKXmybNm1CY2Mjpk+fbvBn/xG9flUoIiIiIkEQsGPHDgQHB+P69esIDw/HtWvXpA6LermhQ4fqbXtwe3i/IWVmZmL16tX48MMPMXDgQIM//4+QvMeCiIiIyBRkMhlycnIwcuRIXLhwAZGRkdi/f79ucjeRoZWVlcHFxUV3bOjeit27d2PBggX46KOP9Dadlgp7LIiIiMhsODk5Yd++fbCxscFnn32GpKQkqUOiXszOzk5v2wNDJhZZWVmIjo5GVlYWpkyZYrDndgUTCyIiIjIrAQEBSE9Ph729fbf4lpeosbERxcXFKC4uBgBUVVWhuLgYGo0GQNuGz3PmzNGVz8zMxJw5c7B582aMHj0aWq0WWq0W169flyJ8HSYWREREZHZmzJiByspKTJw4UepQiFBUVAR/f3/d6qcJCQnw9/fHypUrAQA1NTW6JAMAdu7ciZaWFixatAiDBg3SfZYsWSJJ/LdxjgURERGZJXt7e92fy8vL0bdvX73x8ESmEhoaeteVT99991294+66zQJ7LIiIiMisHTt2DKNGjcIzzzyDX375RepwiHosJhZERERk1lxdXWFhYYETJ05ApVJxzyyi+8TEgoiIiMyau7s7PvroI1haWiIjIwMbN26UOiSiHomJBREREZm9xx9/HFu3bgUAvPzyy/jkk08
kjoio52FiQURERAQgPj4eMTExEEURzz//PM6dOyd1SEQ9ChMLIiIiIgCCIGDbtm0YN24c6uvr8frrr0sdElGPwsSCyITKy8sRHBwMLy8vBAYGorS0tMNyaWlp8PT0xJAhQ6BSqdDc3AygbXk5Gxsb+Pn56T6/XcHkzJkzCA0NhY+PD3x8fJCTk2OSehER9RbW1tbYu3cvli1bhtTUVKnDIepRmFgQmVBsbCxiYmJw8eJFJCYmIioqql2ZqqoqrFixAgUFBaioqEBtbS127typu+7t7a3bnbO4uBg2NjYAgBs3biAiIgJr167FuXPncPbsWYwbN85UVSMi6jUcHR3xxhtvQCaTSR0KUY/CxILIROrq6lBUVIRZs2YBAKZNm4bq6mpUVFTolduzZw/Cw8Ph7OwMQRAQFxeHrKysTp+fmZmJMWPGICQkBABgaWkJR0dHw1eEiMiM3Lp1C4mJidi1a5fUoRB1e0wsiEykuroagwYNgpVV24b3giBAoVBAo9HoldNoNBg8eLDuWKlU6pWprKxEQEAAAgMDsWPHDt35srIyyGQyPPXUU/Dz88OcOXPwww8/GLlWRES92+7du7FhwwbExcXh+PHjUodD1K0xsSDqQQICAnD58mWcOnUKH3/8MZKTk/Hhhx8CAFpaWnDw4EGkpKTg9OnTcHFxwcKFCyWOmIioZ4uMjMS0adPQ3NyMZ599FtXV1VKHRNRtMbEgMhE3NzfU1NSgpaUFACCKIjQaDRQKhV45hUKBS5cu6Y7VarWujFwuR79+/QC07RQbGRmJgoIC3X2PPfYYXFxcIAgCZs2ahS+//NIUVevxujqp/jZRFPH444+jf//+euc3btyIYcOGYejQoXjmmWdw7do1I9WEiAzNwsIC7733Hnx9fVFXV4epU6fixo0bUodF1C0xsSAykYEDByIgIAAffPABAGDv3r1wdXWFh4eHXrlp06YhLy8PWq0WoigiOTkZM2fOBADU1NSgtbUVANDQ0ID9+/fD398fADB9+nScOHEC9fX1AIBPP/0Uvr6+pqpej2aISfUAsGXLFgwZMkTvXH5+PtLT01FYWIiysjKMGDECr7zyijGrQ0QG9uCDD2Lfvn1wdHTEqVOnMG/ePIiiKHVYRN0OEwsiE0pJSUFKSgq8vLywfv16pKenAwAWLFiAvLw8AIC7uztWr16NsWPHwsPDA46OjoiNjQXQlow8+uij8PX1xZgxY/DEE08gOjoaQFuPxfLlyxEcHIzhw4fj8OHDSE5OlqaiPYihJtWXlpYiNzcXL7/8st59JSUlCAkJgZ2dHQBg8uTJ+M9//mPkWhGRoQ0ePBh79+6FlZUVsrOzsW7dOqlDIup2mFgQmZC3tzcKCwtx8eJFFBUV4dFHHwUApKamIjw8XFdOpVKhsrISlZWVSEtLQ58+fQAAixcvRmlpKUpKSlBaWorXXnsNgiDo7ps9ezbOnj2Lb775BgcOHICbm5tpK9gDGWJSfXNzM1QqFVJSUmBpaal334gRI3Dw4EFdD1RGRgYaGhpw9epVI9eMiAxt3Lhx2LFjB2xsbODu7i51ON2CsYeS3hYVFQVBEDiUtJtjYkFE1EWrV6/Gs88+Cx8fn3bXHnvsMfzjH//AU089hTFjxuiWAL6dyBBRz6JSqXDhwgXdEFVzZ8yhpLfl5OTovmCj7o2JBRGZNUNMqv/vf/+Lbdu2QalUIiQkBPX19VAqlbrlfuPj41FUVISvvvoKoaGhcHV1hVwuN1ENicjQftsb/P333+PHH3+UMBrpGHsoKQDU1tbi9ddfx5tvvmncypBBMLEgIrNmiEn1BQUFuHTpEtRqNT7//HPI5XKo1Wpd70RNTQ2Att3RV65ciWXLlpmwhkRkLCdPnkRgYCCee+65dkN7zIGxh5ICbT1EGzZs0M1To+6NiQURmb2uTqrvzMSJE/HII4/A19cXISEhWLx4sdHqQkSmI5PJUF9fjyNHjiAhIUHqcHqkuw0lTU1NhUKhwOO
PPy5BZHQ/OMiXqAsm5SdJHYLBffaE+a10cntS/e+lpqbqHatUKqhUqrs+S6lUtptceObMmS7HSETdz7Bhw5CRkYGpU6di+/btePTRRxETEyN1WCbz26GkVlZWdx1KWllZqTv+/VBSjUaD7du3o6WlRTeU9MSJEzhy5AiOHTuG/fv36+4dPnw49u3bp1tqnboX9lgQERER3afw8HCsXbsWALBo0SIcO3ZM4ohMx9hDSTMyMlBdXQ21Wg21Wg0A+Oabb5hUdGNMLIiIiIi6ICkpCTNmzEBLSwumTZumt9BDb2fsoaTUs3AoFBEREVEXCIKAXbt2oby8HKdOncLLL7+st+pRb2bsoaS/xd3Ouz/2WBARERF1Ud++fZGbm4t58+a126OByFywx4KIep2J6/ZJHYLB/W9ShNQhEFEn3NzckJaWJnUYRJJhjwV1C+Xl5QgODoaXlxcCAwNRWlraYbm0tDR4enpiyJAhUKlUunXDCwsL4efnBz8/PzzyyCOIjY1FU1NTp/cREREZgyiK2LRpE3JycqQOhchkmFhQtxAbG4uYmBhcvHgRiYmJiIqKalemqqoKK1asQEFBASoqKlBbW6vrbvb19cWJEydQXFyMM2fOoK6uDjt27Oj0PiIiImPIysrC0qVLMXv2bJSUlEgdDpFJMLEgydXV1aGoqAizZs0C0LYsXXV1NSoqKvTK7dmzB+Hh4XB2doYgCIiLi9NNjuvbty/69OkDALh58yZ++eUXCILQ6X1ERETGMH36dDzxxBO4ceMGIiIi8MMPP0gdEpHRcY4FSa66uhqDBg2ClVXbf46CIEChUECj0eitha3RaDB48GDdsVKphEaj0R2r1WpERESgsrISU6ZMQXx8/D3dR0REZGhWVlbIzs7GqFGjUFFRgb/97W/Iz8+HtbW11KHdVW/b+NUcN32VEnssqNdQKpUoKSmBVqtFU1MTx7USEZGk7O3tkZeXB7lcjmPHjuHFF1/kkqnUqzGxIMm5ubmhpqYGLS0tANomvGk0GigUCr1yCoVCb9MhtVrdrgwA2NraYubMmcjIyPhD9xERERmaj48PsrKyIAgCdu7ciXfeeUfqkIiMhokFSW7gwIEICAjABx98AADYu3cvXF1d9YZBAW1zL/Ly8qDVaiGKIpKTkzFz5kwAQEVFhW6lp5s3b+Ljjz/G8OHDO72PiIjI2CZPnoz169fD0tJSN/+PqDdiYkHdQkpKClJSUuDl5YX169cjPT0dALBgwQLk5eUBANzd3bF69WqMHTsWHh4ecHR0RGxsLADg8OHD8Pf3h6+vL/z9/eHk5IQVK1Z0eh8REZEpLF26FMXFxVi4cKHUoRAZDSdvU7fg7e2NwsLCdudTU1P1jlUqFVQqVbtyMTExiImJuePz73QfERGRKQiCgGHDhumOf/rpJ1hZWcHOzk7CqIgMiz0WRERERCZ0/vx5jB49GrNnz0Zra6vU4RAZDBMLIiIiIhO6fv06NBoN9u3bh1WrVkkdDpHBMLEgIiIiMqHRo0fj3//+NwBg7dq1yM7OljgiIsNgYkFERERkYrNnz8bSpUsBANHR0cjNzUVSUhIiIyORlJSE8vJyiSMkUzp27BiefvppPPTQQxAEAbm5uZ3ec/ToUQQEBEAmk8HDwwPvvvuu0ePsDCdvk1HUvT1B6hAMauBLB6UOgYiIepl169bh7NmzOHDgAJ555hlYWlpCFEUIgoANGzYgLS0NUVFRUodJJvDzzz/D19cX8+bNw7PPPttp+aqqKkyZMgVxcXHIyMjAoUOHsGDBAgwaNAhhYWEmiLhjTCyIiIiIJGBpaYk1a9bgwIEDAIBbt27pXZ8/fz5CQkLa7etEvc+TTz6JJ5988p7LJycn4+GHH8bmzZsBtG3E+Pnnn2PLli2SJhYcCkVEREQkkT179sDS0rLDa4IgIC0tzcQRkSE1NDSgvr5e92lqajLIcwsLCzFhgv7okLCwsA6X7jclJhZEREREElGr1RBFscNroih
CrVabNiAyqKFDh6Jfv366z7p16wzyXK1WCycnJ71zTk5OqK+vxy+//GKQd9wPJhZERGakvLwcwcHB8PLyQmBgIEpLSzssl5aWBk9PTwwZMgQqlQrNzc33dO3MmTMIDQ2Fj48PfHx8kJOTY/Q6EfVkSqUSgiB0eE0QBCiVStMGRAZVVlaG69ev6z5JSUlSh2RUTCyIiMxIbGwsYmJicPHiRSQmJnY4MbSqqgorVqxAQUEBKioqUFtbi507d3Z67caNG4iIiMDatWtx7tw5nD17FuPGjTNl9Yh6nHnz5t21x2L+/PkmjogMyc7ODnK5XPeRyWQGea6zszNqa2v1ztXW1kIul8PGxsYg77gfTCyIiMxEXV0dioqKMGvWLADAtGnTUF1djYqKCr1ye/bsQXh4OJydnSEIAuLi4pCVldXptczMTIwZMwYhISEA2iamOjo6mrCGRD2Pp6cn0tLSYGFhAUtLS71/pqWlceI2dSgoKAiHDh3SO5efn4+goCCJImrDVaGIiMxEdXU1Bg0aBCurtr/6BUGAQqGARqPR++VFo9Fg8ODBumOlUgmNRtPptbKyMshkMjz11FO4fPkyhg8fjs2bNzO5IOpEVFQUQkJCkJaWBrVaDaVSifnz5zOpMCONjY16X/JUVVWhuLgYDg4OUCgUSEpKwnfffYf3338fABAXF4ft27dj2bJlmDdvHg4fPowPP/wQn3zyiVRVAMAeCyIiMpCWlhYcPHgQKSkpOH36NFxcXLBw4UKpw6JuxJhzfNRqNUJDQ9GvXz/4+fmZojp39UfrGhYWhitXruD999/HunXr4OHhcd9znajnKSoqgr+/P/z9/QEACQkJ8Pf3x8qVKwEANTU1ui9xAODhhx/GJ598gvz8fPj6+mLz5s1ITU2VdKlZgIkFEZHZcHNzQ01NDVpaWgC0jd/WaDRQKBR65RQKBS5duqQ7VqvVujKdXXvsscfg4uICQRAwa9YsfPnll8auFvUgxpzjI5fLsXbtWmRmZpqySndkzLre7Rr1TKGhoRBFsd3n9m7a7777Lo4ePdruntOnT6OpqQmVlZXdYjNFJhZERGZi4MCBCAgIwAcffAAA2Lt3L1xdXdsNt5g2bRry8vKg1WohiiKSk5Mxc+bMTq9Nnz4dJ06cQH19PQDg008/ha+vrwlrSN2Zsef4ODg4ICQkBA8++KBpK9YBY9f1bteIpMTEgojIjKSkpCAlJQVeXl5Yv3490tPTAQALFixAXl4eAMDd3R2rV6/G2LFj4eHhAUdHR8TGxnZ6TaFQYPny5QgODsbw4cNx+PBhJCcnS1NR6nbuNsfnt+53jk93Yuy69pSfA5kfTt4mIjIj3t7eHe7MmpqaqnesUqmgUqk6fMbdrs2ePRuzZ8/ueqBERNTjsMeCiIiIjM7Yc3y6E1PMZ+oJPwcyP0wsiIiIyOiMPcenOzF2XXvKz4HMDxMLIiIiMgljzvG5ceMGXF1d8dxzz6GsrAyurq5ISkqSpqIwbl3vdo1ISoJ4p33ke6nLly/Dzc0N1dXVcHV1Nfn7J67bZ/J3GtP/JkV0eL7u7QkmjsS4Br50sMPzk/Kl+5+WsXz2xDqpQ+iy3tbOgDu3NXPBtkZkGr2trUnVzqT+fVMq7LEgIiIiIqIu6xarQv3rX//Cxo0bodVq4evri23btmHUqFF3LP/RRx9hxYoVUKvV8PT0xBtvvIHJkyebMGIiou7BXHoHiYio+5O8xyI7OxsJCQlYtWoVTp06BV9fX4SFhaGurq7D8l988QUiIyMxf/58nD59GlOnTsXUqVNx9uxZE0dORERERES3SZ5YvPnmm1CpVIiOjsbQoUORnJyMvn37YteuXR2W37p1KyZNmoSlS5fCx8cHa9asQUBAALZv327iyImIiIiI6DZJh0LdvHkTJ0+e1Fu1wcLCAhMmTOhwAycAKCwsREJCgt65sLAw5Obmdli+qakJTU1NuuPr168DAGpqaroY/f359foVSd5rLJcvX+7
w/JVrv5o4EuO6eYd6/vpDvYkjMb47/TvtSXpbOwPY1tjWuqfnt/+P1CEYXObiMKlDkFRva2tStbPbv2e2trZK8n6pSJpYXLlyBbdu3YKTk5PeeScnJ5w/f77De7RabYfltVpth+XXrVuH1atXtzt/tzkcdO/c3pA6AhNZ5SZ1BCbjhh1Sh0AdYFvrfdjWuiezaWtmQup2Vltba1abF3aLydvGlJSUpNfD0dLSgnPnzsHNzQ0WFpKPBKMuaGhowNChQ1FWVgY7OzupwyHqtdjWiEyDba33aG1tRW1tLfz9/aUOxaQkTSwGDBgAS0tL1NbW6p2vra2Fs7Nzh/c4Ozv/ofIymQwymUzv3NixY7sQNXUX9fVt3bUuLi6Qy+USR0PUe7GtEZkG21rvYk49FbdJ+pW9tbU1RowYgUOHDunOtba24tChQwgKCurwnqCgIL3yAJCfn3/H8kREREREZHySD4VKSEjA3LlzMXLkSIwaNQpvvfUWfv75Z0RHRwMA5syZAxcXF6xb17Zz4pIlSzB+/Hhs3rwZU6ZMwe7du1FUVISdO3dKWQ0iIiIiIrMmeWIxY8YM/PDDD1i5ciW0Wi38/Pzw2Wef6SZoazQavbkQwcHByMzMxKuvvorly5fD09MTubm5GDZsmFRVIInIZDKsWrWq3VA3IjIstjUi02Bbo55OEEVRlDoIIiIiIiLq2bgsEhERERERdRkTCyIiIiIi6jImFkRERERE1GVMLMigQkND8fe//12y90dFRWHq1KndJh4iIiIic8HEgnq1nJwcrFmzRuowiEzKlAn1zp07ERoaCrlcDkEQcO3atXZlrl69ihdeeAFyuRz9+/fH/Pnz0djYaJL4iAylJ7arb775BuPGjcMDDzwANzc3bNiwwSTxk/liYkG9moODA+zs7KQOg6jXunHjBiZNmoTly5ffscwLL7yA0tJS5OfnY//+/Th27BhiYmJMGCVRz2KIdlVfX4+JEydi8ODBOHnyJDZu3IjXXnuN+36RcYlEBjR+/Hhx0aJF4qJFi0S5XC7+6U9/El999VWxtbVVFEVRfP/998URI0aItra2opOTkxgZGSnW1tbq7r969ar4/PPPiwMGDBAfeOAB0cPDQ9y1a5fuukajEZ977jmxX79+or29vRgeHi5WVVXprs+dO1eMiIjQi2fJkiW648GDB4v//Oc/xejoaNHW1lZ0c3MTU1JS9OrQ2TuIurO5c+eKAPQ+VVVV4tGjR8XAwEDR2tpadHZ2FhMTE8Xm5mbdfZ213c4cOXJEBCD+9NNPeufLyspEAOKJEyd05w4cOCAKgiB+9913BqkzkbH1xHa1Y8cO0d7eXmxqatKVSUxMFL29vbvwkyC6O/ZYkMG99957sLKywtdff42tW7fizTffRGpqKgCgubkZa9asQUlJCXJzc6FWqxEVFaW7d8WKFSgrK8OBAwdw7tw5vPPOOxgwYIDu3rCwMNjZ2aGgoADHjx+Hra0tJk2ahJs3b95zfJs3b8bIkSNx+vRpxMfHY+HChbhw4YJB30Ekla1btyIoKAgqlQo1NTWoqalBnz59MHnyZAQGBqKkpATvvPMO0tLSsHbtWr1779Z271dhYSH69++PkSNH6s5NmDABFhYW+Oqrr7r0bCJT6YntqrCwEH/5y19gbW2tKxMWFoYLFy7gp59+6tL7ie5E8p23qfdxc3PDli1bIAgCvL29cebMGWzZsgUqlQrz5s3TlXN3d8fbb7+NwMBANDY2wtbWFhqNBv7+/rq/LJVKpa58dnY2WltbkZqaCkEQAADp6eno378/jh49iokTJ95TfJMnT0Z8fDwAIDExEVu2bMGRI0fg7e1tsHcQSaVfv36wtrZG37594ezsDAB45ZVX4Obmhu3bt0MQBPz5z3/G999/j8TERKxcuRIWFm3fMd2t7d4vrVaLgQMH6p2zsrKCg4MDtFrt/VeUyIR6YrvSarV4+OGH9co4OTnprtnb29/3+4nuhD0WZHBjxoz
R/VIOAEFBQSgvL8etW7dw8uRJPP3001AoFLCzs8P48eMBABqNBgCwcOFC7N69G35+fli2bBm++OIL3XNKSkpQUVEBOzs72NrawtbWFg4ODvj1119RWVl5z/ENHz5c92dBEODs7Iy6ujqDvoOoOzl37hyCgoL02uXYsWPR2NiIy5cv687dre2+/vrrujZx+0sAInPGdkXUHnssyGR+/fVXhIWFISwsDBkZGXB0dIRGo0FYWJhumNGTTz6JS5cu4dNPP0V+fj7++te/YtGiRdi0aRMaGxsxYsQIZGRktHu2o6PjPcfRp08fvWNBENDa2goABnsHUW8TFxeH6dOn644feuihe7rvt4n7bS0tLbh69arum18ic2XMduXs7Iza2lq9MreP2fbIWJhYkMH9ftz0l19+CU9PT5w/fx4//vgj1q9fDzc3NwBAUVFRu/sdHR0xd+5czJ07F+PGjcPSpUuxadMmBAQEIDs7GwMHDoRcLjdK7KZ4B5GxWVtb49atW7pjHx8f7N27F6Io6r45PX78OOzs7ODq6qord6e2a2lpCQcHBzg4OPzhWIKCgnDt2jWcPHkSI0aMAAAcPnwYra2tGD169P1Uj0gSPa1dBQUF4ZVXXkFzc7PuC7X8/Hx4e3tzGBQZDYdCkcFpNBokJCTgwoULyMrKwrZt27BkyRIoFApYW1tj27Zt+Pbbb5GXl9duj4mVK1di3759qKioQGlpKfbv3w8fHx8AbUvrDRgwABERESgoKEBVVRWOHj2Kl156Sa/buStM8Q4iY1Mqlfjqq6+gVqtx5coVxMfHo7q6Gi+++CLOnz+Pffv2YdWqVUhISNCNAwfu3HbvRqvVori4GBUVFQCAM2fOoLi4GFevXgXQ9svXpEmToFKp8PXXX+P48eNYvHgxZs6cec/fzhJ1Bz2tXT3//POwtrbG/PnzUVpaiuzsbGzduhUJCQlG+gkRgcvNkmGNHz9ejI+PF+Pi4kS5XC7a29uLy5cv1y2tl5mZKSqVSlEmk4lBQUFiXl6eCEA8ffq0KIqiuGbNGtHHx0e0sbERHRwcxIiICPHbb7/VPb+mpkacM2eOOGDAAFEmk4nu7u6iSqUSr1+/LorivS03u2XLFr2YfX19xVWrVt3zO4i6uwsXLohjxowRbWxs/tCymHdru3eyatWqdstwAhDT09N1ZX788UcxMjJStLW1FeVyuRgdHS02NDQYq/pERtET21VJSYkYEhIiymQy0cXFRVy/fr1BfyZEvyeIoihKktEQEVG3ERoaCj8/P7z11ltSh0LUa7BdkbnhUCgiIiIiIuoyJhZERERERNRlHApFRERERERdxh4LIiIiIiLqMiYWRERERETUZUwsiIiIiIioy5hYEBERERFRlzGxICIiIiKiLmNiQUREREREXcbEgoiIiIiIuoyJBRERERERdRkTCyIiIiIi6rL/A9TUDgVpX/SCAAAAAElFTkSuQmCC", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Top-100 semantic ablation — full prediction shift:\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - "
\n", - "
Input Sentence:
\n", - "
Fact: the capital of the state containing Dallas is
\n", - " \n", - "
\n", - "
Original Top 5 Tokens
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
TokenProbabilityDistribution
Austin0.414\n", - "
\n", - "
\n", - " 41.4%\n", - "
\n", - "
Texas0.056\n", - "
\n", - "
\n", - " 5.6%\n", - "
\n", - "
the0.056\n", - "
\n", - "
\n", - " 5.6%\n", - "
\n", - "
not0.056\n", - "
\n", - "
\n", - " 5.6%\n", - "
\n", - "
Fort0.044\n", - "
\n", - "
\n", - " 4.4%\n", - "
\n", - "
\n", - " \n", - "
New Top 5 Tokens
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
TokenProbabilityDistribution
the0.154\n", - "
\n", - "
\n", - " 15.4%\n", - "
\n", - "
not0.106\n", - "
\n", - "
\n", - " 10.6%\n", - "
\n", - "
called0.064\n", - "
\n", - "
\n", - " 6.4%\n", - "
\n", - "
a0.057\n", - "
\n", - "
\n", - " 5.7%\n", - "
\n", - "
Texas0.044\n", - "
\n", - "
\n", - " 4.4%\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - "
Key Tokens
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
TokenOriginalNewChange
▁Austin0.41410.0012\n", - "
\n", - "
\n", - " -99.7%\n", - "
\n", - "
▁Dallas0.03000.0004\n", - "
\n", - "
\n", - " -98.6%\n", - "
\n", - "
▁Texas0.05590.0442\n", - "
\n", - "
\n", - " -21.0%\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ + "from IPython.display import display, Markdown\n", + "\n", "# Progressive ablation of semantic-target features\n", "sem_groups = {\"baseline\": {\n", " \"P(Austin)\": probs_base[idx_x].item(),\n", @@ -2556,7 +694,7 @@ "\n", "# Show the full top-k comparison for the strongest ablation\n", "strongest_n = max(sem_ablation_results.keys())\n", - "print(f\"\\nTop-{strongest_n} semantic ablation — full prediction shift:\")\n", + "display(Markdown(f\"#### Top-{strongest_n} semantic ablation — full prediction shift\"))\n", "display_topk(prompt, original_logits, sem_ablation_results[strongest_n])" ] }, @@ -2575,7 +713,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -2601,40 +739,11 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": { "id": "GmKhWpuUmS8n" }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Open your graph at: http://localhost:8046/index.html\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "from circuit_tracer.frontend.local_server import serve\n", "\n", @@ -2658,19 +767,7 @@ "metadata": { "id": "uCo4FSQwqcBl" }, - "outputs": [ - { - "ename": "", - "evalue": "", - "output_type": "error", - "traceback": [ - "\u001b[1;31mThe Kernel crashed while executing code in the current cell or a previous cell. \n", - "\u001b[1;31mPlease review the code in the cell(s) to identify a possible cause of the failure. \n", - "\u001b[1;31mClick here for more info. \n", - "\u001b[1;31mView Jupyter log for further details." 
- ] - } - ], + "outputs": [], "source": [ "# server.stop()" ] From 440f097a4dd68b82eae6b1b35bfe3fe6871d7adc Mon Sep 17 00:00:00 2001 From: Daniel Dale Date: Thu, 19 Feb 2026 18:19:43 -0800 Subject: [PATCH 16/18] cleanup language and formatting, prettify with banner --- demos/attribution_targets_demo.ipynb | 88 +++++++++++++----- .../attribution_targets_banner.png | Bin 0 -> 37905 bytes 2 files changed, 63 insertions(+), 25 deletions(-) create mode 100644 demos/img/attribution_targets/attribution_targets_banner.png diff --git a/demos/attribution_targets_demo.ipynb b/demos/attribution_targets_demo.ipynb index 8d25c9cc..b2105835 100644 --- a/demos/attribution_targets_demo.ipynb +++ b/demos/attribution_targets_demo.ipynb @@ -1,17 +1,31 @@ { "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "![Attribution Targets](https://raw.githubusercontent.com/speediedan/circuit-tracer/attribution-targets/demos/img/attribution_targets/attribution_targets_banner.png)" + ] + }, { "cell_type": "markdown", "metadata": { "id": "Qa5r1-7RmS8j" }, "source": [ - "# Attribution Targets \n", - "\n", - "\n", - " \"Open\n", - "\n", + "# Attribution Targets\n", "\n", + "\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/speediedan/circuit-tracer/blob/attribution-targets/demos/attribution_targets_demo.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ "This tutorial walks through the **attribution targets API**, demonstrating how to attribute back from arbitrary tokens, functions thereof, or abstract concept directions in the residual stream.\n", "\n", "The `AttributionTargets` class (in `circuit_tracer.attribution.targets`) accepts four input formats:\n", @@ -20,12 +34,12 @@ "|---|---|---|\n", "| `None` | Salient logits | Auto-selects the most probable next tokens via `max_n_logits` / `desired_logit_prob` (default) |\n", "| `Sequence[str]` | Token strings | 
Attribute from explicitly named tokens, e.g. `[\"▁Austin\", \"▁Dallas\"]` |\n", - "| `Sequence[TargetSpec]` | Custom target | Attribute from arbitrary residual-stream directions via a sequence of `CustomTarget(token_str, prob, vec)` namedtuples or raw `tuple[str, float, Tensor]` |\n", + "| `Sequence[TargetSpec]` | Custom target | Attribute from arbitrary residual-stream directions via `CustomTarget(token_str, prob, vec)` |\n", "| `torch.Tensor` | Token ID tensor | Attribute from specific vocabulary indices |\n", "\n", "See the expandable reference below for `CustomTarget` / `TargetSpec` field descriptions and examples.\n", "\n", - "We use the capital-city prompt from the other demos: the model must resolve *\"capital of the state containing Dallas\"* via multi-hop reasoning (Dallas → Texas → Austin). After comparing the top features discovered under each mode, we run some relevant causal interventions." + "We use the capital-city prompt from the other demos: the model must resolve *\"capital of the state containing Dallas\"* via multi-hop reasoning (Dallas → Texas → Austin). After comparing the top features discovered under each mode, we run causal interventions to confirm the findings." 
] }, { @@ -76,6 +90,8 @@ "metadata": {}, "outputs": [], "source": [ + "#@title Colab Environment Setup { display-mode: \"form\" }\n", + "\n", "import sys\n", "\n", "IN_COLAB = \"google.colab\" in sys.modules\n", @@ -109,6 +125,8 @@ }, "outputs": [], "source": [ + "# @title Imports { display-mode: \"form\" }\n", + "\n", "from functools import partial\n", "\n", "import torch\n", @@ -149,7 +167,7 @@ "source": [ "model_name = \"google/gemma-2-2b\"\n", "transcoder_name = \"gemma\"\n", - "backend = \"nnsight\" # change to 'nnsight' for the nnsight backend!\n", + "backend = \"transformerlens\" # change to 'nnsight' for the nnsight backend!\n", "model = ReplacementModel.from_pretrained(\n", " model_name, transcoder_name, dtype=torch.bfloat16, backend=backend\n", ")" @@ -187,7 +205,7 @@ "\n", "$$\\mathbf{d}_{\\text{concept}} = \\frac{1}{n}\\sum_{i=1}^{n} \\mathbf{r}_i$$\n", "\n", - "**Intuition:** Raw capital-city vectors (Austin, Sacramento, …) are partially explained by their shared geography with their respective states (Texas, California, …). Projecting away the state component leaves a representation of *\"capital-ness\"* that is independent of specific geography. Attributing toward $\\mathbf{d}_{\\text{concept}}$ reveals features the model uses to execute the abstract *capital-of* relation — a strictly more targeted lens than a single logit difference or token string target." + "**Intuition:** Raw capital-city vectors (Austin, Sacramento, …) are partially explained by their shared geography with their respective states (Texas, California, …). Projecting away the state component leaves a representation of *\"capital-ness\"* that is independent of specific geography. Attributing toward $\\mathbf{d}_{\\text{concept}}$ reveals features the model uses to execute the abstract *capital-of* relation in this context — a strictly more targeted lens than a single logit difference or token string target." 
] }, { @@ -276,11 +294,18 @@ "source": [ "## Attribution Configuration\n", "\n", - "Define the prompt, shared attribution parameters, and the three reference tokens (`▁Austin`, `▁Dallas`, `▁Texas`). Two custom targets are built here to probe distinct aspects of the model's reasoning:\n", + "Define the prompt, shared attribution parameters, and the three reference tokens (`▁Austin`, `▁Dallas`, `▁Texas`). This demo explores **four different modes** of specifying attribution targets:\n", + "\n", + "1. **Automatic Salient Logit Targets** (`None`) — the default mode; auto-selects the most probable next tokens.\n", + "2. **Token-String Targets** (`Sequence[str]`) — attribute from explicit token surface forms.\n", + "3. **Custom Logit Difference Target** (`CustomTarget`) — encodes the `logit(Austin) − logit(Dallas)` direction.\n", + "4. **Semantic Direction / Concept Target** (`CustomTarget`) — encodes an abstract *Capitals − States* concept direction.\n", + "\n", + "Two custom targets are built here to probe distinct aspects of the model’s reasoning:\n", "\n", - "- **Custom logit-difference target** (`build_custom_diff_target`) — encodes the direction `logit(Austin) − logit(Dallas)` in the residual stream. Because the attribution graph is anchored to this *contrast* direction, it surfaces features that specifically drive the model to choose Austin *over* Dallas, not merely any feature that increases Austin's probability in isolation.\n", + "- **`build_custom_diff_target`** — encodes the direction `logit(Austin) − logit(Dallas)` in the residual stream. Because the attribution graph is anchored to this *contrast* direction, it surfaces features that specifically drive the model to choose Austin *over* Dallas, not merely any feature that increases Austin’s probability in isolation.\n", "\n", - "- **Semantic concept target** (`build_semantic_concept_target`) — encodes an abstract *Capitals − States* direction built from four (capital, state) pairs via vector rejection. 
This target captures the general *capital-of* reasoning circuit rather than the specific Austin-vs-Dallas instance, offering a complementary view of the multi-hop chain." + "- **`build_semantic_concept_target`** — encodes an abstract *Capitals − States* direction built from four (capital, state) pairs via vector rejection. We can generate an attribution graph associated with this *capital-of* sense and exploit this target to amplify or dampen logits along this semantic axis." ] }, { @@ -347,7 +372,7 @@ "id": "RUn1YKnUmS8l" }, "source": [ - "## Automatic Target Selection — Salient Logits (`None`)\n", + "### Automatic Target Selection — Salient Logits (`None`)\n", "\n", "When `attribution_targets` is `None` (the default), `AttributionTargets` auto-selects the most probable next tokens until `desired_logit_prob` cumulative probability is reached (capped at `max_n_logits`). This is the standard mode used by `attribute_demo.ipynb`." ] @@ -378,7 +403,7 @@ "id": "w3cdLLfJmS8m" }, "source": [ - "## Token-String Targets — `Sequence[str]`\n", + "### Token-String Targets — `Sequence[str]`\n", "\n", "Pass a list of token strings (e.g., `[\"▁Austin\", \"▁Dallas\"]`) to focus attribution on exactly those logits. Internally, each string is tokenized and its softmax probability and unembedding vector are computed automatically — you only need to supply the surface forms." ] @@ -409,9 +434,13 @@ "id": "EQuFE-eimS8m" }, "source": [ - "## Custom Logit-Difference Target — `Sequence[TargetSpec]`\n", + "### Custom Targets — `Sequence[TargetSpec]`\n", "\n", - "Pass a `CustomTarget` (or any `TargetSpec` — a tuple of `(token_str, prob, vec)`) that encodes an arbitrary direction in the residual stream. Here the direction is `logit(Austin) − logit(Dallas)`, so we notionally construct an attribution graph more narrowly driving the selection of the *correct* answer over the surface-level attractor." 
+ "Pass a `CustomTarget` (or any `TargetSpec` — a tuple of `(token_str, prob, vec)`) that encodes an arbitrary direction in the residual stream.\n", + "\n", + "#### Logit Difference Target\n", + "\n", + "Here the direction is `logit(Austin) − logit(Dallas)`, constructing an attribution graph that more narrowly surfaces features driving the selection of the *correct* answer over the surface-level attractor." ] }, { @@ -435,9 +464,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Semantic Direction — Concept Target\n", + "#### Semantic Direction (Concept Target)\n", "\n", - "Instead of a pairwise logit difference, we can attribute to an **abstract concept direction** in the residual stream. We build a `CustomTarget` for an abstract concept direction via vector rejection. For each (capital, state) pair, project the capital vector onto the state vector and subtract that projection, leaving pure \"capital-ness\"." + "Instead of a pairwise logit difference, we can attribute to an **abstract concept direction** in the residual stream. We build a `CustomTarget` via vector rejection: for each (capital, state) pair, project the capital vector onto the state vector and subtract that projection, leaving the pure 'capital-ness' component." ] }, { @@ -466,7 +495,7 @@ "source": [ "## Compare Top Features\n", "\n", - "Extract the top-10 features from each graph (ranked by multi-hop influence) and display them side by side. Feature indices link to their [Neuronpedia](https://www.neuronpedia.org/) dashboards. The *Custom Target* column highlights features that specifically drive the Austin-vs-Dallas logit difference — the multi-hop reasoning circuit (Dallas → Texas → capital → Austin)." + "Extract the top-10 features from each graph (ranked by multi-hop influence) and display them side by side. Feature indices link to their [Neuronpedia](https://www.neuronpedia.org/) dashboards. 
The *Custom Target* column highlights features that specifically drive the Austin-vs-Dallas logit difference — the multi-hop reasoning circuit (Dallas → Texas → capital → Austin). The *Concept Target* column surfaces features associated with the more general *capital-of* relation, which partially overlaps with the multi-hop chain but also includes distinct features that may reflect more abstract capital-related reasoning." ] }, { @@ -499,15 +528,24 @@ ")" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Circuit Interventions\n", + "\n", + "Having identified the top features for each attribution mode example, we can now run interventions, manipulating the discovered features to bolster our credence in their hypothesized causal roles. We explore both amplification and ablation of the logit-difference and semantic concept circuits." + ] + }, { "cell_type": "markdown", "metadata": { "id": "98579UbGmS8m" }, "source": [ - "## Amplify the Austin-Dallas Custom Difference Circuit\n", + "### Amplify the Austin-Dallas Logit Difference Circuit\n", "\n", - "To confirm the custom-target features are causally meaningful, we amplify them by 10× and check that the Austin-vs-Dallas logit gap widens (i.e., the model becomes even more confident Austin is correct)." + "To confirm these custom-target features are causally meaningful, we amplify them by 10× and check that the Austin-vs-Dallas logit gap widens (i.e., the model becomes even more confident Austin is correct)." ] }, { @@ -544,7 +582,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Amplify the Semantic Concept Circuit\n", + "### Amplify the Semantic Concept Circuit\n", "\n", "Same amplification test for the **semantic concept** features. We compare a modest 2× boost (a gentle nudge along the concept axis) with a strong 10× boost to observe the difference in behaviour." 
] @@ -595,9 +633,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Ablate the Austin-Dallas Custom Difference Circuit\n", + "### Ablate the Austin-Dallas Logit Difference Circuit\n", "\n", - "Now do the opposite: zero out progressively more features important to our custom target to dampen the Austin-driving circuit. With enough of the multi-hop reasoning path suppressed, the model can no longer resolve the correct answer and reverts to nearby concepts — the intermediate state (Texas) rather than its capital." + "Now we do the opposite: zero out progressively more features important to our custom target to dampen the Austin-driving circuit. With enough of the multi-hop reasoning path suppressed, the model can no longer resolve the correct answer and reverts to nearby concepts — e.g. the intermediate state (Texas) rather than its capital." ] }, { @@ -649,7 +687,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Ablate the Semantic Concept Circuit\n", + "### Ablate the Semantic Concept Circuit\n", "\n", "Same progressive ablation, now zeroing out features from the **semantic concept** graph. Because the concept direction captures the capital-vs-state pathway, ablation should similarly collapse the Austin signal." 
] diff --git a/demos/img/attribution_targets/attribution_targets_banner.png b/demos/img/attribution_targets/attribution_targets_banner.png new file mode 100644 index 0000000000000000000000000000000000000000..c256b3f40e3ff43619d86a87e831e52f72d4eafa GIT binary patch literal 37905 zcmb@uhdbNv7d{?ZTC_!M)hbPt2x_&p)o6*mBKD?6YtPzL#TH`Ks)`vS_NMlxX4NXC zs#a0d{=K|E-{1HHe%Ix4xn7ZYp68tBKKHrLIne5=FK?4GlY>B@+lmTunjp~ia^UCQ zo20<^f2@p7AW*_rMY-qNUg=xs-f@r_zeO_tc?otw0YQO>f)Do!8jCkd#fyu+%-7u{ zzRnEBG0D_Cj9|yn6;QwK$!2=>GYjisBWGJzV>mn*W823cG7}Rf%c$sNmFOX7E2j=3 zWJ@m>kTz`eHk7q&^ov#Q4c`nnecu!5wS6&nUjP1WdTT7xXsUD5--wR+#h%vI#cMZ% zAN>FQ_3C!j3rEm%s_XxK_5E>wGF@`=mCt^>QSlAM)w2m=GWh@bv#+kT_ri1(?5KfX z9{T@0)}YPRfpt2`w~8rRU%eI`dY%6Nd}0-&)Ay;V}ymnoIH)xYrV+hVxu2M(Wc zvgrCXv7hUIrB!Vollp#qiV7!!bOA@IPe$Us`luv|Dxhe^HA~AnzLOnz`kVY}>)_`c z*_OSzwBLF1+DBEvO5XU)(#@fv?}TBuLPdUSfu?wgUl?;p+tnLFWQ_~MiyybcZl;dz z(dt16H-o8mixU6w#pGUWZ-hJym)U z2WYw;FRp1Xn+6wNq8B;9sjy2DYwDVH>adXEAH>Vs!h?T3&^GXzTRducAo1nHFmc=T zNQ&QiM{>Ur9QhbkQ*^BcbL4|>p^yEn&O5bHb-gDe-KPQXH1*`D0E^@dQBg_?aFKXL z7Cx@RbpDnLA`y!1#Kd3~Z5$mo-+*dWOAB3xRzBCq*Ptww!6bxEDGfL}b zN6S-1ztq2tM+ZoP1Mn%};yl82G_*s>fe2Cyc!?VADmJ1n;^8~G#1@weRdD2k z-=Rf`#LMg4154}_fn|exV6EZV?VVN>Rw5wh>HKADrkKCzNfLPL%B-(6+ZBo)=qs9Q zaIq=E>E!CIk{B=cMyW9d*PmAjQLkX_h`E1B#w?PD`Y%U7=%07WmAsY&nZjnVleg|U zXQ>U)g9L=;p*WBuKV2Al!A%Mj*UK)aU`j1_E$&{r9aj4}1WM57dPfr18R6K}tpuNC zY+m1-I~gVgrEf4*WSGX!vER&@Dbz3)h6Tw3i56TpV5!MpKcxIQ}NVz;z|6`CmLwd zX12q2>R7RS^(eWR7OC{km^-$osB`Fpp1JpP7r;c;a#X15{$l+TQ~C7(9Gn0wIwPm~lqU1@2Kut_^T^Xz%ZGr2ffw!!@74adVYjIxD>A{!BKGB+8PiPmEo7Ah5sX73Kv zgp4_~@ampD|#0kQh_2_A4iMETws5V*d4M{qBUcXRGzEx?^*`)GF;kTfE@>wUFUiH?4gG zmiF%E`2)Ji9#!k{YpoLAXX(Fxpbvwt(|@VypyeP8$~l@r$JCpDX3^^OR>np`vJfe{6ar1Ug|Mf-{t+R)o~^817y(bMi}-eJ zK+Tc_&3z$=1%p-Il|aK|@p~bWaFAx|DzqpagIIUGeem}YlmOz6CTY{M?CVTKy6o_6 zZ}pN)Oh`|+4Mm?HFh3n9QN#}eG3U!z_>Jt#^O?pcitZa-n5_qHA4iij40Jw(zW6u; zP4arMTJGmvADEn?34IepQiUX^0MPb^DjFB8>_z}zyZ*)9M$EUYVLeE>MqB73tU|7C zLLidk;?wRANcGL&n+Owmtgfmvoed>?{6|YUI8qZOXQZL1GueJDSsajOmg_sNT@ChR 
z-f(zeT!NAh~hQ@0@ulm%9L38Qp|30LsPfQy+Ivgq4?&Nh6H8R(b z!h40Y6LNo8Ho~&@_h&vnpUq%fL8b^Lxrv=LR7~sEJmomq$L{bb@&x}9hpcl?jye)O0y{R|%QL?c)0-JlCUK$(; zL5f4z2uJjuzgOOo_zLAPj?Zv4RLYMhuJV{cEf6ABXJ!f#7@Qmx${ z7U>vNSDR8`N-?1DG#-|x`_5B*mNIn2;Zh1KrF#8puPQM)g}ei>WniAYf{dlgVg3yv zm1{UccmZ&oe}{1vUTh`21nlYh`it@QQ_PU)BTwfs&MW70P{SnYn;2tbXJ-Q>h-M}b z;9~4``q5NesB{yUn>UE0;QvOK4|5I8Of6;i)hv!B!gA@KSFuX2bL-gR*vl-JVGhIt zO31!9vDyd);>err8|mJo^b6DgZ!M zfS=VYOlgz#e${*-XRS6QiH5g&Fvs2a)b+jEg)ZP`Il}6jesuXwMZ!UJ$(=dID7go) z!Xo|eJtNvMx?=pE#goNZ$~I_4lN?q{lY}3;aewsoZACQR8UrBQjWF%|J%=j$N5&Ff zPfn7khi;L3X~Uwsho6 z3@Gs9Mhk$KiUQGexZFR=FI1j*S1ZIjJKFo{9T2qYOXnO-yu4)&cI7sti6i483Iq0+ zLIQp^ZFLtJsv5BjS(ouRLUm)F&V87?B{8<5N|M@1{;-g?+mQ)9i2!^+0L+bV1D9nV zn}x-x)J`X3)$FXp1g}jedzy=xY24d6Yb*bTCIdAa=q8vOeAdM4K$949=FB;+jTh4l zKYp~QHM6~0jZ>NRcW^0JpNVxYB#HaV9{wUnU9tevLXOd`#=rRWs6rcp{6$MohCh5i zB78CQl9GF$VoiQ$#@7U?_D*V%L22vU63YsB(;uv!06(r{{=xZhcf`w(zk`8!8+C-5 z58Vwkk^cSfO%wo zoNcn6y;)OmdtQgRcA%g196?qXRent)UdP`lo}|r{kWi2VyXmjNg%!wM3o%<1xGxmpW}dIcF%q9Hy4INn_L zvqcdFKWXl~%;w>Xe=UsFxrAqevJ&Uk=+E{6E6^H{(i;`4#!3*eeA3Nied{mZtS+jP zRkcibg2Pj(*12b159jJIdeh}mBGpH@rO>OoPZYTDPjZoZt38&b>f5?lRc$1bzVLJR zx6QBu>o9?9`iP!6yR)^|(oMGbO20Q;I6;-PB=Ur)ye0M{n65QJa$IaNj|bd>uy>g}V&E^%@a*{O8u;xmy6GF@Pnh!w!%aB>`Kw@x%%C zaAvf+C7q@@?y^Ba$sSM2XQ)KNEPI~o3`xW&$p-U9XB)F z=#>td0q3lIcx5tp8^VGp4v5JqV~B0<(fMc{Z+*LGOl;Syk~-OgCXM6T?7)_O1^e=Klg~J=I+S|6W$uX{*NlTTurS zQ~Yv3Pl7e!FM>#Vp-JQdU%D8t3C{W1v>QvruoL90!`_|_X_Co5f59KU4x)lo=eeYA zPQJT6>+kF1RaKGo^M70iHu0dmJ-5tGFZ%JTgU|Y!C`h^fO-a7v=^dRlcGrRZ8jJ3C z@eWf8cR!A`1g8CuOz@`CB^E^>=4|n(-;8o->ynbJOEfx$(RIMuKl6r2JXs!pqk%zSoKzKFz5v~ zx-|nH+LaskL=~q==!Z#Jzr+JEv=rTAlCj87b_B|9&oGR|MyaJQ8f0b~r|$bzSk za^g5LPNx6maOH-~cY8UYgg~z`l$MNzX-wrDA3m2nhYpHlA>KHX7QQpIi%}71zga-> z$?w`UeXV&iPYF9gROB6iC_~1Gn-D%AmyF=;ZgXqgB(MB_UCjc>_-InUj(vSWCY{|2 zUeYYCa@5kb!~VBB2l{` zZ+6((NOWlZ}Z6PHI{j0&Ax zT>N(H_mY1uzY0K33mcL-2eXFdj-!mhN*sCWE+#5=?3FeE$z^ zH8f5nFMwETX%bUpYqlffP$dt#0#{DWCS|nIPz#^Bou4G4_=lP(y0>%Z1E)*o>YrZ4;sO; 
zCjPx$!9)u53+tA2mhuBy)DCZY*e*{GGD!tkOoc$p><0J!bDV&P(T$(TWd;>#7h8NT zg1n*{&Sio?k0(LMz7e~1_AH8{Qdb}a>^b?J-Pd&a#YALHGsO9 z%E!w0!H2|7V!RN2pgOy~omVH%M@TOc{|CUIy2oSFWwxRiu-*`akj@?I{rNDp0${>i zGZX7v@IVBaqWRBp#qR< zcRy-Y1vZP%sUb<&?sIFdYn!IQU=^8(ToO>a2UpQUF%qA;l*t0Jig$k^RsabTQnUpS z|J2-F@qjQcJ6~UCvbg?nd+ze&Ti^P#9?0*IiZ)Fi+^H_*8*b=_cDBXA$LpL@ywQ}F zKax<+`GmavPrO8{)#61__0WTBLtS4STJx^yGAt*nDCQBCF{p%)$-Pu^`C5m)jc z@d~Y8>Fb1W9*n*mx0#sdlL%W-pfxk-&(Cis0oPDAb%j-CNdcjGguQ^0eA_b!%A#hm zy1&r**kj1Q0v}Ybnpr$TkVql|S!{3FC|fLdbS~z*6kZlqXq@>bE(UyWc1&V@LCpQ4 zb0i__KP+oA0+(Wn_GIVQ?lv^9s7*DP*O(q8EZY{^OZFI~rU*PxR4VaSDopj|xM9>Y zx2GzAE(~YiFl^0n!sK>X&goIIg@rY`x(@6{d`w;XXGH@L`+}VFNvZmS(*A2@e4(=< z#D66CZ&Ro#b&2FToI(EY%t7Sz1yS4F@>$X5agxKBla>EN4=y)b@!3T{5aYI`7lEuCmnH`Xz%D=7mXDi75uP}f#GA+yX z0CCMMfnVYkdpC|gV0a`eBez9g4j%6E{nTFRvSj+lSYg#(SI5i7+5IycS(}r3$=3z! zX+nAPiK@<>g;Kr_%t&tDD{

IDah4V*~-vqC!&uq#sRWT$|M!e|5w@z8NFt;&PJ++{so7u?c}zi-JBl=b^sa1|xbDKYDGBhFQ`rr~#(YJf@&)J5@ut9M=5E4ow z#53irXdd5b2Q^$f28i}9I^P_Y2U`K_(A`1+vZoVRg{`$as9u) zSS(D9f^InKWf^7GjR`mQx?Bu* z(X^hg8s8#f7P|edan_+LwX^8LSkZ1*jHR!n=(`B!iwmgZ#w_X@B!^7(izVOF74yhN z!29o3Ine)kb(h;(9;MSscP;SY`e3-9;fBZf#O7h)*$1x8r!o(Vqi^lIjbEI_u7!!e zE9{pH8z_=$dRxg}Tq+S@EmmVFYHFnXRC7vd*1nWRM^hw~7j^nIQR7_l8Q+O9<9cQP zx#p^iRO}BGlpaNAH(`UxQCqv?>QeLnDB&ez3{;@`k|IkU=* z7)O4VN-jnnJP2?WH(r{65mUTM@6^2S2=V9YzO~3P1PU(8E>5H^dJqAhIrn#oOom3V z51^GDY&YcT<{YcE`WjR{|9%}O zJin;avUL8}!kw`l0WST9eHs?%M3e zVtW(cx#iz3NtY~0DdaA6!2v|kuwK!H$Akza4@(glQr7fE$|N?=qtugC7Lvm3WT zF;<}(s$)$-VrhiVsT;EnSTe2aQbF!bH)NxNnj53({2Tj(JaVQR&X?$sIXCh9 ztJC`)#|Prm@0+sP$Yp#-Mvm;gyOrmRkEqQh>|}1`iL?ErH5K3>{kxPgzn!>fff$@1 z`uJiz1UHF=t7G+Hg|HZ9WhM;v^i2s|r=^2N)~+Ys4FL$@{|i~+YSvI_h4o%vZn|jo zl}T;D`lj+-j&vQDTTAno@t5i3;jVWv-Tv(7etwzUDYPnDHj=J0LnT9RM#Sz-_@6^X z%`C^e2%8{~p?3@QTYZJ`z-RLGig4$-pUEh*9$Jf4|CEUJyn3pN^|2s_Z3Nhy^Cv$M z1_dE3Gg00Ow-}s>UtnpY`rD%XlfsURf9bnz(j#l*It1!P37DR@DgtLbolE$|R#Z1C zpfN_|J+?F!6ojugkwG8mbpLzTw_c-a=eHT_c*z7xAGl4QeuQfr%fn~1hYYFAcKQm~ z+`1NMjml4~of@*@Ewvm&aOB-mS$Nt{sZzpY1feZ1ll}bGblpl%i%uuzJ}Sm~YkTs> zYn$xD*&>T?+kBT2M9#N96o6SjXUkIl4zOyCzBE>}*1L3#S$cJ{ttZ3zu$#syv%83? 
z%j^unX0PIb-Z4UBgIigM0?J+?x?ioVJi7k8qKwH@u_?uwa^{Th6Unu>sl@)MotB%t z+)(u7*@TKIG+H_CeD`$c;zem$z+I5?zbsDILCu5Nh8DsCjW)TbGu~Im$X~xChf{Hh z_9#XA%ocv()6G)>l&^$C+BB0?8h-=FdmPukuICGWbVU+W$thk%1Kd>@f}A6!2sbe& zva^M+5?=V2`ub3aINs2L47rQ!~wgKZ?Eo+B|gt?u?V!5Q%xQ z!(3${;qWWuHD13}S8B1xr;4o&p>v#hAhxP=T)*?ijD%_c^f~6pR@Ssl%m5{lvdtU* zu#UO*6C6Kihjxu?Bme!>qm;7rdPX?hZ0((3^lEa`Z`B>zj4Y)KZxT>STZ!=a!Be%v z!-nn^tBS@`CSBsloh(%6f{ALtzNy70_6#+vO2p8g<9ET?6t~&)>`gY$=fpOOBbRr^ zJLW^CSZn;V3>J)Z&Kr%DDLp9#9s?lw7}@fc>J}l&>MqT#X$uxt>$NJoVIIxUdO7|+-|ibHJO8&ArpM0S?n{-er%tD>t@WCQ zlJkp>v(Bez9t~?}4-b#A;n`eWE|y0<(Ad@=*t}(SivKQYk3KH9+z6WF>iw1v#W^EX zW{mL6mhxPkd4Z?dsUtyJDI?6>E&nTP44@)x@s9zS?%x0$M|e>}s2ly345?D?+ZDBP zz#bmHb%>qk-og<{b2oFM*#{1uLe06OCcswXt(V`=O^$pFb#;>+Ww=%~@;aOmFq??} zNG(46z1RJn9PXsC5Gkg}h4sZLVaz4w$i=EoC9EHEDY?Be@t$ z40`5B*{)`u(a&BMWJ~#!uwwX8UEY)^kdt8iyNe3c#3$r?`Zf@*D8lDrx`>)F@x>qn9zqKl~6c_?Dn7vzV8SXWlNtBtt)|qs@C73MwBh8*E zsbqi?@(FVFMSH1kes?S;KC6D=mF1|v@9=t9Z6(S~lA*p{pL|~Y9*E;cwAr|2UA-n= z{L-z}Ag~Xjt8174D#>L0?!akj{cvg!(KP!VO_^Me9efF*H5MS0)f5Gma!6f(uiqk| z32D$I2`%zh`a0%18}Vj~r*bo@!f2ehF`M8&u`EeFZQ0|u4j-bj!5t7eBtSXB zu&9a_kN}Fml*NZ`l5cHp|8WJ^-Nyg4!iVp0{{Tyt6u)x=97sI8B$iGl&BZAIOVG4M55R3wpkfd<2BJbUV7uap*=+Pm&8 zo%v)pI1=8*dp#W^4a-Y8b=`esZvgwqL0a9u<*o$JV3l57(|;xvNGzm;%GmLw;L%Bs zo*Oy*^v9HDAiuI#>T4UOm=YP-23sWEZAtb?g_hQo!#D^+@5+L;jGbc+QO%VL^PLq3 z&kSZhR0yMIeAnNE|{U?PmWl# z)(TiDsdTS>qtT<3_1K{J_o5)^+069X$(>y}x^Z=CV@j4&az0CT?whHX~C5%?J1NCcpi-IjaaRruoGg)U`{aCm>hfrKRVa@9Ft^sCwLeTiV+d1^UXZuj`(n zVyl!_xQM!C+<(!IGPx7=*^ymRSRiLA5)y{hPVY1xpZ^Zt9d-F-ORSj7*rP0Cs`mra znWNe6W%~}mWf4fy9_*cU`V2^#;tS`mpJSQ@WMAKXSzrhn!s)ygr~;LYXQ-PsY!Y$y9k{9Hgq znJ(-zv3&9mnr&!da0=i{<3iL(%ldAniT%N=Tc%GOsHIo?i+B@5Q4mqe$v)5r_+d&a z#~ZkA&ojZ%op1SlFx~BvGQR(C3jZMQePbA-MT$X@QTXn0V&M%wFBh9hg9sudGetPR z1YKn(F~}rdCrKx*ROdRq(300u>#l^@V_4_CI!=z~BE0=buA;Gm%WV`$zeW$m-Nzr|CF{>iFhm<8NG5a)=n_ zy}OZ-1OKbpJU3+77~t=w%7@fFTS&IJ6|VFOcoJz8W5^h5@`UJ0>v(Jo2%{uI#^zjQ zQ?*&r#Dbx6b%gQ)@xpqqY(CEe?4IW1!6@C}n}LYfE0q}S%!Iy8dLo5KF~dSw&Z7Gc4lmLktKDJtUP#RN 
z^lkR#dojg*r%G5BM}NilDQ1K8URx5CX+MVFZloT3dhAhU zy}it{Y!~`lP@3NeYK+AXKNOyQeby7};mOc6rVDZsD87BIt(jq}C^+Az>SF;JTo27= z0k2wFk=h^0+7kTvu2Ee5cx6W zyuO%!V5-iFX009Ns5&kz)YzMrc$0IImKro?e8`|~JQ!n1+VzBJ-A(_*yS}mL$@AqL z@|#II02&#sG24)m8Ya}Zi5i08_}(-Yp}j_Vjfg%aF?oKvP|i}r0LNa=0tGmqWno(X zA{~NUHdai1+kMr`e?lnfqhLmI)BJ3?T+FbR9!S-y`s|ZR%`LBF>TlcE#$%53RlY6G z@*a(YS#RXs+vxg!YOwutTeb7=RWKDGZED?e_+Gl?#TT|P(;iXjR_@!XWSUWNVo11g z7qy&UWUYtH*p|H*ZaB#FCWEKqeX4ja=Q1|lZ@AJb`XVo%XL?G^&1qh2X5AenuSE{F zjr8~7#n+*;yw-KNuN^7}JN1Imbnp7u9*!VxJsoi61U-|8bMNELUm-5a;g0Ba5fDyi zw&v(af6VGS{25ex^|*L}UC9oY`|{l*2PniCUI32fgsb>xy*LZFhe^%ZZMf`I7nEXxiyD+ezR0@seF|KUdN= zULmIA;k+y;J6X3mLB-9o5SE&VS)$bm+xAxheZM&A-Y;Czz7O(UwyvmA80}@Qa@#^FzNdxcbk`=dIJ|hstdn`-**bnJP zXhAU>_cGE~MMJ~m-EZO}x!a%xAQ0by`JW7xl&skCgbl0L@=ZAYP*wuC;@*TYKqYoV{tS9qG!lW+#zd8hu8RGHcm z_!mnI9rY5%GOTjN(k}OVnmW*A*KZB0J zFhb=HQ;=pqS81ZwlRW+UolntQ`G_AxDU1^owa9BLbJh!j3_hzccE)eKEPGk1VN^NW#f?;6ih)rxzDgK{c)d)9y2$2 zLPw#vpvoKytZ7v5@2TaZSQHugUnq8yd>u`wTNCM?`JApPCG#<;W$OS-LN8wXl0OO!T8gtftbPS!*P)HdEx$HyUN?y%3--r9TxVu^QBKS92G`-Lyq^kl8 z*;7OyMZ9(3^`wx2Vv4Nu^HZ_QSWfmxLE-fN4;&rQUFZFbHl*`nCICXfzjOg5u<4ln z%{V_Bu6`clB(K;0Hmkn=uX0ZqmF(vA^^Mc-Z7~TcaU9NL@D)6&_3mbPz8+;7f12LH zFVr~WhiAFLBtoL;AC4HzOVl;L7GCvZMXQ+=mcFrr<$WlXy3O|C4&};%Klt)=aUHzT z?q#AnPGvu1V&#x^)t)ex`V**8GbH6c@?+?Jdqdc$r~23>Ao4m8FmxeU*U6ST2v*B~ z-bio1iBs6<7c;4PJ>x3;Xdk1k5#LvmNL~88A>sYqi&YJp5TEDc!n#FvGQYTK6YIbt zGi&tf%#k&)jBsT+v_PM?80dN*ClgSUe3EBY2V8MfLtMA0MD}edUk$8w8fp6GbeQn% zZygMIC4=g|r3a=4OlGS;lPga~#e)AW*AfMKHFD;9vEpSVQZ@Lab(-@}ybSb9zajvv3K=iNW_(oN+SiaT)%R>47f z(8T#)^)}Yn2q16LDwbT)TECc|sL;4lOag(qo+L%fg~;QJFm{KTm@2YYQ_;dZhbXP| zT3FZ&A(QP@+9dtU{a+!_zyD|$0tvS(dsp^ zpv&R?cV^Wt%Am!6XYou2JP(#buP5Em7)PtSa$9qFv zt5A>=f~V4w{j~!_ndGcKT~8`Y-z@WZgv$V@QiVPThJo_$s&c|o#0bRG$5&e=JeA`C z`sF~%2TEfNK)W)u@F?vh=Pu79w<``y@|aLsFO~8u!`s${b-)HuzoX-|Y#gNKdTrb|kRg!C71hL&~AwX5cRqkxm z?8le8tjpD`NvHHohn;Y_J2A1b#*tvb-!96nl}zhxJpBzgDTTsmRLWy(Jbw;6)V+W1 zGWGj2^g^Ut$Ri+wCsHOdnSO8Y^N(g^>m#jSestOwM}OWOh=|NA7XNJti#La@<5hvf 
z1e!zznI5cH)4upVFox1d>CfLw3I&IdTthTB)cuN3vQ)W~A141FSKD=&3nXWH@K0vg zjh4$AB~d0MTb4*U9Ce5)_GSP&m-VD1KZDZwIdh1Y?adun|aZeeW^QuH%sZd^J)aZ#G zP&Wc)cDf{ikpW4M2zL%FUl_3~Er;LV+2nn8-{NhetrAjCDrZ_97blCwRmuE&Cp++ zF!99*>iSlXy|sV2AI328kx3tiJ{?rB?#=9!aqrPF@{{@Z37hs~Z)fVZxVLb|4s*ky z@7s4zoo#SMgXU1dzrTTA;%~rU1<_f*3H%HNU2+YWzg%%QA1(K25e~-}%7{89DLchO zJJjCb%FRlApS3o^w+H14g@Mokqf^=j%bJnM=g&1fUJ!dlukx&HFAeVl^zgqa%q3{o zc|HDMk%zA1SZ1KK#L9gh4C04!k#-02Ru>1tH1YSeP~)E>Twc9w9>IXw4uhxfOKDwP z_;2<~Z5c{z%TVF}ba=X#BGb!x85{V8MMAYv7 zYJ;rk*V2jRF1PVp)xUvHHaFk9=ROs*KHtph^f6Rb z-c6hk`{1B+W@PEvYLGVJioK0|!!V}DF`}XE__l0rTshIi5V^}Ai4j^jeqKVA-$pzE zO@ghxuWBE{0i`+MMw2vV;AtOPd;wbfLCmVOakC;y8 zfm^ntPOr^8aSdR$xniYoCueA>G-^9YIHMLpo@$Hizsb#BOk0{r#rdd;N!E}Z;K=7} zLAbWX%cUDqO@v>q`x1f^Wp1luOoh=d0`NRS`-5#mz53^G6$NkmyX++DTN6U72u4kZ z4zq_OS&W3~8<8#prdV)?&Mv=}1<6aj9HTcwF$el83-;-Mit-5B>R42fp=gQ@n7Q)C zWG?DgOujqo}A_N^m_X>7m2{4yLD>Je*m-OvJxF zEx5~EDz3u8LvlS5=v~9$oZs-mz55F(ensR<)4J1%`)=Lf(mJt{Fry<+NY_J}{dy5Zu>fTvu%lRKNAAwJFq9UL8xm zToQ;&{qDaVHDt=2On}X|o0WNgewK(?OUuRz8J0m%DC$HYxWX`N&zsB7I zIaRQB98Hz4_?A|>sW+){ln4HUUg_~pN^rW|9B#SaQUy=%jRMS|yED$%X}HhQwfw;s zVG&Cb3OhPIfVio<3Z(ujf@HBkqda9Gp*lb>pfcT<3s?8JZj)PidZ(xqhP4F$0<|5` zWhzqKk4UM}Y!_H<{=C&?wZZanZm@Z*t@&&*6-c+Zu}%jwhvaI;1Pl|`yDqd~w6K_U zHP3^19aaTXt{nEONr-%MSBlffK+F7M&e|LVJbpdCRNNKsgBrv8>#7!Zg+>&B1rH1; z2FCirIct@dM5g^t)X*-GWxkx-ogH{2Y1Mn6c)FwB^rP8@(?5Q{f9Yb*fVHHD_eGi! 
z(BTM7!zh{(9VDa1-oXx2m6VMFMEN9CeK&u#eaAlXAh0r&vGm;my3+Dq6rbAI#B#Qo zD9c;9K%|I4g>EovMXO(jlhixBW%V)Df)9)#4<8!U4QTKvkI)Ww=T4%;jI?)i3y$taEM zNel_HK1NY!&IY@XVHq6O$RG8{Xi7dh(?~)Y0SYrEh1$6-Xwzvm1Jso~4h4 z`V(}=bZ&a@6}=+PLxWS^2)iArngP2i%q)`ceFey-_W3F_C(&NgPX;9-9V1DR26EJw!9+%r$1dWCw zU~B!3Pc!+x9j-A@9^P3Giz$w=e4|~52h-@((jy&m>^#J^c#pWBS{^;)|F2)>5w&d( z?7g!h%x0wQhW&>pBEqS)KB%q?_QI$vQ)Y^rk1B$nrEW=NV><1`Kj(k-=ejDLuI&Xh zC!m%YS>Nd4LR!e+phqXwOD{K+PyF_1bf32^mcH)e-LKD-5;5HP}(>9c%xOjxsoML8*;ZgL&_$ z(2XV08q67Z-wz9I zau(|tRG2QWH)*eaG&=yS9UaeByxpd7)x*-?zQq0y$Pkjf~YSL%Cq@iw>Ly2+>U6CzM$q6HGO*%XGpT!5% zM)oOE6Bs3>9jq%Mz>Wh-f+!DU55uVn6-U>{|th}jbK-vY#_U{-R)~!naeOT zycOxO)YQ-SU`uR@))(u~r*owD}~ z|C|x{Lf<0OG>+wzhIV)}| z)wS^sT~-4y_hv+>s&$WGM->0FwJKGP7w@sTjG(iNyDxbssx(4@kyQ3kI9SP2z>)Un z&#fQnLrNY+L^<|$y$wF!A6?A|Epvm@9^Y;yLvz-8q0P{#Cn{#dP#Oc$S@??#WnSe;tO}t#++}DWOzLW997B8AXb*L&_Xmw=| zZ({5eE3GDdZk?tV$^moXhyJ~=g^lK|c(xZN#K?xSdYp3ONj0<{idX<%2Qg3a# z-8p`1@4$sv&rU!({JxL%$t}0LJ5nC)=~HLYk|G=U3m9u4+QkuoIKq^0ocP+9<3j?` zA3CFPZ^=i+vAugZc$xK`!rcwp!HlC;0rw%Q)ydY&p;pnpaf2{3 z9!0YxMhpD6f#1D+7o`T~WPatFL$f7|HsnSf4y~cZv9(D*eyxfLPD!vp2--{QyNkzX zsj*%B_ch*KN6MvXn5)F*QsmjTb5#V#MU>j{!R~>!;U%QTA}LH;!yjV=9u$pAJXCcUhb^+f>h+HyJ8=VZaBG&Bfmj29)blLHqsg?C(kEutBDBf zs0!FxJ)*L5*{H5ar7;@Ej;ol3j%$Au4%Nz61ZIMSQafEFUqTAf|Ey$7`UccPBx3pB zU7u3A)@)%;uB42d!RSM!cb@#++nRcoXBi(9{oVM@5Rkcu>Y5impfMV>rhuvFU(HsCKtS~i1%U((z!OoNdS2Rde!)+bz*LQ!+@4ruhO{6E%ozGdIr7eOdZd- zwU=Qq-In?Z;-Um#PPSBXrgRg|(X)rGN-{%4>ZLJ{%rzd2qH49X0G9b0@h zB}JRuxm@@oIXHA9*0uZL`f4zdg5rs`yuw$GD~$a6ssUs@)OU8@bBf<&KNsGm zJapkRzbkr&qL^~;#dhTl0q zaLw$3M@purP?qx#t(UL)-*qX6?Z2B#!AHfBUCJK&OP3t!j=p-jSw6~(IUS@btDrpt=jj^T82i&PQ#cY!zcoT@YYBw7WAu>gP4aI-B z*LCiJQNXOfeFMj;u9Ppw4i7f_l^ql<{P6Fl>*0$C_QK$sAG*`Ph1! 
zMrkBo^uvgE>37e7<9GJvV!{|$J`S&yla2mVl;TqkV^B|cS3ygzup-CC6;J2MY%fq2 z3)+n&)b%(((W^}(=!j8Kkyp1C_Z7_eU-kjBz<|CH;&U-&ZcB92M3mE_B0F|2g^#3# zHUIr&E+b>M`-YvSve~2>21VVikqFTD=v;Ew5hEU*Ib!AKZo(fri38WEcSQ71*4+){ zQGnYc@9oTm8w92@yZxHHo&*euRaRW2C*M(xbzc=Ws!f}w;HHwfws0BJdJr~cC9Rg9 zjk~=Oz;!PzPfsE}lH;pCnSS5IU^9C}qrAso$Fqf6D{g9m@v3BS(%{~Rue*%>w4m5R zhRv zSenxl6)}k06o)>3+@>BmL8?e7fW8|egbF47b_J-6^1{ObQIW>x@?{b4CCvdOZ?E!? zpRPM-nq+;Px%(aGp-&o({})qV9Tipg{XJqJiqeXpAPqBwfFOvZpfGed$k5#&Ie;Jx zAreD5fYLK`H-l2r($WIbF_J?LzsvJo?^^F)vRtgox_9q$_CEW3g0uE`{?Fp}>C!() z%ztasgH!A{fo$Ay$w)+C$0S%?+5=$1rgYj(1GE=phz9bg?h zj9uAY4t!hj+7}wmxtQ?Byz4X^^jkIwihWTIlz-SbGx z0-0P8uYh@K!N$wQ!uB6Ax2!7d=f-IK4-qz<6b%_EWmr1H=pLDl@`N1rYrHg{3fa03 zZ#ioC&cB@iAne?@{QhbvnyZ&{lRm8g z-RNKR{>B3}MqtdQg;1|W|C*3q4)BnO|{!;kk*$DRHL}k(jy{$K$^B;hhKA>|^gX5U&R$|H-Vc8EdCpR56DrLY#hv4k_lT}9 z-Zr=7DDoM;=rYU34!$gT`Ow+eucq-YH;YGZsS94d8d;F9*EA&c^6ZuuE$o9s7WZ-}=tt z$&mDyg;m&Hm)_#Z-JK@==jE&14=u(tfb&FyOPTe1>srILjt<4Uv)vyR!%siy<6G(#II1k$ctJJIQ3g3)UPHfn;2dE zLGR)5)k`1ZBoeN)6XL1$f{b832ylLm2HoXxtC|Lo0C?Dm5f2*NY6?(DGPVGmAJTC{ zpe73RH0d0jgGT*qPkhc(y~vZB_om2#%=ry{tqTA!RarSrWFMs)QL@f{#~0#j;c2H} zCJ5#J3P!7t*`4@7R^oS#F=s8CDZb`$Xga5>XwAY+{KLf3)jdZ28pEP zpO{24J0$gkhkA?PP;pz9evf;NZ0?t{@V&uXj(gDK(s3kFjq(V0FT}l6k3xUNghKMn zl+aDnS)`?nNNvEj!s-q^*6Syrp)ut8J?7VrxA{?=+Anx;VvDlW`W0sve{~`qsjvOnpgQ|x4x4dmlB>`CGVWgDUseCgvrNtS z<`7f0Y#iI{4yc}{I_H-HzEiW|19ukdxv+N4yGQZO)@IaeTg2`gvecg{0_|UQ z=X8^mOBn`lw#)-cB{%yXW0DATzpi-i8j4pvPJ_9a^DBK11STr{?Bg{lzS2J&ka-;i z?Gq*_aI8=)5W8DAI1r!BVkq0TH2FV6l#<}$YmGazD8J&xiy$#r-*AJ8RR9tVq z&o2ShHZ4ke$G1nm%(z;9U%kYvvF&b=%EqM(yq_3eYQ#NW z2ig7_XqQ-3k)gbs*cU&{%IWoDf!5jHx>3yX_=VsLjUDaFruTIX#oTV7cNJe_BC+#E zqz}Y`m-uz|MQV0ozj-I?NP_^7#}nL2$fIdt73!*?FZ;VO#Cp^3Uz_!5wguQNe&YYC zNR|i#%WxO96P=IfSp)F0W^K)vsdreaWSM}zXmEacIW({FTchsA9T$94q~^P8+mP}c z^B6i7_d)8O$8Se_9-9#v)HXf|i#~$@^I?8Ud)zYiAGHcf397nHosGypXj2V{y_{*R z^??-)uKBg7&qNC(MVC8Z$f6fc-YC3WnfhDZhB3D!#bZb6&USIvq}I1$!KDdq&>|Th zNzI*LPq&l1wU9l`%=OKI;Zs57i6Q452Gh(mvF9#+NBp>8&wbO2{MG8JYdId8iuAwy 
zNMD=#k~HWjXzi|S18WyJkSt;U5yKa^f`!DVwBK2gOfcZMZymO3m>o%Z2lKCp5sOO# zC70avsaoWT(K$<8?tPM?8xgv&+`AxP-U@L-2nlA?(19fG4bdwyH|U)_54SzyBW$0H zwQ5al1@DCD!)i3BTH_7Qt*>_9X9@F@y;=;Wu<`a!9MpBAZcu@-oDK+39Id+B4FTnw zRB-_-RVR|%$I@<)lIH*mY0B0M;kldg6!rY9gVzua zVD0mI8%Dx`fK6$KDfwDwV%2Js{=AIy^Ld*6{s#kgtI{|%46jg-{A>xpvc8o70OwjG zdSInbHrel_jF=1Ku5OYj0uDO&<8klv_w8h&`EM2X-lm2%3>$kUva(_1uIjde%gG31` z1}{53daW6+VvO@Y^;^W5$Ux*_%TA{Ml%z!;_0k2rlhTW;7GB`VUW9sEl8WC1sfRvE zHf9!FGS-<+$@hx__HO;Hj6dsz4+Y)BO#?5NHV3qC`?-G@{Azo=G=HvG`cM0I+>ocS zppxx--r0BWZD)VM8_JnxeqeSNR2-F0a#L?)XE!Zk73dui)0tzSJh-ggAgHK}J z1C@*;FRV-RE}4YkwN-{T*)zj;w!)L~R0>L?iv zj^OOj5!-DXjW~x3S<(yf5ut?a2BGhKsu`;HZmQBT79B^w$clAaSb4!DDf-vgYAm5! zOJ#sQ^%v#MoaEoW_>Q^of$2|p@38oHS$%Gw@?YurM8t=X`dJ5b2@j?GG$-XWMTZ$h zhFLPl@^{iY@$_`PAA7$jzzhX&F$JM*MI zx?)&!FGrL5JzXzw$osz8{Bss{@iVnO9{D6Pc306{Zecm2d3}|Bh21nvEm7ad$T8QC zQMC8=$;s%4%fb0$h_69-62Hc+XiCz1(fpfb{k=p{S0`i@LO*=BYxRvgan8M&n{jM; zC1A-e6pZg?4%%t3RGv@CE$(JJYe&!dtghRo5DCv+N(UK8mIuQS`E5e#@gssk{0i>qq15B`I- zYU{@h4Mj`#M7p>FOdfrh>oreqnF)`Fmm+c6T5VbBikqgnb*GL5X=z$I<^P zNVZ8gdM0L)tbP9Oq>_rQ*jac+kIr6_tNGfO{0|IE_`sYU_^>AJ;kI+eXGAG9q=!FG z306;-n^pZym^;t|+BN+uQGHX_v(WvT@@v_uf*!BtVPDd|nL}pr@`63+LN_imIRhkH zQr%3==v9Z z-_808CQ?JY;&RHf1pK}hw$i$6BP_oU#@1UV_f>d5n(mmra57xPSWGcmzoffma-r6e zmC0F~@+-VI6m5F;uiij^v7EqEX|u?IE7oQG9cI47QbG*XL3u=$SRwUdc-fkchoyC2 zpC#};@ott(b?c^mn(3m*F-fP!J?TeYpCCUsyhMnPb)ge{BLsS}vQ70Bt!_T*^Mx^R zv4hIAHN93MY#E-6F7L5Fwed7WAINH3pr46m#qcL^$(Nf5ap@#^^u7KV zaRXY;&d24TgUAJ!iKxWgtg{@ir!fDkqYsnY4rX%;Y^SZ?t+F}mO9vk?3Me<$4L8n; z|3DS`*BD3^rBf|fCdk)*tCxMd@?ACTfr?ZBlthk+=0R$!!Ivhl=RJDsVwvY9P)u*^ z;tAu_oY+`~-hhdEekOe3<9XasY41fjP6_Qay}p`R8qMXCu`T9jx3FJ}8pJmKksPdC zv5@-wGoGZdUu%$MHDD8{TYSyOTRi>0p5dG7*2dbK<9~n^i-u2nGx>M?&;7_+cK6|O zg2w?M+OOB?1Rt3YzFaQOUn}c{WVBFf>XoqXj5~6sj?a{zd$m&(fH2IEo)k=oVvT^? 
zPjv#F;0 z^CzExh6hHYFQsjD!bD{}fY3)EJ-&i#P%?_#h}1KiIWITgb$PGf+(~K{0U4GEfqFKo zD@bv&!?4@o^+Ot*NI!dd``)Ya$+`rQFR{+Jt=}phJA97v96p-nZ*pb(Mb}g51Z7l1 zo7$1v{C6l7wT;lweAY9Q?UM-hf!Pxw(}kRMx-1lVqi^tS?UyFDV9=k0d-@-Y8y%)J zcQ`!HCO1YVKj>5$O$coz`Q{RB=bJE`-P^@q=}*cZI=3T~0{eL!;bT7Z4~>C?8CZ$> zcU?D7y#4?bG}=D-@>x3NCJn z)SpD$scgW}@{eT*6-(z@|7u6i5lz$vqEImVV#hqZ`>QuG{;~mEK`}!<&s0`pN-$CW zLGmXcMUMg`5S!J_9#?l}CVuRq&E=W%!)E;F0JcC}sADp8vX;-=eC0am_oyFZPuFRj2RGr>KzG{*`-$r-zL@=2 zt4hwBVT}%zweU6c#0ByU7!~#mHr%I8s=^%9EGiVa5p8}y&QwXyfVZ)OF1If?VBTMoHRroW%Ac^%g)1orwRS8K&mef~ zO8dm*t`z0Vf#7`JVHh zapySUkfR#O+V{AKpL^QtdfP|mN(|q4Zcd-+ zGUB*RL=w5ACNSt7*#jTvP4t6C-D$@DG0egbNsUZ9)TGxj@7NH9Gt;4 z;RF_P1!C`nT*eF$v>t$t{kG2i7HdGQQ=`?{Xv0qvS|8X-dlv1EJaHe4x{IWg zG3I8fV%2e7e4D8_d(!S*-ShNEO5Z7V5B^LrgKh|9G^0&P>e zsg@1kvyAHw7%P1#1d^h%7-&D1_?Q17WStW=Jk(rzW0`SsS9A8VR)@j>NHckeJU1vp z)1tp!#0q5&5C{BtvLkCWy5W<)62ZGLYio|fBR|z#OGMTTWr8WY(#6KvpiJ|d(#PA3 zi~p&kU~)!S*`h%%?=1Ysd<^p%#ax8wXr3Fl4GcK_%neTe&|c2=iHT~aZD)HNjB0wZ zGvJQ9kwDclA9QOJTp}V!Ji_9?X(zt$nXN=imCh)UN>@eH^K2dZuKwuc`fFlfhOiMD z1C{&%EkxegE?1uR3aN2)WkEZIFA^wCUj#zNK=5rxhh~1uvleE}}&BV)dd+ zgnuSmXGIV8uA#BNwGGL$tX%^fv7qKL25vm^7HFr580*|tSCv=`8ci7c=lH}%uG`P> zH9{sZSCl}MVv#|Of|6nRK72RX8*0VfJ!dA7cW43^d)l03YrZ zX$Dt$%$Vgg!PnN_n#ZON$eT0&wrIJRz?5v;Ls(79waFLOeFJ%?!fFvCLS+cf4IOa% z{#2#e+jbJ-vx58@PndO+QA~8mDp(X@i|d&OU_b-q&4f=vCWoHXnH)$R6;`gI$u?tP z;F|oPuF37_J6Pp>%^f|RdF-^D^P84r2Ec;JC`F?G*{`MRH+E?#{sIO z`L%8YcdMxt`WYmejh?UM@8r~}uk*6A|37~M(tOga(&99UjYulc=J$RM2NHU+aH<7>_|=DMB>v?pjg*Zt#%26sK^Tj z_@df5{-!!o*`PmjwR0Q2!UVX0``oG0EGPgu@>c;EUC z$mNjb>`va*j}0aIio(RLY{=-H(E&m=M`M&pJXH4K%IJ$sUdhHiycJ>TD^|JvgHn*O z>2EjW%gC%vqd9}nZ|rf%7hNBKneXbp!}*H475%0~%S1ZpF1az)P5ZDe5nzB$2Owsy z28sp3!uU~%xm0xoNR!+cPzw?<%x}T_fPUswdEAZz@aTOrXMNszozX+}`PB;i0|zPm z2F}53!@a@*Up!;2Vz%7F_8Mfc1}v`6q!dp~=QeupNjBHg&WQ0tCAqOme+%?h&YxH*W2KKCoDLi_XI1?6=f z@B{kalAeF|%`%doUnL2E*#d+dJG!p_h~n1v$=Tcx6ZTxZ=()+}PugbsCXC6l?hC1e z(OgX~4f|U&LuK5`s|@8&F#mi#N0;JtI$>eqhIDfu1TC-p@ou*w0B8&UL3t1;8?Rho 
zT%gnWYV=8puolDSyNL)vPfNKqynTGNUt?qNvepYmKFOOw-LKo+%yQP(hHH<~1)w(p ziQvN8L`|a;1@K9p9^;)<09jCz7-$JP3Z(Mt<+0^ zDBgcJ;Jj8L>pRFs2HwK_hM;kPECBRWwnV;DCL3$DpzwtL1h^Fm$7@9#wSGTW{v2`U z$^A`r~yH>W-lXXHRLeua}j$rsf-2X^*Gkdq=H9v)`TU;i&WOnWXnue+qr#2pi?Dm^uHL>gv?CroNmZ4kycL8~i;c@UQZzTE z-o39#)(5|S$IBePIoRlzZNvQ@&Q2>~DuIOhEA4t?(^pc}y+_3@3#Pp!HPFYy8Y;rI z!p8AnQjDXsi~@07 z!uaL5At(sK=rk=Gmye?C;*H`S=wK1QX_E$#qxpC@Tre!)=h+lu3)c{)WiStZAmn!% z$Et13qUd4l*Rl@v05DBisI|z~7FXA6Kyf&ZvCeZuX|hykC1ckj3js|brP@w}f`we1 zp5ZE??V)9@HK};BW>L~_PoD!9HxhQk;YGFK;5w?-5Aj7rac&>ochxpfxgoo!dXrKSU2 zOa8gR<5L<^XL%rAWN9f=D$u3~$*fmbj5JEBay#mX6iD(@I(*hzUq17__+cT^9UZIE zVrgjmr8J})3_wmV=FQ=JJNKg-%E*ur8Q1Ny6r+pfH7Fg~|_FZjItVeAn#3a`vC0AYg- z8}GgdQLX6G_Gf+i<<%VHKu5NCMl97lA9iApE8k&BQ9={=^#?y93K1Wiq&wZIR78dF z3yXrF6bTJlZ}G0OOP0V($(Ki%4Po*dU0e+%;`0YvBZoiZ{O>XIB7{WqVho3Wk_shh zc_3+1%#w}G6zPLpY>Q$>;OsOisOWFGZ3kW(&HE$sdkxNZeYA~)99lafSF56V;|3c6 z8)LivP3<)NxF+n7`h`qX!B7!{;uIp5MjPg68^Fsig(%4A&Qz2d&R@elHE0;v1ap-N)ST z4^O|KwL27kM0USh(>8FLdL?I|{-WBUa;y3uJ)ezWt#HxqgBLXupm&a^Q|h~cgnG() zmmJ8B!Rf zqWnZ=d!=MT|420XLWJM`hI{q;ht8C|&6Fs9g{@QiJIuX`N}|qwWo2yQt0{C7UDy7= zj}Pi_q&o0ErD_EA^P;MvW^KxqihH z!xU<*lqY~*2jjEOAdK4HOKCi%Hraz^F?vZAnwbPr<%#mLx)d5`MSH`J+WC2-8$s*Y zEF~lXFQ1e+lq=59Sx4W5QhZW^`G|jFe0af7`ZDAA{jr1d?_IF@se|>ei{UPNyI9@H zxu~~(chd4sKIMZcodc^54u;F;WIq4W3XbCD_XL1PL5=GH$t~JNiTKoJuGJ;%GoP)A z^jh_hNSj+0=#s)g;Tbfqw1*rIS0WQItGucP?y8t26vK9cFa|4n%267D#iezK@8Y9N za=oG7m%S#>G7qU$#vCJcR5)28+vEAgOJa?I_HW>-{aZQg3uRZBI2NKF*uwrxR8QiJ z%;y(nRF#7{ZU*2lh7DAL*%CC&b`$i@B5k=SXb>H73dGYjPm1yyvTj2jRfdmbJI9>Y z*w2)+e#^n+4|$wJ$w15VryepLT6zJLDt%6+RDzayEKs$T$& zgb|N>?y6Tpff3x3%?`oe_K+0 zj&3Jn)`QDw$tF2;vDza)E*6 zJJFGlG1vT0)QKI5I4|VTbDn20`5F>7_pJKAYw22yK7oMT6y#u$Hg7GmHhY#H@($eu zktP2~|M@0PZ!^DTzj9!@A?Dr5D16m@o*JHv;O(S+SJ(S6n>EEyA9i!7O?Uv*YJ;7- z^lv_pY2usDeB>cvA~@5V$LlzyJ011#vE_j8Gx^fP||Q+&uMT}K~-Qla`W+l z=Np-_PgFl@!dw@qbp#NDjGMK|-`+gwtx12|_?GdN$&;e!ZVX%Tz^KXRrivJPD-(8k z2*K2bF0xYc)c+KSaGNeO3P_9o?)P+sM3u#|dPu#H0)dVjNuGv}_iLZ^F(|~cF_nXa 
z;(*oi(mg+SXV4iMyF5aC;Ho_FnxjA)?M)YTLx<`g5)L3BN(1ZY3tMf45H!|wn=^Hd zySV_Wvd+svO8fuGINR^sS|@w(SBxnReZ8Sry1~gNu3d`*&@-ek@)BmXH1iRiC?;*I09oN-6rF+~M-C#5iR$&qgq^pu1v8Zwq-93Kr5N!5PK zzxJ-SK=>Qu`+q0SJx<>2zzkQkBxX>Ycd}DdFvHWdRUK&JdCTh}Fo1aN6%eD9wkomu z!wx-dQj&yY2_NH)pxl^Qp~(x>w$@Q#JcP=(n~Sf($$waQH?}abOu*zuFdKyHZwL@c z*QOiDh~$51;wXlIfA1ZCZThIkYk>j27l!C>H^v}0CHn5$1|VYPgjqAo1l=D!sqK-G zB7|cp@Y#Ux1P#HDXA#PZYM04Hh&?no6Z~KFTH173{uYuCFznEcsX2CNa3%6_lmg;< zj1|SQKy6}IQhcYa`?%@M!#E@5C5mCh^PcN%_{4Ai{PLWA>DU^+|8%FrHOg5Fny>cq zOES$)XV%%Z^cy+}zo3-%M=lcufV4a~=H|~U3H5RH#b(hZR* zZfP%E;WPayDT>eJ#pQS-eN^e=S|`6vV@8Uj z__W(p3gMq&_gI3PMR2NRwNxmF2Ets^fz)x&u0!zM+1j~TJuR9z;m1EVqDJ^Gd?5jI zBw49G+XEB4;Q=bQQoot1T<%FyJNunw3w>FO)JS?JR1zyVB3sO?$bK~i;;n4|n)@f- z*vya!M4*J2x2PqlXX`h^B|<#*9`i}T9Lrn$z)J^qU##Dp#Sm6E<#6TDW<2Q6%r_Ja z&GOZIlHUE4=SQX3Zh)%7(iX*XqJ|l|wtbIEXQ1onso6CGf;-iPLe$gcolv0kUtMG-La=-KGq&JrdG1omHxhh)kV{7Ja)x>bQhv=PxMbn{`}f!Q`+@mW zP0idTpl^62d*6Kf$~I_6LjO#DideB?>MO8|dv(McAo=}R#c%!3y5W}hqxoiQ`n{@A zCUao2+9$63=6OeYXZ&IPjUd)9O$n`b%<-S~fZadV&OoMuEAM%d1Nxml=_qjN76#my zwg>>UKGJ^2u4XTpplTK(Q1gu2;>WK0Xk%7w9@Ke^N&c)xq&nJ^2YgNx+8hqN# z&0!R@n|7MnR9Svpo?JW~xcBK)MBrEiRo4~e+=p=>3Z1yI(DCjU^F33PxXMNDZD#hY z2u{%Z{TW@8SU*F69~fEY2Kn?{=SmdV&WI69P#?Y_rVhfBVI@rohV z;dmoCRgo1`0HM<}e=X7gy4L7O0_Em2qZ#d+Pi+F`00fq^`x6?-~ZrE5EvHg7!Kswg9>wur1-*@>vx@1a!La>3}3}`jOZRA zZ}V8idc@c^eB5L-IAi4Zv4k*@u9kq?9~r2nlMxZze_ZX6G5^2M#%OL^{k+@Uc*MRp zrJnK|O3J%7N($dRI5_n&hXV;h=#(_^VIcsCRsuok$t`RJ+qI1(F7Pf~Uz{d!Z7*y} zEAh?&M2uaB58gV@op1!ZeeMy4uic z+S5@RVG868up6L!E)<}~_(J)ki0t^nT#Rn^?d$~y{MXGlouJtk%%9O(w`a8RRP2GS^Wgk+TaB*Je$^)GgBLN)n!w`lLFje2i~Q zJxT8>92T7nOBR5-`Ck4YYe{K2?RM6V*Ij1Nb0OirH=|@MG9~VHk$tf`z@P^#6#MB| zub69ltszQEwt1{PhX-LE5+d#*LM4w3xVz{c0s6!i;;cf(_H_KYcSwrZbM#rq>py(h zNl(8q+tzp0Y>YBr(UQd$zf`2hRkIA+Pf>^}Qb-&PlPh2zNa@UR?!8TRPR>m+@^bre zuxLjOliJ`jem{x)NcnA$TRJqGN>+<9;_OfDoT=yGeB|);cD|S~$C6CVC)TW;!_7m1 z&6g(zo<;sxS&wTOS@v&nwegx1^o_lz*1H-q+6rvu&PT*FTu24mVF6*uVQ07uIN$1i 
zepg8#xhlr~NZxmAPq@@l+Ky7NYcStbynqFy;U4KgU&KQOMC!sk1Fe^U#E{SJ4QuUqMWK>GAQ>f1~?h zNz~wkk|$ty@D!mPr>j!6ce(4Xy31N+bn?uv&R%ImUBP@TQsi=J?X%Xe_Yvr_J_g0E zq;upacsmpjVh4zTzRPRx6cqsXl~y~A#m{=HWc4=_2b$>Z#p*BOP6u{=43+RJPf%EU z%pfv{$NhMZW53Q|B(!+6PG)3HYWcK^2qd{4eIi0x}-P zeAe4o@~rO^G`%d}6za{{T8_=1yG$*t*rmPhbu=M<)4~=iO?AXyQ%3G=7hs)_%b7@G23T z9|4JI|Gz}+RJ>vg>3HOSbqc6srcp~a8EE%n{=-TV=ehH>aUtJqE!Y-gKyhZY_E9B1 z^6BlYn@kjAr~d1k$;Jq7zRr)t`KVe)nwUWmXL{P7@PoMoj>)p=MP%ocG`1p|s7Rp8 zo^k7K#@fN!msQX^kJgMeq9F4nVqsM5L%E6c>+=mD&=r94n6@xfIRmxP@W2U{CM#Ja zGGM$!u9vGjU!@Sr5I%Yhbg@@B513T|{~#xxO;<1Jm-yJds`XtKzjQ>2U{WXxzun(l zD94$Q!zE7$t2x8PT&@R;GgCEx)tm?am=h}HonsAY>pzn6J07Kd5bgHbMm=IwI+Qf6 zd?5AvH*Qx+I&!ZE$VwxTaq+x$sTpQ_9f66c*CvR4aqBVCE=8pEA2F_jsG(F2EKx@<;n^_4tVeo0qGex7Tb>kx8y_vGTSgK5E=^`j=u>_x@FO zRk~Sc!eJ><(8Ft`*wqw1OPInx1d+QrpF3{R73bO(3E?=DH%H&$cHLQ3Qkgw*=@*`F zJa+R|j+cx8gu{>ieRu-{GhNvin4Wm!{5b~dD&;k7xyys|bXgoF`Dq>ilT3Nbq~wCE z^xM+pvH`%$6Ihw@;zRE8Nm?vNIZGJST+nB;e~&$KZrM;$EABH9XYfMluEI}ys_}SO zYIOf&g&5O-L5cAbmJd5_4{i>P^-*pXmrG_I`bBf1n||hb4-^^K5#m#REDnR^HBKmi z5$0b?NOW0z>|_&VA=VF13W_GnJIyRMSkEgM?or^O?ByzPSc0khC~?*}58pNAV5$J9PU;%#eHkb~w-w_^ z?75&fQD?P=*bV~yLH%Ns5qu8S1Pl;aZzBo@vXD8{Z{<`v!Z_=BPX&=*7)!Yl(fim~9Nmmg_#1kw~>uC{D@u-dZ>A(N*4{4&a3RU?F+ zC7f=K?re@5Tl~OvnZd9*=idwy-Y!F6hmkNcS;^SED|{&q=pit5KTpzC_y)vC)N+X_ zcnrph?A!x$k+Psi2`Y}vo2YEW`ey!JRy5##2zy%enVDjYp(-Q=lOLhMwB^nAp^yAh ze;UU?9OSNgIXo>TRIhEX%B>nV7zv@+ zKYW4xNfiKi+wH%dq3jXO++RE*w%c!7+>|_>)LoUNHzXR35@AbnPzOdcT*_n2KzWb` zi~y+OEko`Zm+EVv0Yaa=1`wt8@C}PEp;~@JMKH{y1vR<2@>=)?UO~ypIIwKZcQETY zj}sdrm*}A}J~m)gWBJWTsP6U%W55EK-09E(w5I5o`H21*_+Hhucjoi)q!kQ$V74%+ z3)ZB}f<3yzCpVI~ZEwEsg`B1AhJ!3@i#QkWl>Mc-JVCBz)~_6j0{ckuUy~Tz_!(sQ zqAM3L)@YgNn1CNtqI(%HSHhATI)#ki#xp|-a@5%KOeD3xN@zLWA}M;%QF`?s&+EMH zYQ??m9>#jd+BbYEB-sgSkEqYH|1U%`tPh4`ty2qE`~qID5RXS>x6-X}-P-!D)4+y|?`-a30kgdhqnyjl6j%7F;E1}@J_oR#bg zUyW}Kt=T5!Drf)H@Zp+3%?n*Jkd(94 zvw!=po6XBT-oMTdVj=U_F8EA519te*w-AX&THka4*AYD_4+KD#1|1(iN)w}W0=AE9 
z|7{;JIPCFTh3%Ph*#`<>z-U#pI!7wv=qU+*hnxn=y!Hd?&VGQ{zm*P2nOG~lXVAl+EAMtV#jTLAZg0e;u48@+NauVJDsO1af*gGV=JkS zabI3Za4Y+Y?%J8~Ep>i#R*t}KCeN;8e2bLy&QccGk~oV*_`EP~P;L%Vs4<|LCEv5% zRClsdQTp%6+N?NmEp0gKZ6Z1;7$~HkhfEZ4c)^rcZeBcw-PWi6bu(a%6szd3>Ut-gyZ-A}ju7lq7$th!Zg}k{P6x>dsGUMSv-51<(FMFIKhu>dZ43Uw6 z>v(MgBvY9N0Bn`W#wY4Y9Y0S}GD(*N>!(Oh04u*7ke0hgLJ4?v-2`R6lC%>0p8bRr z=m>8hIGH1II<#is*^k;WY`$<8>xeIg(-7La5)-Udcw4rZI+y`^y3eVKlzt*y|l=|>u{69fckA`k7RVKct`-a7rtai#}WO7|aj32XLi7j2-PmG*V zedi~x7$GJSA)AJeswn!ybg>q%Rr}<1RHO~{mErHxh`ivm6e`Rp3lDO1-u~8!#-=w9 zuk_7hY)$_^1J65wI9qD{n}AGCH?uSfk3P8b%Y#RUKbNQ*k~MT?XBzU9S=yE> z>%rpZH;2@kV0bA*`(d-9jGAkjvWtg6bp-fpyvHqEzFu)Mf2C#+&4n#DZJaX;QMvDa|Yyr}6h(i3vYEsTuN?WV%L$+0w zMU)hm=?ag-?P$!w8l)fsYzMt3p=9aV3|dtzRmQTF&5EMv zGg}Xta{U`fc|j#3vGg2SnqLe!%g0v_`BG=8Ts%FzHi$X)RojSsY5>5$fnZQP<&qoy z;0p-W%*X82ipi;<9bRW+@vMb(ijSA!Qr1neuD3ZTZ#$(a7`W2$lyZ8?kr$I8$tiD0 z-+tcKo?-*1W6N9o{V|7wv48K0gO@bhFiCv2zJi9kHoKcB?F2ndpN- zb7)4im{|)qR4Fqb`7-ikDIF->e?I~pjvN`-!z4(2(UOpVPM-*@qAaEYrO5Rl1y*U6 zn>5|%DH~6P4yNuQT04d7-CyzQpEhJOLy?ljoW((%_{OYw=@1)d&R{um*{#ngW;%sz z#Z9^kyl63dzN|s@U)5U*pE^IcTNh}D$u!@l)L<<-w+#=FYs@VP8c5PjY++8LfC7t# zf)2dRLZ=L5dfY|+OJkL5+l^$l;UqSNGV7Sm1-3kSy_+K{8dY(7VDvQ_%Ez3vgjPS-|{J&Gw6bKVs9U4;JHH{h2WyYF^qjTCrV1*W_m zS(1h4msnJ3sutVkEEd@Ym%cP^*8pN&bQNoZM6cA_RO!LV+*7B2hU7}8>)Y+LBr#?1 z+Vqx$boK&N)xJgLB!b0Q4vn$+Rsj}zX(l<$>4QA5r(ap*lO{g|_73Ui6p2X~$%=!) zTKHjdi_s;IE!mITEPOs}bESJGqpvlp6Htp)y^(>gzeuS1t#Zd%$#OCA;lnz19#huk zkN#>?!ZUC14B6N?Ena1>>=T==Zs%qS?-keWc1z-jy303Wly6PJ*onFB|Na0fC&vzt zvlc5}d-#yGi0(DzI@lK%AACe_=drO`PO*n!C>8211Z&CsrnPaR1bk6L3Mi1>d{01G z38SBkubARWN>O$-H2LLOX~r^tro&`P~GbLRb=Y zZz6On?Ms0)Tj-62>)q%hS?!FbUHkcCUoi$wI~pyi0M*}uw*YH)n-_$iKJ(5O=bQ6^ zS95rvjG}C-0e34>S! zrOW!!f#g45g8ee~5pw?YWCCm4yWb|0E4J{h+cE&LDG0?G?ltWaBWG(%xb{c7O6DV%VB*-I*AU3`!PoYn~&!MW6eEQ4h zJ8otju7Tc$yBEr^OD(c`YVUZZk3HD7O>sEz?z^7WZG<=Q0kLa9;3Nj{&*V)Iu}ca? 
zDu9l(2pv;t{J9@%>FuoQZ`!vJ4X@X^m`RJ9?wIk!V^UQL>W|44=iB37aF}7S75J>U zy~rEB9$KuEvyHK@df$H0HmJF)@Z5kn%^p6+b*%iO8A@#F5!uRYoBOe{>dRm4RjKvD zPNswKdk&@2s|rABq%eufvC54Giek)PqN_P#@AP{R@c<&PxmqesrBBWThav-GvS~EE zi*ExgV@6DZGo{{xgUNeuvw7L$n(Xcac>RaFA(<&A`R+OIfUoiGs|C@JQ$a>qH74iI z>42L)Tgl>tQgggrn9hN{N*a&n#ChFhlf^qv zeb`=8>#DR!ULX?=XOdK;<%}ro)Tr?Hga1-X>s1+;VhYf2Cox{d{;gU_Emk=LVqf^^ zHxF$;D$9`VM3pR;(+x(Gz@;7g7X;5=v`zkM?1kr@&d*tk4d~-_WjUl5!GSLU*V7#; zx_HeSW`Ix;GD6UJ3`5*)5YI+k&pri&-t$&96q2fTidlk%nR1!^p3{waU~u50lHvRK zwStKd1NML>Ttlz=^&(hw%HUnKBGvN^F*$NKgWSJ-PTeybxj_nv=heN0j5Y4Xt)Vwm z=i@=xRWPm`^V|&>WB}Ku*{*)#J@?lIjp6Z4YpQz~(Wk*k?n6r5#k1eGxQ9q`VOrp0 zhDvTTqq72I4fS=@uX~0R#OD+d*|^R3g{CSI=N8GHx6^Q-wQnVGixuUsJ>6oR#Q|S# z?9R^{o~Y-V3T}@_1s&O_KA+d9Q0O~C%FatV_s=$6wKIk_Z%`-|CZ4lh1USj*%iS9# zkx_XZP$-fpYb&nBF{GY5sC)TukG|i_-^IxU4$=(~UG#QkJdUOxPt z!}inO9ecuRE6pCC!<{Ygz4KQMTc|YPMY9UNP67^5%TFEAGSfXMMC!zA7c-*)WX2FD zr?DJpC`bEuHxjymNu{{l4j3dmxc5rpH1-5{AnMFW$26;8GP#?ulwNhmjIv^@y0J7M z_40gW@&%T6E;;8}_MU1~5{;g;XC`pbqm$Z1AuISRIvz9U27Lp0uJ0TkkM%OG`e5^l zwea?*{mMUwk0P?GO2#^9x=vEzzPtVKzy3;%$cLVEG4dmEO%=BrOV|HOK-F(Ujo8w; z_Z0_RUoHeVhz8Si(uUh^C;}7PQQN$Dtkq{tl{9f zlhD(x0=_)R%B9o;kGo37$gMio!&4joueNJ(XS#jk(&SLy7~_}n#tgMdC7o0>ZL-Ni zJ4l3*P}0cRrk38e^`@Co$&y3N7}6Fol(!R7&8c)C%W;t$qH^f>Ouy^>4}QD8*REaP z@BMwA`?>G?xu5&~+}EC`DKVc5Wt6oJE8UN?mn*sbljK0>`Utze2YB%koW@}&c?8XC zd9ckgMYN_w;q!+}>i2#|FRA?1Kn>mYhBzGL!^Ua`Q}IJCt~yah2geJzIUyZ-R|1C* zweO<|>z|CKN=zDgX|6FhswJ$Gq z=BkAMpeklsP6?>kLD3MrC+t|9b2*5ZAx!fi9=1JNC3G)Dl7J^_#(U&cT(=l*#$TJy zy=+k|7CsFhzHI*mB13jBf~tHw*Go#^B!w;$S(Gph0JJjsCi7%CQf^*}oNgr~1woBW z3%#~u&2u{gNSkg8i7a?2IG2)HiNjCbYYeD4$c#`M%+tu2?TCkVYRQE@wdx2B`GsE6 z$gAV!R8D1xk6k`1{+0SfQDV%GCi+Y5yH*w>2~4kGOoQ)^JVAhMYHknr*Z~m|Ck>KFx=2 ze8`b%snhZ^7DFi`dzyd)Sc$&PLi_5FIXEx*YAXa+ z6a{w9s{gK8vcVUDkHqfjD6kAUvcIoaIz7lj`9IZ;SAB(?6Ua~XzSB6^PT(S++dpER z%z{QQFJAL*?@ZNH_Ma$-s_n+o3%8a=MUUG5#J1m&>w5}YgefzZ$U2nC4{il|eHb$b z)a;I~=L0?nyBkZvkhC?6mq4$+4|gng2@N-P3b=5;+$zLLuZXD*g(9T!y;he)_} 
z0vqI)(Ree8>B;ohXPHj#>)8RMZHxe(A$_JN&Z113N7}@`5coL)8(j1~I679gY+Ng- zHljT6efJqVM2Q{P$kr7vy3G=bk7^#hhLfDMS$9Jtl=gKnM{!MP*)Os#lrq7Qse8MH zf%^_T=Cg%cFY9}{n+)A{%4{GJTIz)dEPLFs`kd$VEe#|M66lRaxv@49+kN$Ct9AJK z-ZXwxtnWK*D21;E09o_G$#g>zSHZT)w)VdNpP1c$n3cV_xob>)cA(!*>xvrNRMptVVeMO|OLmPPZARmtD8~z_-p-To{=U*Ci6ArJd6a zmkB+BWBrEW>9=0~xK?xL@dd%B(SkdJ z(Z6mC)Q-n-Yj(Y5ZBSjBc>Hr|Y3N8d=R@D?Yg?C{fjWxKs8_du69nb+HbLEklKm&! zl5PYLBYMn5UdQS_?31(eNHa14Lg$yaq*@9OZa3mo!ZQIF!NYl-Vx^kc52b7-P9GVQ z!}^}ubv~t+Hhv0EBz~QZE};eA$-W)moYei9*fV-^E>gww&^xaKD^dwo8gM&wJ?ipg zy2A6t-0eoXELP?pF+7w%amT;*Q*(zj)2+Y;cWfBWek@!TdWV*f+o-lSw7nCt&L_i^ zmd2Cjv^?lwJc9M8X)R1ERNkBk*VI1hu7e@y?vK^;H&GAv_6hV9kZh^zfplTIX=_^Y z`Moe)QAg4vShf}z)J9^{uY2|}XJnsi%Np|6Kri)@=3+pkei-M^k;0k%aIlZ2=*`Nq z^qX;_-7Tq`>mD0rwg`HV^%?Ki@7`s`cm~H(^7$LnZ%TeO#(@$?o$6a z(xR!w0lXy-z>)O;KVv3$+qa$YT`^!UH4e7)?|+R7b$Fb7?~+)K_%OisAY*br&X{*) zo&Go`OI$TDu)i|S zuk;_9#Zh)QEv{F=g8{@NKR!0}cKUrRQq~^86(-lGAVf^E$q>emHwi0f z$8PgS%h>{TC4i;Nk6QoBOi9LI-l%L;E5vYuV1sw@2sIokrwi0-bk~ru%8m`%Hr!-7 zt|~;~59x}WpyPO+K_;pa`t%j%(r)g#B72)Xl%-f<`ikV8!OMy=|vNO=aAG7ovn z?iGISMp=bfV)f90&6eU&vx~dj%Dqw z9^3n3CO2U#5dtb{Q|BK6Rk*35RMPYU=PSE%EgfNrtvKBU@naI&xK(f}R;=nf_fJz8 zNW%vW`4w5a;^oU#{OE?hZ`bx%yS*EDV|J{sX}X{eioAdw7sdhuRU$kJ6+T3XsDe5$ zm~S~gUs0pg@zb*PT{Sb$ChS!{8n)#oq<`WV2$hp!0^I_#R6)`P6|iLc8E+VTbWu5I z?!(b*`})rFU3lSWMHtqWEto5)e~u$gUv&2%`*T=UP)f71@w8hI^53r-mjC9O#^hGz zh+!$Vk}b|xb)7)W5|1m6Yt-VBnzaEj%oG4#N{exMU>P6}Naj-bdp)VxU=-Y%(X0O| z0d6wHgURB6cl26;G$o!T+bh9oP-58WuXJD7By-jzQIzEB%gR(r!r4viAT*40_HI|N zdX{d5DAUibUAZuBoy+LWk)YlBD}nk%bQ`=ZD7mFe@M1p>kdCW;11J{k$3nVaW6z^U z_WyndV@rGl!=WMr2Fgdy{PB*>V4HByg6UEMbOaxvtL|2^4v{%ClqTuMj|#phV=>rZ z()}IvgHW+_lHRr)?}t6hRpO`MYWR2+$bu9@RCCUKdGyd7v>mwZ?h6*0-_Nz5x_GCE z*@QQf1THuWuJr)*F7m@I=aL)TTqeTi=b4;aAv@dFWN^?CE&YtHD*^vbWCr11)()_p|Xj3==yT(iE|vD-*>F+2Qwn4i#*DRWD1{z*s%{ zaD?+hCrs7NRLkGpF}IjM4@p88YJ!#m|5^aA7u1w5z^~;}z)t`$EfianhBOEY;lOf# fE8ze9eMaY3BK3nF!?nC_wF>y)umntrqaWkH6kS@6 literal 0 HcmV?d00001 From 82190730097f9e676e827f204238ab57e92360e6 Mon Sep 17 
00:00:00 2001 From: Daniel Dale Date: Fri, 20 Feb 2026 16:24:37 -0800 Subject: [PATCH 17/18] streamlined serial backend testing with `models_cpu` fixture and `clean_cuda` context manager to allow simple orchestration of sequential backend parity tests. Useful for VRAM constrained environments or to enable parity testing of larger models that may not fit on GPU simultaneously. --- tests/test_tutorial_notebook_backends.py | 345 ++++++++++++----------- 1 file changed, 181 insertions(+), 164 deletions(-) diff --git a/tests/test_tutorial_notebook_backends.py b/tests/test_tutorial_notebook_backends.py index 3786ef3d..1a3d3ce4 100644 --- a/tests/test_tutorial_notebook_backends.py +++ b/tests/test_tutorial_notebook_backends.py @@ -14,8 +14,8 @@ from circuit_tracer.utils.demo_utils import get_unembed_vecs from tests.conftest import has_32gb -# Mark all tests in this module as requiring 32GB+ VRAM -pytestmark = [pytest.mark.skipif(not has_32gb, reason="Requires >=32GB VRAM")] +# decorator used to gate individual tests on available VRAM +skip32gb = pytest.mark.skipif(not has_32gb, reason="Requires >=32GB VRAM") def _move_replacement_model(model, device): @@ -48,30 +48,46 @@ def _move_replacement_model(model, device): model.cfg.device = device -def _gpu_cleanup(): - """Run garbage collection and free CUDA memory.""" - gc.collect() - torch.cuda.empty_cache() - - @contextmanager -def _swap_backend(model_off, model_on): - """Context manager: move *model_off* to CPU, move *model_on* to CUDA. - - On exit (whether or not an exception occurred) restores *model_on* → CPU - and *model_off* → CUDA so the fixture is left in its original state. +def clean_cuda(model, min_bytes: int = 1 << 20): + """Move *model* to CUDA; on exit automatically free large transient CUDA tensors. + + Snapshots data_ptrs of all large CUDA tensors after the model moves to CUDA + (capturing model weights as 'known'). 
On exit, any new large CUDA tensor not + in the snapshot has its storage replaced via ``set_(torch.empty(0))``, freeing + VRAM even while Python references remain alive. Then ``gc.collect()`` + + ``empty_cache()`` flush remaining allocations before the model moves back to CPU. + Callers do not need explicit ``del`` statements for large GPU-resident objects. """ - _move_replacement_model(model_off, "cpu") - gc.collect() - torch.cuda.empty_cache() - _move_replacement_model(model_on, "cuda") + _move_replacement_model(model, "cuda") + + def _is_large_dense_cuda(t: object) -> bool: + return ( + isinstance(t, torch.Tensor) + and t.is_cuda + and t.layout == torch.strided + and t.nbytes >= min_bytes + ) + + known_ptrs: set[int] = {obj.data_ptr() for obj in gc.get_objects() if _is_large_dense_cuda(obj)} try: yield finally: - _move_replacement_model(model_on, "cpu") + freed_ptrs: set[int] = set() + for obj in gc.get_objects(): + if ( + _is_large_dense_cuda(obj) + and obj.data_ptr() not in known_ptrs + and obj.data_ptr() not in freed_ptrs + ): + freed_ptrs.add(obj.data_ptr()) + try: + obj.set_(torch.empty(0)) + except Exception: + pass gc.collect() torch.cuda.empty_cache() - _move_replacement_model(model_off, "cuda") + _move_replacement_model(model, "cpu") @pytest.fixture(autouse=True) @@ -92,14 +108,19 @@ def models(): @pytest.fixture(scope="module") -def models_sequential(): - """Load models for memory-constrained tests: NNSight on CUDA, TL on CPU. +def models_cpu(): + """Load both models on CPU for memory-constrained sequential backend tests. - Tests using this fixture should call ``_move_replacement_model`` to swap - which model lives on CUDA before each backend phase. + Tests using this fixture should wrap each backend run in ``clean_cuda`` + to move the active model to CUDA and restore it to CPU when done, + automatically freeing transient GPU-resident objects between backend phases. 
""" model_nnsight = ReplacementModel.from_pretrained( - "google/gemma-2-2b", "gemma", backend="nnsight", dtype=torch.float32 + "google/gemma-2-2b", + "gemma", + backend="nnsight", + dtype=torch.float32, + device=torch.device("cpu"), ) model_tl = ReplacementModel.from_pretrained( "google/gemma-2-2b", "gemma", dtype=torch.float32, device=torch.device("cpu") @@ -202,6 +223,7 @@ def small_big_prompts(): } +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_dallas_austin_activations(models, dallas_austin_prompt): """Test get_activations consistency for Dallas-Austin prompt.""" @@ -221,6 +243,7 @@ def test_dallas_austin_activations(models, dallas_austin_prompt): ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_dallas_austin_attribution(models, dallas_austin_prompt): """Test attribution consistency for Dallas-Austin prompt.""" @@ -247,6 +270,7 @@ def test_dallas_austin_attribution(models, dallas_austin_prompt): ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_dallas_intervention_say_capital_ablation( models, dallas_austin_prompt, dallas_supernode_features @@ -284,6 +308,7 @@ def test_dallas_intervention_say_capital_ablation( ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_dallas_intervention_capital_ablation( models, dallas_austin_prompt, dallas_supernode_features @@ -320,6 +345,7 @@ def test_dallas_intervention_capital_ablation( ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_dallas_intervention_texas_ablation( models, dallas_austin_prompt, dallas_supernode_features @@ -356,6 +382,7 @@ def test_dallas_intervention_texas_ablation( ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_dallas_intervention_state_ablation( models, dallas_austin_prompt, 
dallas_supernode_features @@ -392,6 +419,7 @@ def test_dallas_intervention_state_ablation( ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_dallas_intervention_replace_texas_with_california( models, dallas_austin_prompt, dallas_supernode_features, oakland_supernode_features @@ -441,6 +469,7 @@ def test_dallas_intervention_replace_texas_with_california( ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_dallas_intervention_replace_texas_with_china( models, dallas_austin_prompt, dallas_supernode_features, shanghai_supernode_features @@ -489,6 +518,7 @@ def test_dallas_intervention_replace_texas_with_china( ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_dallas_intervention_replace_texas_with_bc( models, dallas_austin_prompt, dallas_supernode_features, vancouver_supernode_features @@ -536,6 +566,7 @@ def test_dallas_intervention_replace_texas_with_bc( ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_oakland_sacramento_activations(models, oakland_sacramento_prompt): """Test get_activations consistency for Oakland-Sacramento prompt.""" @@ -555,6 +586,7 @@ def test_oakland_sacramento_activations(models, oakland_sacramento_prompt): ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_oakland_sacramento_attribution(models, oakland_sacramento_prompt): """Test attribution consistency for Oakland-Sacramento prompt.""" @@ -581,6 +613,7 @@ def test_oakland_sacramento_attribution(models, oakland_sacramento_prompt): ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_multilingual_english_activations(models, small_big_prompts): """Test get_activations consistency for English opposite prompt.""" @@ -603,6 +636,7 @@ def test_multilingual_english_activations(models, 
small_big_prompts): ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_multilingual_french_activations(models, small_big_prompts): """Test get_activations consistency for French opposite prompt.""" @@ -623,6 +657,7 @@ def test_multilingual_french_activations(models, small_big_prompts): ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_multilingual_chinese_activations(models, small_big_prompts): """Test get_activations consistency for Chinese opposite prompt.""" @@ -643,6 +678,7 @@ def test_multilingual_chinese_activations(models, small_big_prompts): ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_multilingual_french_attribution(models, small_big_prompts): """Test attribution consistency for French opposite prompt.""" @@ -670,6 +706,7 @@ def test_multilingual_french_attribution(models, small_big_prompts): ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_multilingual_french_ablation(models, small_big_prompts, multilingual_supernode_features): """Test ablating French language features (-2x).""" @@ -705,6 +742,7 @@ def test_multilingual_french_ablation(models, small_big_prompts, multilingual_su ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_multilingual_french_to_chinese(models, small_big_prompts, multilingual_supernode_features): """Test replacing French with Chinese (French -2x, Chinese +2x).""" @@ -752,6 +790,7 @@ def test_multilingual_french_to_chinese(models, small_big_prompts, multilingual_ ) +@skip32gb @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_multilingual_replace_small_with_big( models, small_big_prompts, multilingual_supernode_features @@ -800,6 +839,7 @@ def test_multilingual_replace_small_with_big( @pytest.mark.skipif(not torch.cuda.is_available(), 
reason="CUDA not available") +@skip32gb def test_setup_attribution_consistency(models, dallas_austin_prompt): """Test that attribution contexts are consistent between backends.""" model_nnsight, model_tl = models @@ -887,28 +927,27 @@ def _build_demo_semantic_target(model, prompt, group_a_tokens, group_b_tokens, l @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") -def test_attribution_targets_string(models_sequential, dallas_austin_prompt): +def test_attribution_targets_string(models_cpu, dallas_austin_prompt): """Test attribution with Sequence[str] targets consistency between TL and NNSight.""" - model_nnsight, model_tl = models_sequential + model_nnsight, model_tl = models_cpu str_targets = ["▁Austin", "▁Dallas"] - # --- NNSight backend (already on CUDA from fixture) --- - graph_nnsight = attribute_nnsight( - dallas_austin_prompt, - model_nnsight, - attribution_targets=str_targets, - verbose=False, - batch_size=256, - ) - nn_active = graph_nnsight.active_features.cpu() - nn_selected = graph_nnsight.selected_features.cpu() - nn_tokens = [t.token_str for t in graph_nnsight.logit_targets] - nn_adj = graph_nnsight.adjacency_matrix.cpu() - del graph_nnsight - _gpu_cleanup() + # --- NNSight backend --- + with clean_cuda(model_nnsight): + graph_nnsight = attribute_nnsight( + dallas_austin_prompt, + model_nnsight, + attribution_targets=str_targets, + verbose=False, + batch_size=256, + ) + nn_active = graph_nnsight.active_features.cpu() + nn_selected = graph_nnsight.selected_features.cpu() + nn_tokens = [t.token_str for t in graph_nnsight.logit_targets] + nn_adj = graph_nnsight.adjacency_matrix.cpu() # --- TL backend --- - with _swap_backend(model_nnsight, model_tl): + with clean_cuda(model_tl): graph_tl = attribute_transformerlens( dallas_austin_prompt, model_tl, @@ -920,8 +959,6 @@ def test_attribution_targets_string(models_sequential, dallas_austin_prompt): tl_selected = graph_tl.selected_features.cpu() tl_tokens = [t.token_str for t in 
graph_tl.logit_targets] tl_adj = graph_tl.adjacency_matrix.cpu() - del graph_tl - _gpu_cleanup() # --- Compare CPU tensors --- assert (nn_active == tl_active).all(), ( @@ -937,30 +974,29 @@ def test_attribution_targets_string(models_sequential, dallas_austin_prompt): @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") -def test_attribution_targets_logit_diff(models_sequential, dallas_austin_prompt): +def test_attribution_targets_logit_diff(models_cpu, dallas_austin_prompt): """Test attribution with CustomTarget consistency between TL and NNSight.""" - model_nnsight, model_tl = models_sequential - - # --- NNSight backend (already on CUDA from fixture) --- - custom_nnsight, _, _ = _build_demo_custom_target( - model_nnsight, dallas_austin_prompt, "▁Austin", "▁Dallas", backend="nnsight" - ) - graph_nnsight = attribute_nnsight( - dallas_austin_prompt, - model_nnsight, - attribution_targets=[custom_nnsight], - verbose=False, - batch_size=256, - ) - nn_active = graph_nnsight.active_features.cpu() - nn_selected = graph_nnsight.selected_features.cpu() - nn_tokens = [t.token_str for t in graph_nnsight.logit_targets] - nn_adj = graph_nnsight.adjacency_matrix.cpu() - del graph_nnsight, custom_nnsight - _gpu_cleanup() + model_nnsight, model_tl = models_cpu + + # --- NNSight backend --- + with clean_cuda(model_nnsight): + custom_nnsight, _, _ = _build_demo_custom_target( + model_nnsight, dallas_austin_prompt, "▁Austin", "▁Dallas", backend="nnsight" + ) + graph_nnsight = attribute_nnsight( + dallas_austin_prompt, + model_nnsight, + attribution_targets=[custom_nnsight], + verbose=False, + batch_size=256, + ) + nn_active = graph_nnsight.active_features.cpu() + nn_selected = graph_nnsight.selected_features.cpu() + nn_tokens = [t.token_str for t in graph_nnsight.logit_targets] + nn_adj = graph_nnsight.adjacency_matrix.cpu() # --- TL backend --- - with _swap_backend(model_nnsight, model_tl): + with clean_cuda(model_tl): custom_tl, _, _ = 
_build_demo_custom_target( model_tl, dallas_austin_prompt, "▁Austin", "▁Dallas", backend="transformerlens" ) @@ -975,8 +1011,6 @@ def test_attribution_targets_logit_diff(models_sequential, dallas_austin_prompt) tl_selected = graph_tl.selected_features.cpu() tl_tokens = [t.token_str for t in graph_tl.logit_targets] tl_adj = graph_tl.adjacency_matrix.cpu() - del graph_tl, custom_tl - _gpu_cleanup() # --- Compare CPU tensors --- assert (nn_active == tl_active).all(), ( @@ -992,9 +1026,9 @@ def test_attribution_targets_logit_diff(models_sequential, dallas_austin_prompt) @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") -def test_attribution_targets_logit_diff_intervention(models_sequential, dallas_austin_prompt): +def test_attribution_targets_logit_diff_intervention(models_cpu, dallas_austin_prompt): """Test custom-target feature amplification consistency between TL and NNSight.""" - model_nnsight, model_tl = models_sequential + model_nnsight, model_tl = models_cpu n_top = 10 def _get_top_features(graph, n): @@ -1008,42 +1042,39 @@ def _get_top_features(graph, n): _, top_idx = torch.topk(node_influence[:n_features], min(n, n_features)) return [tuple(graph.active_features[graph.selected_features[i]].tolist()) for i in top_idx] - # --- NNSight backend (already on CUDA from fixture) --- - custom_nnsight, idx_x_nn, idx_y_nn = _build_demo_custom_target( - model_nnsight, dallas_austin_prompt, "▁Austin", "▁Dallas", backend="nnsight" - ) - graph_nnsight = attribute_nnsight( - dallas_austin_prompt, - model_nnsight, - attribution_targets=[custom_nnsight], - verbose=False, - batch_size=256, - ) - top_feats_nn = _get_top_features(graph_nnsight, n_top) - del graph_nnsight, custom_nnsight - _gpu_cleanup() + # --- NNSight backend --- + with clean_cuda(model_nnsight): + custom_nnsight, idx_x_nn, idx_y_nn = _build_demo_custom_target( + model_nnsight, dallas_austin_prompt, "▁Austin", "▁Dallas", backend="nnsight" + ) + graph_nnsight = attribute_nnsight( + 
dallas_austin_prompt, + model_nnsight, + attribution_targets=[custom_nnsight], + verbose=False, + batch_size=256, + ) + top_feats_nn = _get_top_features(graph_nnsight, n_top) - input_ids_nn = model_nnsight.ensure_tokenized(dallas_austin_prompt) - orig_logits_nn, acts_nn = model_nnsight.get_activations(input_ids_nn, sparse=True) + input_ids_nn = model_nnsight.ensure_tokenized(dallas_austin_prompt) + orig_logits_nn, acts_nn = model_nnsight.get_activations(input_ids_nn, sparse=True) - interv_nn = [(ly, p, f, 10.0 * acts_nn[ly, p, f]) for (ly, p, f) in top_feats_nn] - new_logits_nn, _ = model_nnsight.feature_intervention(input_ids_nn, interv_nn) + interv_nn = [(ly, p, f, 10.0 * acts_nn[ly, p, f]) for (ly, p, f) in top_feats_nn] + new_logits_nn, _ = model_nnsight.feature_intervention(input_ids_nn, interv_nn) - orig_gap_nn = ( - (orig_logits_nn.squeeze(0)[-1, idx_x_nn] - orig_logits_nn.squeeze(0)[-1, idx_y_nn]) - .cpu() - .item() - ) - new_gap_nn = ( - (new_logits_nn.squeeze(0)[-1, idx_x_nn] - new_logits_nn.squeeze(0)[-1, idx_y_nn]) - .cpu() - .item() - ) - del orig_logits_nn, acts_nn, new_logits_nn - _gpu_cleanup() + orig_gap_nn = ( + (orig_logits_nn.squeeze(0)[-1, idx_x_nn] - orig_logits_nn.squeeze(0)[-1, idx_y_nn]) + .cpu() + .item() + ) + new_gap_nn = ( + (new_logits_nn.squeeze(0)[-1, idx_x_nn] - new_logits_nn.squeeze(0)[-1, idx_y_nn]) + .cpu() + .item() + ) # --- TL backend --- - with _swap_backend(model_nnsight, model_tl): + with clean_cuda(model_tl): custom_tl, idx_x_tl, idx_y_tl = _build_demo_custom_target( model_tl, dallas_austin_prompt, "▁Austin", "▁Dallas", backend="transformerlens" ) @@ -1055,8 +1086,6 @@ def _get_top_features(graph, n): batch_size=128, ) top_feats_tl = _get_top_features(graph_tl, n_top) - del graph_tl, custom_tl - _gpu_cleanup() input_ids_tl = model_tl.ensure_tokenized(dallas_austin_prompt) orig_logits_tl, acts_tl = model_tl.get_activations(input_ids_tl, sparse=True) @@ -1074,8 +1103,6 @@ def _get_top_features(graph, n): .cpu() .item() ) - 
del orig_logits_tl, acts_tl, new_logits_tl - _gpu_cleanup() # --- Compare on CPU --- assert new_gap_nn > orig_gap_nn, ( @@ -1091,33 +1118,32 @@ def _get_top_features(graph, n): @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") -def test_attribution_targets_semantic(models_sequential, dallas_austin_prompt): +def test_attribution_targets_semantic(models_cpu, dallas_austin_prompt): """Test attribution with semantic concept CustomTarget consistency between TL and NNSight.""" - model_nnsight, model_tl = models_sequential + model_nnsight, model_tl = models_cpu capitals = ["▁Austin", "▁Sacramento", "▁Olympia", "▁Atlanta"] states = ["▁Texas", "▁California", "▁Washington", "▁Georgia"] label = "Concept: Capitals − States" - # --- NNSight backend (already on CUDA from fixture) --- - sem_nnsight = _build_demo_semantic_target( - model_nnsight, dallas_austin_prompt, capitals, states, label, backend="nnsight" - ) - graph_nnsight = attribute_nnsight( - dallas_austin_prompt, - model_nnsight, - attribution_targets=[sem_nnsight], - verbose=False, - batch_size=256, - ) - nn_active = graph_nnsight.active_features.cpu() - nn_selected = graph_nnsight.selected_features.cpu() - nn_tokens = [t.token_str for t in graph_nnsight.logit_targets] - nn_adj = graph_nnsight.adjacency_matrix.cpu() - del graph_nnsight, sem_nnsight - _gpu_cleanup() + # --- NNSight backend --- + with clean_cuda(model_nnsight): + sem_nnsight = _build_demo_semantic_target( + model_nnsight, dallas_austin_prompt, capitals, states, label, backend="nnsight" + ) + graph_nnsight = attribute_nnsight( + dallas_austin_prompt, + model_nnsight, + attribution_targets=[sem_nnsight], + verbose=False, + batch_size=256, + ) + nn_active = graph_nnsight.active_features.cpu() + nn_selected = graph_nnsight.selected_features.cpu() + nn_tokens = [t.token_str for t in graph_nnsight.logit_targets] + nn_adj = graph_nnsight.adjacency_matrix.cpu() # --- TL backend --- - with _swap_backend(model_nnsight, model_tl): + 
with clean_cuda(model_tl): sem_tl = _build_demo_semantic_target( model_tl, dallas_austin_prompt, capitals, states, label, backend="transformerlens" ) @@ -1132,8 +1158,6 @@ def test_attribution_targets_semantic(models_sequential, dallas_austin_prompt): tl_selected = graph_tl.selected_features.cpu() tl_tokens = [t.token_str for t in graph_tl.logit_targets] tl_adj = graph_tl.adjacency_matrix.cpu() - del graph_tl, sem_tl - _gpu_cleanup() # --- Compare CPU tensors --- assert (nn_active == tl_active).all(), ( @@ -1151,9 +1175,9 @@ def test_attribution_targets_semantic(models_sequential, dallas_austin_prompt): @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") -def test_attribution_targets_semantic_intervention(models_sequential, dallas_austin_prompt): +def test_attribution_targets_semantic_intervention(models_cpu, dallas_austin_prompt): """Test semantic-target feature amplification consistency between TL and NNSight.""" - model_nnsight, model_tl = models_sequential + model_nnsight, model_tl = models_cpu n_top = 10 capitals = ["▁Austin", "▁Sacramento", "▁Olympia", "▁Atlanta"] states = ["▁Texas", "▁California", "▁Washington", "▁Georgia"] @@ -1170,45 +1194,42 @@ def _get_top_features(graph, n): _, top_idx = torch.topk(node_influence[:n_features], min(n, n_features)) return [tuple(graph.active_features[graph.selected_features[i]].tolist()) for i in top_idx] - # --- NNSight backend (already on CUDA from fixture) --- - sem_nnsight = _build_demo_semantic_target( - model_nnsight, dallas_austin_prompt, capitals, states, label, backend="nnsight" - ) - idx_x_nn = model_nnsight.tokenizer.encode("▁Austin", add_special_tokens=False)[-1] - idx_y_nn = model_nnsight.tokenizer.encode("▁Dallas", add_special_tokens=False)[-1] + # --- NNSight backend --- + with clean_cuda(model_nnsight): + sem_nnsight = _build_demo_semantic_target( + model_nnsight, dallas_austin_prompt, capitals, states, label, backend="nnsight" + ) + idx_x_nn = 
model_nnsight.tokenizer.encode("▁Austin", add_special_tokens=False)[-1] + idx_y_nn = model_nnsight.tokenizer.encode("▁Dallas", add_special_tokens=False)[-1] - graph_nnsight = attribute_nnsight( - dallas_austin_prompt, - model_nnsight, - attribution_targets=[sem_nnsight], - verbose=False, - batch_size=256, - ) - top_feats_nn = _get_top_features(graph_nnsight, n_top) - del graph_nnsight, sem_nnsight - _gpu_cleanup() + graph_nnsight = attribute_nnsight( + dallas_austin_prompt, + model_nnsight, + attribution_targets=[sem_nnsight], + verbose=False, + batch_size=256, + ) + top_feats_nn = _get_top_features(graph_nnsight, n_top) - input_ids_nn = model_nnsight.ensure_tokenized(dallas_austin_prompt) - orig_logits_nn, acts_nn = model_nnsight.get_activations(input_ids_nn, sparse=True) + input_ids_nn = model_nnsight.ensure_tokenized(dallas_austin_prompt) + orig_logits_nn, acts_nn = model_nnsight.get_activations(input_ids_nn, sparse=True) - interv_nn = [(ly, p, f, 10.0 * acts_nn[ly, p, f]) for (ly, p, f) in top_feats_nn] - new_logits_nn, _ = model_nnsight.feature_intervention(input_ids_nn, interv_nn) + interv_nn = [(ly, p, f, 10.0 * acts_nn[ly, p, f]) for (ly, p, f) in top_feats_nn] + new_logits_nn, _ = model_nnsight.feature_intervention(input_ids_nn, interv_nn) - orig_gap_nn = ( - (orig_logits_nn.squeeze(0)[-1, idx_x_nn] - orig_logits_nn.squeeze(0)[-1, idx_y_nn]) - .cpu() - .item() - ) - new_gap_nn = ( - (new_logits_nn.squeeze(0)[-1, idx_x_nn] - new_logits_nn.squeeze(0)[-1, idx_y_nn]) - .cpu() - .item() - ) - del orig_logits_nn, acts_nn, new_logits_nn - _gpu_cleanup() + orig_gap_nn = ( + (orig_logits_nn.squeeze(0)[-1, idx_x_nn] - orig_logits_nn.squeeze(0)[-1, idx_y_nn]) + .cpu() + .item() + ) + new_gap_nn = ( + (new_logits_nn.squeeze(0)[-1, idx_x_nn] - new_logits_nn.squeeze(0)[-1, idx_y_nn]) + .cpu() + .item() + ) # --- TL backend --- - with _swap_backend(model_nnsight, model_tl): + with clean_cuda(model_tl): sem_tl = _build_demo_semantic_target( model_tl, dallas_austin_prompt, 
capitals, states, label, backend="transformerlens" ) @@ -1223,8 +1244,6 @@ def _get_top_features(graph, n): batch_size=128, ) top_feats_tl = _get_top_features(graph_tl, n_top) - del graph_tl, sem_tl - _gpu_cleanup() input_ids_tl = model_tl.ensure_tokenized(dallas_austin_prompt) orig_logits_tl, acts_tl = model_tl.get_activations(input_ids_tl, sparse=True) @@ -1242,8 +1261,6 @@ def _get_top_features(graph, n): .cpu() .item() ) - del orig_logits_tl, acts_tl, new_logits_tl - _gpu_cleanup() # --- Compare on CPU --- assert new_gap_nn > orig_gap_nn, ( From 100af4db210cd0fd5de02789f8579ca5db3c8a06 Mon Sep 17 00:00:00 2001 From: Daniel Dale Date: Sat, 21 Feb 2026 12:02:51 -0800 Subject: [PATCH 18/18] restructured the demo to lead with the simpler target modes and extracted the `CustomTarget` examples and helper functions discussion to a distinct section. added a torch.Tensor version of the `Sequence[str]` example for completeness. --- demos/attribution_targets_demo.ipynb | 404 +++++++++++++---------- tests/test_tutorial_notebook_backends.py | 54 +++ 2 files changed, 281 insertions(+), 177 deletions(-) diff --git a/demos/attribution_targets_demo.ipynb b/demos/attribution_targets_demo.ipynb index b2105835..5c154ef5 100644 --- a/demos/attribution_targets_demo.ipynb +++ b/demos/attribution_targets_demo.ipynb @@ -4,10 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "\n", - "![Attribution Targets](https://raw.githubusercontent.com/speediedan/circuit-tracer/attribution-targets/demos/img/attribution_targets/attribution_targets_banner.png)" + "![Attribution Targets](https://raw.githubusercontent.com/safety-research/circuit-tracer/main/demos/img/attribution_targets/attribution_targets_banner.png)" ] }, { @@ -18,8 +15,7 @@ "source": [ "# Attribution Targets\n", "\n", - "\n", - "[![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/speediedan/circuit-tracer/blob/attribution-targets/demos/attribution_targets_demo.ipynb)" + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/safety-research/circuit-tracer/blob/main/demos/attribution_targets_demo.ipynb)" ] }, { @@ -34,54 +30,10 @@ "|---|---|---|\n", "| `None` | Salient logits | Auto-selects the most probable next tokens via `max_n_logits` / `desired_logit_prob` (default) |\n", "| `Sequence[str]` | Token strings | Attribute from explicitly named tokens, e.g. `[\"▁Austin\", \"▁Dallas\"]` |\n", - "| `Sequence[TargetSpec]` | Custom target | Attribute from arbitrary residual-stream directions via `CustomTarget(token_str, prob, vec)` |\n", "| `torch.Tensor` | Token ID tensor | Attribute from specific vocabulary indices |\n", + "| `Sequence[TargetSpec]` | Custom target | Attribute from arbitrary residual-stream directions via `CustomTarget(token_str, prob, vec)` |\n", "\n", - "See the expandable reference below for `CustomTarget` / `TargetSpec` field descriptions and examples.\n", - "\n", - "We use the capital-city prompt from the other demos: the model must resolve *\"capital of the state containing Dallas\"* via multi-hop reasoning (Dallas → Texas → Austin). After comparing the top features discovered under each mode, we run causal interventions to confirm the findings." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "

\n", - "TargetSpec / CustomTarget — field reference & examples\n", - "\n", - "The `attribution_targets` argument to `attribute()` accepts a `Sequence[TargetSpec]` for fully custom residual-stream directions. Two convenience types are involved:\n", - "\n", - "**`CustomTarget(token_str, prob, vec)`** is a `NamedTuple` with three fields:\n", - "\n", - "| Field | Type | Description |\n", - "|---|---|---|\n", - "| `token_str` | `str` | Human-readable label for this target (e.g. `\"logit(Austin)−logit(Dallas)\"`) |\n", - "| `prob` | `float` | Scalar weight — typically the softmax probability of the token, or \\|p(x)−p(y)\\| for a contrast direction |\n", - "| `vec` | `Tensor (d_model,)` | The direction in residual-stream space to attribute toward |\n", - "\n", - "**`TargetSpec`** is a type alias for `CustomTarget | tuple[str, float, torch.Tensor]`. Either form is accepted — a raw 3-tuple is coerced to a `CustomTarget` namedtuple automatically before processing.\n", - "\n", - "**Example — raw tuple (coerced automatically):**\n", - "\n", - "```python\n", - "raw: TargetSpec = (\"my-direction\", 0.05, some_tensor) # plain 3-tuple → TargetSpec\n", - "graph = attribute(prompt=prompt, model=model, attribution_targets=[raw])\n", - "```\n", - "\n", - "**Example — explicit `CustomTarget` namedtuple:**\n", - "\n", - "```python\n", - "from circuit_tracer.attribution.targets import CustomTarget\n", - "\n", - "target = CustomTarget(\n", - " token_str=\"logit(Austin)−logit(Dallas)\",\n", - " prob=abs(p_austin - p_dallas), # scalar weight\n", - " vec=unembed_austin - unembed_dallas, # shape: (d_model,)\n", - ")\n", - "graph = attribute(prompt=prompt, model=model, attribution_targets=[target])\n", - "```\n", - "\n", - "
" + "We will demo the use of all four input formats using the capital-city prompt you may be familiar with from other demos: the model must resolve *\"capital of the state containing Dallas\"* via multi-hop reasoning (Dallas → Texas → Austin). After comparing the top features discovered under each mode, we will apply interventions to elucidate two `CustomTarget` examples." ] }, { @@ -176,8 +128,218 @@ { "cell_type": "markdown", "metadata": { - "id": "dcZNR0egmS8l" + "id": "VXfD-5GrmS8l" + }, + "source": [ + "## Basic Attribution Target Modes" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This section explores the three simplest ways to specify attribution targets:\n", + "\n", + "1. **Automatic Salient Logit Targets** (`None`) — the default mode; auto-selects the most probable next tokens.\n", + "2. **Token-String Targets** (`Sequence[str]`) — attribute from explicit token surface forms.\n", + "3. **Token-ID Targets** (`torch.Tensor`) — attribute from specific vocabulary indices (pre-tokenized equivalent of string targets).\n", + "\n", + "> **Coming up:** After these basic modes, we explore two **custom attribution target** examples that let you attribute back from arbitrary residual-stream directions — a logit *difference* (`logit(Austin) − logit(Dallas)`) and an abstract *semantic concept* (`Capitals − States`)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "wx2XiXVjmS8l" + }, + "outputs": [], + "source": [ + "# Define the prompt, shared attribution parameters, and the three reference tokens (`▁Austin`, `▁Dallas`, `▁Texas`). 
\n", + "\n", + "prompt = \"Fact: the capital of the state containing Dallas is\"\n", + "token_x, token_y = \"▁Austin\", \"▁Dallas\"\n", + "\n", + "# Shared attribution kwargs (apply to all runs)\n", + "# Note: max_n_logits / desired_logit_prob only apply to salient-logit mode\n", + "attr_kwargs = dict(\n", + " batch_size=256,\n", + " max_feature_nodes=8192,\n", + " offload=\"disk\" if IN_COLAB else \"cpu\",\n", + " verbose=True,\n", + ")\n", + "\n", + "# Resolve token ids for key tokens\n", + "tokenizer = model.tokenizer\n", + "idx_x = tokenizer.encode(token_x, add_special_tokens=False)[-1]\n", + "idx_y = tokenizer.encode(token_y, add_special_tokens=False)[-1]\n", + "idx_texas = tokenizer.encode(\"▁Texas\", add_special_tokens=False)[-1]\n", + "\n", + "# Bind the tokenizer and key tokens for display helpers\n", + "display_topk = partial(\n", + " display_topk_token_predictions,\n", + " tokenizer=tokenizer,\n", + " key_tokens=[(token_x, idx_x), (token_y, idx_y), (\"▁Texas\", idx_texas)],\n", + ")\n", + "\n", + "# Show baseline token probabilities\n", + "input_ids = model.ensure_tokenized(prompt)\n", + "with torch.no_grad():\n", + " baseline_logits, _ = model.get_activations(input_ids)\n", + "\n", + "key_ids = [idx_x, idx_y, idx_texas]\n", + "key_labels = [token_x, token_y, \"▁Texas\"]\n", + "display_token_probs(baseline_logits, key_ids, key_labels, title=\"Baseline probabilities\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "RUn1YKnUmS8l" + }, + "source": [ + "### Automatic Target Selection — Salient Logits (`None`)\n", + "\n", + "When `attribution_targets` is `None` (the default), `AttributionTargets` auto-selects the most probable next tokens until `desired_logit_prob` cumulative probability is reached (capped at `max_n_logits`). This is the standard mode used by `attribute_demo.ipynb`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "2tLE4FzdmS8m" + }, + "outputs": [], + "source": [ + "graph_salient = attribute(\n", + " prompt=prompt, model=model,\n", + " max_n_logits=10, desired_logit_prob=0.95,\n", + " **attr_kwargs,\n", + ")\n", + "print(f\"Salient-logits graph: {len(graph_salient.logit_targets)} targets, \"\n", + " f\"{graph_salient.active_features.shape[0]} active features\")\n", + "\n", + "# Free CUDA memory before next run\n", + "cleanup_cuda()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "w3cdLLfJmS8m" }, + "source": [ + "### Token-String Targets — `Sequence[str]`\n", + "\n", + "Pass a list of token strings (e.g., `[\"▁Austin\", \"▁Dallas\"]`) to focus attribution on exactly those logits. Internally, each string is tokenized and its softmax probability and unembedding vector are computed automatically — you only need to supply the surface forms." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Vh8HPtimmS8m" + }, + "outputs": [], + "source": [ + "graph_str = attribute(\n", + " prompt=prompt, model=model,\n", + " attribution_targets=[token_x, token_y],\n", + " **attr_kwargs,\n", + ")\n", + "print(f\"String-targets graph: {len(graph_str.logit_targets)} targets, \"\n", + " f\"{graph_str.active_features.shape[0]} active features\")\n", + "\n", + "# Free CUDA memory before next run\n", + "cleanup_cuda()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Token-ID Targets — `torch.Tensor`\n", + "\n", + "Pass a tensor of vocabulary token IDs to attribute from specific indices. This is the pre-tokenized equivalent of the string-target mode above — internally, the same probabilities and unembedding vectors are computed. Use this mode when you already have token IDs (e.g., from a prior tokenization step) and want to skip the string→ID lookup." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Use the same token IDs as the string-target example above\n", + "tensor_targets = torch.tensor([idx_x, idx_y])\n", + "\n", + "graph_tensor = attribute(\n", + " prompt=prompt, model=model,\n", + " attribution_targets=tensor_targets,\n", + " **attr_kwargs,\n", + ")\n", + "print(f\"Tensor-targets graph: {len(graph_tensor.logit_targets)} targets, \"\n", + " f\"{graph_tensor.active_features.shape[0]} active features\")\n", + "\n", + "# Free CUDA memory before next run\n", + "cleanup_cuda()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom Attribution Targets\n", + "\n", + "Beyond the basic modes above, `AttributionTargets` also accepts a `Sequence[TargetSpec]` — fully specified custom targets that let you attribute toward **arbitrary directions** in the residual stream. This makes a vast experimental surface more accessible but we'll explore a couple examples in this tutorial:\n", + "\n", + "- **Logit Difference Target** — encodes the direction `logit(Austin) − logit(Dallas)`, surfacing features that drive the model to prefer one token *over* another rather than boosting either in isolation.\n", + "- **Semantic Concept Target** — encodes an abstract *Capitals − States* direction built from multiple (capital, state) pairs via vector rejection, isolating the *capital-of* relation from shared geography.\n", + "\n", + "See the expandable section below if you want a more detailed look at `CustomTarget` definition before we proceed with the examples below.\n", + "\n", + "
\n", + "TargetSpec / CustomTarget — field reference\n", + "\n", + "The `attribution_targets` argument to `attribute()` accepts a `Sequence[TargetSpec]` for fully custom residual-stream directions. Two convenience types are involved:\n", + "\n", + "**`CustomTarget(token_str, prob, vec)`** is a `NamedTuple` with three fields:\n", + "\n", + "| Field | Type | Description |\n", + "|---|---|---|\n", + "| `token_str` | `str` | Human-readable label for this target (e.g. `\"logit(Austin)−logit(Dallas)\"`) |\n", + "| `prob` | `float` | Scalar weight — typically the softmax probability of the token, or \\|p(x)−p(y)\\| for a contrast direction |\n", + "| `vec` | `Tensor (d_model,)` | The direction in residual-stream space to attribute toward |\n", + "\n", + "**`TargetSpec`** is a type alias for `CustomTarget | tuple[str, float, torch.Tensor]`. Either form is accepted — a raw 3-tuple is coerced to a `CustomTarget` namedtuple automatically before processing.\n", + "\n", + "**Example — raw tuple (coerced automatically):**\n", + "\n", + "```python\n", + "raw: TargetSpec = (\"my-direction\", 0.05, some_tensor) # plain 3-tuple → TargetSpec\n", + "graph = attribute(prompt=prompt, model=model, attribution_targets=[raw])\n", + "```\n", + "\n", + "**Example — explicit `CustomTarget` namedtuple:**\n", + "\n", + "```python\n", + "from circuit_tracer.attribution.targets import CustomTarget\n", + "\n", + "target = CustomTarget(\n", + " token_str=\"logit(Austin)−logit(Dallas)\",\n", + " prob=abs(p_austin - p_dallas), # scalar weight\n", + " vec=unembed_austin - unembed_dallas, # shape: (d_model,)\n", + ")\n", + "graph = attribute(prompt=prompt, model=model, attribution_targets=[target])\n", + "```\n", + "\n", + "
\n", + "\n", + "We first define two helper functions for building these custom targets, then construct and attribute from each one." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, "source": [ "### Target Builder Helpers\n", "\n", @@ -211,9 +373,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "id": "5XBwNyq4mS8l" - }, + "metadata": {}, "outputs": [], "source": [ "def _get_last_position_probs(model, prompt):\n", @@ -288,48 +448,21 @@ }, { "cell_type": "markdown", - "metadata": { - "id": "VXfD-5GrmS8l" - }, + "metadata": {}, "source": [ - "## Attribution Configuration\n", - "\n", - "Define the prompt, shared attribution parameters, and the three reference tokens (`▁Austin`, `▁Dallas`, `▁Texas`). This demo explores **four different modes** of specifying attribution targets:\n", - "\n", - "1. **Automatic Salient Logit Targets** (`None`) — the default mode; auto-selects the most probable next tokens.\n", - "2. **Token-String Targets** (`Sequence[str]`) — attribute from explicit token surface forms.\n", - "3. **Custom Logit Difference Target** (`CustomTarget`) — encodes the `logit(Austin) − logit(Dallas)` direction.\n", - "4. **Semantic Direction / Concept Target** (`CustomTarget`) — encodes an abstract *Capitals − States* concept direction.\n", + "### Custom Target Configuration\n", "\n", - "Two custom targets are built here to probe distinct aspects of the model’s reasoning:\n", - "\n", - "- **`build_custom_diff_target`** — encodes the direction `logit(Austin) − logit(Dallas)` in the residual stream. Because the attribution graph is anchored to this *contrast* direction, it surfaces features that specifically drive the model to choose Austin *over* Dallas, not merely any feature that increases Austin’s probability in isolation.\n", - "\n", - "- **`build_semantic_concept_target`** — encodes an abstract *Capitals − States* direction built from four (capital, state) pairs via vector rejection. 
We can generate an attribution graph associated with this *capital-of* sense and exploit this target to amplify or dampen logits along this semantic axis." + "Build the two custom targets and display a summary of all attribution configurations." ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "id": "wx2XiXVjmS8l" - }, + "metadata": {}, "outputs": [], "source": [ - "prompt = \"Fact: the capital of the state containing Dallas is\"\n", - "token_x, token_y = \"▁Austin\", \"▁Dallas\"\n", - "\n", - "# Shared attribution kwargs (apply to all runs)\n", - "# Note: max_n_logits / desired_logit_prob only apply to salient-logit mode\n", - "attr_kwargs = dict(\n", - " batch_size=256,\n", - " max_feature_nodes=8192,\n", - " offload=\"disk\" if IN_COLAB else \"cpu\",\n", - " verbose=True,\n", - ")\n", - "\n", "# Build the custom diff-target: logit(Austin) − logit(Dallas)\n", - "custom_target, idx_x, idx_y = build_custom_diff_target(\n", + "custom_target, _, _ = build_custom_diff_target(\n", " model, prompt, token_x, token_y, backend=backend\n", ")\n", "\n", @@ -341,106 +474,21 @@ " label=\"Capitals − States\", backend=backend,\n", ")\n", "\n", - "# Also track Texas — the intermediate hop in the multi-hop chain\n", - "idx_texas = model.tokenizer.encode(\"▁Texas\", add_special_tokens=False)[-1]\n", - "\n", - "# Bind the tokenizer and key tokens for display helpers\n", - "display_topk = partial(\n", - " display_topk_token_predictions,\n", - " tokenizer=model.tokenizer,\n", - " key_tokens=[(token_x, idx_x), (token_y, idx_y), (\"▁Texas\", idx_texas)],\n", - ")\n", - "\n", - "# Show baseline token probabilities\n", - "input_ids = model.ensure_tokenized(prompt)\n", - "with torch.no_grad():\n", - " baseline_logits, _ = model.get_activations(input_ids)\n", - "\n", - "key_ids = [idx_x, idx_y, idx_texas]\n", - "key_labels = [token_x, token_y, \"▁Texas\"]\n", - "display_token_probs(baseline_logits, key_ids, key_labels, title=\"Baseline probabilities\")\n", - "\n", 
"display_attribution_config(\n", " token_pairs=[(token_x, idx_x), (token_y, idx_y), (\"▁Texas\", idx_texas)],\n", " target_pairs=[(\"Logit diff\", custom_target), (\"Semantic concept\", semantic_target)],\n", ")" ] }, - { - "cell_type": "markdown", - "metadata": { - "id": "RUn1YKnUmS8l" - }, - "source": [ - "### Automatic Target Selection — Salient Logits (`None`)\n", - "\n", - "When `attribution_targets` is `None` (the default), `AttributionTargets` auto-selects the most probable next tokens until `desired_logit_prob` cumulative probability is reached (capped at `max_n_logits`). This is the standard mode used by `attribute_demo.ipynb`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "2tLE4FzdmS8m" - }, - "outputs": [], - "source": [ - "graph_salient = attribute(\n", - " prompt=prompt, model=model,\n", - " max_n_logits=10, desired_logit_prob=0.95,\n", - " **attr_kwargs,\n", - ")\n", - "print(f\"Salient-logits graph: {len(graph_salient.logit_targets)} targets, \"\n", - " f\"{graph_salient.active_features.shape[0]} active features\")\n", - "\n", - "# Free CUDA memory before next run\n", - "cleanup_cuda()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "w3cdLLfJmS8m" - }, - "source": [ - "### Token-String Targets — `Sequence[str]`\n", - "\n", - "Pass a list of token strings (e.g., `[\"▁Austin\", \"▁Dallas\"]`) to focus attribution on exactly those logits. Internally, each string is tokenized and its softmax probability and unembedding vector are computed automatically — you only need to supply the surface forms." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Vh8HPtimmS8m" - }, - "outputs": [], - "source": [ - "graph_str = attribute(\n", - " prompt=prompt, model=model,\n", - " attribution_targets=[token_x, token_y],\n", - " **attr_kwargs,\n", - ")\n", - "print(f\"String-targets graph: {len(graph_str.logit_targets)} targets, \"\n", - " f\"{graph_str.active_features.shape[0]} active features\")\n", - "\n", - "# Free CUDA memory before next run\n", - "cleanup_cuda()" - ] - }, { "cell_type": "markdown", "metadata": { "id": "EQuFE-eimS8m" }, "source": [ - "### Custom Targets — `Sequence[TargetSpec]`\n", - "\n", - "Pass a `CustomTarget` (or any `TargetSpec` — a tuple of `(token_str, prob, vec)`) that encodes an arbitrary direction in the residual stream.\n", + "### Logit Difference Target\n", "\n", - "#### Logit Difference Target\n", - "\n", - "Here the direction is `logit(Austin) − logit(Dallas)`, constructing an attribution graph that more narrowly surfaces features driving the selection of the *correct* answer over the surface-level attractor." + "Pass a `CustomTarget` (or any `TargetSpec` — a tuple of `(token_str, prob, vec)`) that encodes a contrast direction in the residual stream. Here the direction is `logit(Austin) − logit(Dallas)`, constructing an attribution graph that more narrowly surfaces features driving the selection of the *correct* answer over the surface-level attractor." ] }, { @@ -464,7 +512,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### Semantic Direction (Concept Target)\n", + "### Semantic Direction (Concept Target)\n", "\n", "Instead of a pairwise logit difference, we can attribute to an **abstract concept direction** in the residual stream. We build a `CustomTarget` via vector rejection: for each (capital, state) pair, project the capital vector onto the state vector and subtract that projection, leaving the pure 'capital-ness' component." 
] @@ -495,7 +543,9 @@ "source": [ "## Compare Top Features\n", "\n", - "Extract the top-10 features from each graph (ranked by multi-hop influence) and display them side by side. Feature indices link to their [Neuronpedia](https://www.neuronpedia.org/) dashboards. The *Custom Target* column highlights features that specifically drive the Austin-vs-Dallas logit difference — the multi-hop reasoning circuit (Dallas → Texas → capital → Austin). The *Concept Target* column surfaces features associated with the more general *capital-of* relation, which partially overlaps with the multi-hop chain but also includes distinct features that may reflect more abstract capital-related reasoning." + "Extract the top-10 features from each graph (ranked by multi-hop influence) and display them side by side. Feature indices link to their [Neuronpedia](https://www.neuronpedia.org/) dashboards. The *Custom Target* column highlights features that specifically drive the Austin-vs-Dallas logit difference — the multi-hop reasoning circuit (Dallas → Texas → capital → Austin). The *Concept Target* column surfaces features associated with the more general *capital-of* relation, which partially overlaps with the multi-hop chain but also includes distinct features that may reflect more abstract capital-related reasoning.\n", + "\n", + "> **Note:** The `torch.Tensor` target example is omitted from this comparison because it uses the same token IDs as the `Sequence[str]` example — the resulting graphs are identical." 
] }, { diff --git a/tests/test_tutorial_notebook_backends.py b/tests/test_tutorial_notebook_backends.py index 1a3d3ce4..4148c5c9 100644 --- a/tests/test_tutorial_notebook_backends.py +++ b/tests/test_tutorial_notebook_backends.py @@ -973,6 +973,60 @@ def test_attribution_targets_string(models_cpu, dallas_austin_prompt): ) +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_attribution_targets_tensor(models_cpu, dallas_austin_prompt): + """Test attribution with torch.Tensor targets consistency between TL and NNSight. + + Uses the same token IDs as the string-target test (pre-tokenized equivalent). + """ + model_nnsight, model_tl = models_cpu + # Resolve token IDs for Austin and Dallas (same as string-target test) + tok = model_nnsight.tokenizer + idx_austin = tok.encode("▁Austin", add_special_tokens=False)[-1] + idx_dallas = tok.encode("▁Dallas", add_special_tokens=False)[-1] + tensor_targets = torch.tensor([idx_austin, idx_dallas]) + + # --- NNSight backend --- + with clean_cuda(model_nnsight): + graph_nnsight = attribute_nnsight( + dallas_austin_prompt, + model_nnsight, + attribution_targets=tensor_targets, + verbose=False, + batch_size=256, + ) + nn_active = graph_nnsight.active_features.cpu() + nn_selected = graph_nnsight.selected_features.cpu() + nn_tokens = [t.token_str for t in graph_nnsight.logit_targets] + nn_adj = graph_nnsight.adjacency_matrix.cpu() + + # --- TL backend --- + with clean_cuda(model_tl): + graph_tl = attribute_transformerlens( + dallas_austin_prompt, + model_tl, + attribution_targets=tensor_targets, + verbose=False, + batch_size=128, + ) + tl_active = graph_tl.active_features.cpu() + tl_selected = graph_tl.selected_features.cpu() + tl_tokens = [t.token_str for t in graph_tl.logit_targets] + tl_adj = graph_tl.adjacency_matrix.cpu() + + # --- Compare CPU tensors --- + assert (nn_active == tl_active).all(), ( + "Tensor-target active features don't match between backends" + ) + assert (nn_selected == 
tl_selected).all(), ( + "Tensor-target selected features don't match between backends" + ) + assert nn_tokens == tl_tokens, f"Tensor-target logit tokens differ: {nn_tokens} vs {tl_tokens}" + assert torch.allclose(nn_adj, tl_adj, atol=5e-4, rtol=1e-5), ( + f"Tensor-target adjacency matrices differ by max {(nn_adj - tl_adj).abs().max()}" + ) + + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_attribution_targets_logit_diff(models_cpu, dallas_austin_prompt): """Test attribution with CustomTarget consistency between TL and NNSight."""