-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathmodel.py
More file actions
executable file
·1580 lines (1339 loc) · 63.1 KB
/
model.py
File metadata and controls
executable file
·1580 lines (1339 loc) · 63.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from typing import Dict, List, Tuple
from torch.nn.utils.parametrizations import weight_norm
from torch.nn import TransformerEncoder, TransformerEncoderLayer
import esm
import pandas as pd
from tqdm import tqdm
import tempfile
from pathlib import Path
import mdtraj as md
import os
from egnn_pytorch import EGNN
from transformers import AutoTokenizer, EsmForProteinFolding
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
from utils import *
from dataset import *
class EarlyStopping:
    """Stop training when the validation AUC stops improving.

    Only ``val_auc`` is monitored (the original docstring also claimed
    val_loss was tracked, but no loss is ever passed in): the model is
    checkpointed whenever val_auc exceeds the best value seen so far by
    more than ``delta``, and ``early_stop`` is set after ``patience``
    consecutive non-improving calls.
    """
    def __init__(self, patience=10, verbose=True, delta=0.0, save_path='checkpoint.pt'):
        """
        Args:
            patience (int): Non-improving epochs tolerated before stopping.
            verbose (bool): Print progress messages.
            delta (float): Minimum AUC increase that counts as improvement.
            save_path (str): Where the best model's state_dict is written.
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.early_stop = False
        self.delta = delta
        self.save_path = save_path
        # best_loss is never updated; kept only for backward compatibility
        # with code that may read this attribute.
        self.best_loss = np.inf
        self.best_auc = -np.inf

    def __call__(self, val_auc, model):
        """Record this epoch's validation AUC; checkpoint on improvement."""
        if val_auc > self.best_auc + self.delta:
            self.best_auc = val_auc
            self.save_checkpoint(model, val_auc)
            self.counter = 0
        else:
            self.counter += 1
            if self.verbose:
                print(f"EarlyStopping counter: {self.counter} out of {self.patience}")
            if self.counter >= self.patience:
                self.early_stop = True

    def save_checkpoint(self, model, val_auc):
        """Persist the current best model's state_dict to save_path."""
        if self.verbose:
            print(f"Validation improved → Saving model (Score={val_auc:.4f}) to {self.save_path}")
        torch.save(model.state_dict(), self.save_path)
# ============================================================================
# ESM2 Embedding via HuggingFace
# ============================================================================
class ESM2Encoder(nn.Module):
    """Compute and disk-cache per-residue ESM2 (esm2_t33_650M_UR50D) embeddings."""
    def __init__(self,
                 device="cuda:0",
                 layer=33,
                 cache_dir='cache'):
        """
        Initialize an ESM2 encoder.
        Args:
            device (str): Device to run on, e.g. 'cuda:0', 'cuda:1', or 'cpu'.
            layer (int): Layer number from which to extract representations.
            cache_dir (str): Cache directory name (resolved relative to this
                file in _cache_path); when None, this file's own directory is used.
        """
        super().__init__()
        self.device = device
        self.layer = layer
        if cache_dir is None:
            cache_dir = os.path.dirname(os.path.abspath(__file__))
        self.cache_dir = cache_dir
        os.makedirs(self.cache_dir, exist_ok=True)
        # Pretrained 33-layer ESM2 model, frozen in eval mode.
        self.model, self.alphabet = esm.pretrained.esm2_t33_650M_UR50D()
        self.batch_converter = self.alphabet.get_batch_converter()
        self.model = self.model.eval().to(device)
    def _cache_path(self, prefix):
        """Return the cache file path <this file's dir>/<cache_dir>/<prefix>_esm2_layer<N>.pt."""
        base_dir = os.path.dirname(os.path.abspath(__file__))
        base_dir = base_dir + "/" + self.cache_dir
        os.makedirs(base_dir, exist_ok=True)
        return os.path.join(base_dir, f"{prefix}_esm2_layer{self.layer}.pt")
    def save_obj(self, obj, path):
        """Save object to a file (no compression)."""
        torch.save(obj, path)
    def load_obj(self, path):
        """Load object from a file (no compression)."""
        return torch.load(path, map_location="cpu", weights_only=False)
    @torch.no_grad()
    def _embed_batch(self, batch_data):
        """Embed a list of (label, sequence) pairs; returns one [L_i, D] tensor per sequence."""
        batch_labels, batch_strs, batch_tokens = self.batch_converter(batch_data)
        batch_tokens = batch_tokens.to(self.device)
        results = self.model(batch_tokens, repr_layers=[self.layer], return_contacts=False)
        token_representations = results["representations"][self.layer]
        # Count non-padding tokens per sequence to find each true length.
        batch_lens = (batch_tokens != self.alphabet.padding_idx).sum(1)
        seq_reprs = []
        for i, tokens_len in enumerate(batch_lens):
            # Slice 1:len-1 drops the first and last special tokens added by the converter.
            seq_repr = token_representations[i, 1:tokens_len-1].cpu()
            seq_reprs.append(seq_repr)
        return seq_reprs
    @torch.no_grad()
    def forward(self, df, seq_col, prefix, batch_size=64, re_embed=False, cache_save=True):
        """
        Add or update embeddings for sequences in a DataFrame.
        - If there are new sequences, automatically update the dictionary and save.
        - If re_embed=True, force re-computation of all sequences.
        Returns:
            dict: {sequence string -> per-residue embedding tensor}.
        """
        cache_path = self._cache_path(prefix)
        emb_dict = {}
        if os.path.exists(cache_path) and not re_embed:
            print(f"[ESM2] Loading cached embeddings from {cache_path}")
            emb_dict = self.load_obj(cache_path)
        else:
            if re_embed:
                print(f"[ESM2] Re-embedding all sequences for {prefix}")
            else:
                print(f"[ESM2] No existing cache for {prefix}, will create new.")
        # Normalize: strip surrounding whitespace and upper-case; non-strings dropped.
        seqs = [str(s).strip().upper() for s in df[seq_col].tolist() if isinstance(s, str)]
        unique_seqs = sorted(set(seqs))
        new_seqs = [s for s in unique_seqs if s not in emb_dict]
        if new_seqs:
            print(f"[ESM2] Found {len(new_seqs)} new sequences → computing embeddings...")
            data = [(str(i), s) for i, s in enumerate(new_seqs)]
            for i in tqdm(range(0, len(data), batch_size), desc=f"ESM2 update ({prefix})"):
                batch = data[i:i+batch_size]
                embs = self._embed_batch(batch)
                for (_, seq), emb in zip(batch, embs):
                    emb_dict[seq] = emb.clone()
            if cache_save:
                print(f"[ESM2] Updating cache with new sequences")
                self.save_obj(emb_dict, cache_path)
        else:
            print(f"[ESM2] No new sequences for {prefix}, using existing cache")
        return emb_dict
# ============================================================================
# ESMFold (transformers)
# ============================================================================
class ESMFoldPredictorHF(nn.Module):
    """Thin wrapper around the HuggingFace ESMFold model for structure prediction."""
    def __init__(self,
                 model_name="facebook/esmfold_v1",
                 cache_dir=None,
                 device='cpu',
                 allow_tf32=True):
        """
        Args:
            model_name (str): HuggingFace model id to load.
            cache_dir (str): Optional HuggingFace download cache directory.
            device (str): Device for inference, e.g. 'cpu' or 'cuda:0'.
            allow_tf32 (bool): Globally enable TF32 matmul/cudnn kernels.
        """
        super().__init__()
        self.model_name = model_name
        self.cache_dir = cache_dir
        self.device = device
        if allow_tf32:
            # NOTE: these are process-global torch settings, not instance-local.
            torch.backends.cuda.matmul.allow_tf32 = True
            torch.backends.cudnn.allow_tf32 = True
        # tokenizer and model
        print(f"Loading ESMFold model {model_name} on {device}... {'with' if cache_dir else 'without'} cache_dir: {cache_dir}")
        self.tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
        self.model = EsmForProteinFolding.from_pretrained(
            model_name, low_cpu_mem_usage=True, cache_dir=cache_dir
        ).eval().to(self.device)
    @torch.no_grad()
    def infer_pdb_str(self, seq: str) -> str:
        """Fold `seq` and return the predicted structure as a PDB-format string."""
        pdb_str = self.model.infer_pdb(seq)
        return pdb_str
    @torch.no_grad()
    def forward_raw(self, seq: str):
        """Fold `seq` and return the raw model output (no special tokens added)."""
        inputs = self.tokenizer([seq], return_tensors="pt", add_special_tokens=False)
        inputs = {k: v.to(self.device) for k, v in inputs.items()}
        outputs = self.model(**inputs)
        return outputs  # ESMFoldOutput
# Maximum accessible surface area per residue type (Tien scale, units match
# the SASA computed below); used to normalize absolute SASA into relative
# solvent accessibility (RSA) in StructureFeatureExtractorNoDSSP._rsa.
MAX_ASA_TIEN = {
    "ALA": 129.0, "ARG": 274.0, "ASN": 195.0, "ASP": 193.0, "CYS": 167.0,
    "GLN": 225.0, "GLU": 223.0, "GLY": 104.0, "HIS": 224.0, "ILE": 197.0,
    "LEU": 201.0, "LYS": 236.0, "MET": 224.0, "PHE": 240.0, "PRO": 159.0,
    "SER": 155.0, "THR": 172.0, "TRP": 285.0, "TYR": 263.0, "VAL": 174.0,
}
# DSSP 8-state secondary-structure code -> one-hot index ('-' folded into coil).
SS8_INDEX = {"H":0,"B":1,"E":2,"G":3,"I":4,"T":5,"S":6,"C":7,"-":7}
class StructureFeatureExtractorNoDSSP(nn.Module):
    """Extract per-residue structure features and CA coordinates from a PDB file.

    The 17-dim feature vector per residue is the concatenation of:
    sin/cos of phi, psi, omega (6) + 8-state secondary-structure one-hot (8)
    + RSA (1) + contact count (1) + pLDDT (1).
    NOTE(review): despite the class name, _ss8 still calls md.compute_dssp.
    """
    def __init__(self, device="cpu"):
        super().__init__()
        self.device = device
        self.in_dim = 6 + 8 + 1 + 1 + 1 # 17
        self.to(torch.device(self.device))
    @torch.no_grad()
    def _angles(self, traj):
        """Return [L, 6] sin/cos of backbone dihedrals; missing angles stay 0."""
        L = traj.n_residues
        sphi = np.zeros(L, dtype=np.float32); cphi = np.zeros(L, dtype=np.float32)
        spsi = np.zeros(L, dtype=np.float32); cpsi = np.zeros(L, dtype=np.float32)
        someg = np.zeros(L, dtype=np.float32); comeg = np.zeros(L, dtype=np.float32)
        # 1) phi: (C_{i-1}, N_i, CA_i, C_i) —— Current residue i can be located using atoms[1] (N_i)
        phi_idx, phi_vals = md.compute_phi(traj) # phi_vals: (1, n_phi)
        if phi_vals.size > 0:
            for k, atoms in enumerate(phi_idx):
                res_i = traj.topology.atom(int(atoms[1])).residue.index # N_i residue index
                if 0 <= res_i < L:
                    ang = float(phi_vals[0, k])
                    sphi[res_i] = np.sin(ang); cphi[res_i] = np.cos(ang)
        # 2) psi: (N_i, CA_i, C_i, N_{i+1}) —— Current residue i can be located using atoms[1] (CA_i)
        psi_idx, psi_vals = md.compute_psi(traj)
        if psi_vals.size > 0:
            for k, atoms in enumerate(psi_idx):
                res_i = traj.topology.atom(int(atoms[1])).residue.index # CA_i
                if 0 <= res_i < L:
                    ang = float(psi_vals[0, k])
                    spsi[res_i] = np.sin(ang); cpsi[res_i] = np.cos(ang)
        # 3) omega: (CA_i, C_i, N_{i+1}, CA_{i+1}) —— Current residue i can be located using atoms[0] (CA_i)
        omg_idx, omg_vals = md.compute_omega(traj)
        if omg_vals.size > 0:
            for k, atoms in enumerate(omg_idx):
                res_i = traj.topology.atom(int(atoms[0])).residue.index # CA_i
                if 0 <= res_i < L:
                    ang = float(omg_vals[0, k])
                    someg[res_i] = np.sin(ang); comeg[res_i] = np.cos(ang)
        angles_feat = np.stack([sphi, cphi, spsi, cpsi, someg, comeg], axis=-1) # [L, 6]
        return angles_feat.astype(np.float32)
    @torch.no_grad()
    def _ss8(self, traj: md.Trajectory):
        """Return [L, 8] one-hot DSSP 8-state secondary structure (unknown codes -> coil)."""
        ss = md.compute_dssp(traj, simplified=False)[0]
        L = traj.n_residues
        onehot = np.zeros((L, 8), dtype=np.float32)
        for i, ch in enumerate(ss):
            onehot[i, SS8_INDEX.get(ch, 7)] = 1.0
        return onehot
    @torch.no_grad()
    def _rsa(self, traj: md.Trajectory):
        """Return [L, 1] relative solvent accessibility, clipped to [0, 1]."""
        asa = md.shrake_rupley(traj, mode="residue")[0] # (L,)
        rsa = np.zeros_like(asa, dtype=np.float32)
        for i, res in enumerate(traj.topology.residues):
            # Residues not in the lookup table (e.g. non-standard) get RSA 0.
            max_asa = MAX_ASA_TIEN.get(res.name.upper(), None)
            rsa[i] = 0.0 if not max_asa else float(asa[i] / max_asa)
        return np.clip(rsa, 0.0, 1.0)[:, None]
    @torch.no_grad()
    def _contact_count(self, traj: md.Trajectory, cutoff_nm=0.8):
        """Return [L, 1] count of other residues within cutoff_nm of each residue."""
        L = traj.n_residues
        ca_atoms = traj.topology.select("name CA")
        if len(ca_atoms) == L:
            coors = traj.xyz[0, ca_atoms, :] # nm
        else:
            # Not every residue has a CA atom: fall back to per-residue centroids.
            xyz = traj.xyz[0]
            coors = []
            for res in traj.topology.residues:
                idxs = [a.index for a in res.atoms]
                coors.append(xyz[idxs, :].mean(axis=0))
            coors = np.array(coors, dtype=np.float32)
        diff = coors[:, None, :] - coors[None, :, :]
        dist = np.sqrt((diff**2).sum(-1)) # nm
        mask = (dist < cutoff_nm).astype(np.float32)
        # Exclude self-contacts from the count.
        np.fill_diagonal(mask, 0.0)
        cnt = mask.sum(axis=1)
        return cnt[:, None].astype(np.float32)
    @torch.no_grad()
    def _plddt(self, pdb_file: str):
        """Return [L, 1] per-residue pLDDT in [0, 1], read from PDB B-factors."""
        # Use Biopython to read PDB B-factor (ESMFold/AlphaFold writes pLDDT here)
        from Bio.PDB import PDBParser
        import numpy as np
        parser = PDBParser(QUIET=True)
        structure = parser.get_structure("prot", pdb_file)
        model = structure[0]
        res_plddt = []
        for chain in model:
            for residue in chain:
                atoms = list(residue.get_atoms())
                if len(atoms) == 0:
                    res_plddt.append(0.0)
                    continue
                # Mean B-factor of atoms in the residue
                bvals = [float(atom.get_bfactor()) for atom in atoms]
                res_plddt.append(float(np.mean(bvals)))
        # Normalize to [0,1]
        plddt = np.array(res_plddt, dtype=np.float32) / 100.0
        plddt = np.clip(plddt, 0.0, 1.0)
        return plddt[:, None] # [L,1]
    @torch.no_grad()
    def _parse_and_features(self, pdb_file: str):
        """Parse a PDB file -> (CA coordinates in Å [L,3], scalar features [L,17])."""
        traj = md.load(pdb_file)
        L = traj.n_residues
        angles = self._angles(traj) # [L,6]
        ss8 = self._ss8(traj) # [L,8]
        rsa = self._rsa(traj) # [L,1]
        cnt = self._contact_count(traj) # [L,1]
        plddt = self._plddt(pdb_file) # [L,1]
        feats = np.concatenate([angles, ss8, rsa, cnt, plddt], axis=1).astype(np.float32) # [L,17]
        ca_atoms = traj.topology.select("name CA")
        if len(ca_atoms) == L:
            coors_nm = traj.xyz[0, ca_atoms, :]
        else:
            # Same centroid fallback as _contact_count for residues lacking CA.
            xyz = traj.xyz[0]
            res_coords = []
            for res in traj.topology.residues:
                idxs = [a.index for a in res.atoms]
                res_coords.append(xyz[idxs, :].mean(axis=0))
            coors_nm = np.array(res_coords, dtype=np.float32)
        coors_ang = coors_nm * 10.0 # nm -> Å
        return coors_ang.astype(np.float32), feats # [L,3], [L,17]
    @torch.no_grad()
    def forward(self, pdb_file: str):
        """Return (scalars [N,17], coords [N,3]) torch tensors on self.device."""
        coors_ang, scalars = self._parse_and_features(pdb_file)
        coors = torch.tensor(coors_ang, dtype=torch.float32, device=self.device) # [N,3]
        scalars = torch.tensor(scalars, dtype=torch.float32, device=self.device) # [N,17]
        return scalars, coors # [N,17], [N,3]
import uuid
class ResiduePipelineWithHFESM:
    """sequence -> (structure scalar features, CA coordinates).

    Folds the sequence with ESMFold (HuggingFace), writes the predicted PDB
    to disk, then extracts residue-level structure features from that file.
    """
    def __init__(self,
                 esm_model_name="facebook/esmfold_v1",
                 cache_dir=None,
                 esm_device='cpu',
                 allow_tf32=True
                 ):
        self.esm = ESMFoldPredictorHF(esm_model_name, cache_dir, esm_device, allow_tf32)
        self.struct_encoder = StructureFeatureExtractorNoDSSP(device=esm_device)
        self.cache_dir = cache_dir

    @torch.no_grad()
    def __call__(self, seq: str, save_pdb_path: str = None) -> torch.Tensor:
        """
        Args:
            seq: Protein sequence to fold.
            save_pdb_path: Where to keep the predicted PDB. When None, a unique
                temporary file is created and removed after feature extraction.
        Returns:
            (struct_emb [L, 17], struct_coords [L, 3]) torch tensors.
        """
        pdb_str = self.esm.infer_pdb_str(seq)
        created_temp = False
        if save_pdb_path is None:
            tmpdir = self.cache_dir if self.cache_dir is not None else tempfile.gettempdir()
            # uuid keeps concurrent callers from clobbering each other's file.
            save_pdb_path = str(Path(tmpdir) / f"esmfold_{uuid.uuid4().hex}.pdb")
            created_temp = True
        try:
            Path(save_pdb_path).write_text(pdb_str)
            struct_emb, struct_coords = self.struct_encoder(save_pdb_path)
        finally:
            # Remove the temp file even if feature extraction raises; the
            # previous version leaked the file on error.
            if created_temp and os.path.exists(save_pdb_path):
                os.remove(save_pdb_path)
        return struct_emb, struct_coords
def sanitize_protein_seq(seq: str) -> str:
    """Normalize a raw protein sequence string.

    Strips all whitespace, upper-cases, and drops any character outside the
    extended amino-acid alphabet (20 standard letters plus X, B, Z, J, U, O).
    Non-string input yields the empty string.
    """
    if not isinstance(seq, str):
        return ""
    valid = set("ACDEFGHIKLMNPQRSTVWYXBZJUO")
    compact = "".join(seq.split()).upper()
    return "".join(ch for ch in compact if ch in valid)
@torch.no_grad()
def batch_embed_to_dicts(
    df: pd.DataFrame,
    seq_col: str,
    pipeline,
    show_progress: bool = True,
) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor], List[Tuple[str, str]]]:
    """
    Run `pipeline` on every unique sanitized sequence in df[seq_col].

    Args:
        df: DataFrame holding raw sequence strings.
        seq_col: Name of the column containing the sequences.
        pipeline: Callable seq -> (embedding [L, D], coords [L, 3]) tensors.
        show_progress: Display a tqdm progress bar.
    Returns:
        - emb_dict:  {seq -> z(torch.Tensor[L, D])}
        - coord_dict:{seq -> coords(torch.Tensor[L, 3])}
        - failures:  [(seq, err_msg), ...] for sequences whose prediction raised.
    """
    raw_list = df[seq_col].astype(str).tolist()
    seqs = [s for s in (sanitize_protein_seq(r) for r in raw_list) if s]
    uniq_seqs = sorted(set(seqs))
    logger.info(f"Total rows: {len(df)}, valid seqs: {len(seqs)}, unique: {len(uniq_seqs)}")
    emb_dict: Dict[str, torch.Tensor] = {}
    coord_dict: Dict[str, torch.Tensor] = {}
    failures: List[Tuple[str, str]] = []
    # Wrap in tqdm exactly once; the previous code wrapped the iterator twice,
    # nesting progress bars and showing one even when show_progress=False.
    iterator = tqdm(uniq_seqs, desc="ESMfold Predicting structure...") if show_progress else uniq_seqs
    for seq in iterator:
        if seq in emb_dict:
            continue
        try:
            z_t, c_t = pipeline(seq)  # z: [L, D], coords: [L, 3] (torch.Tensor)
            emb_dict[seq] = z_t.detach().float().cpu()
            coord_dict[seq] = c_t.detach().float().cpu()
        except Exception as e:
            failures.append((seq, repr(e)))
    logger.info(f"[DONE] OK: {len(emb_dict)}, Failed: {len(failures)}")
    if failures:
        # Use a %s placeholder: the old call passed failures[:3] as a bare
        # positional argument with no placeholder, which breaks log formatting.
        logger.error("[SAMPLE failures] %s", failures[:3])
    return emb_dict, coord_dict, failures
class ESMFoldEncoder(nn.Module):
    """Batch ESMFold feature provider with on-disk caching per chain column.

    forward() populates self.dict with {chain}_feat / {chain}_coord mappings
    (sequence -> tensors) and returns the per-row feature/coordinate lists.
    """
    def __init__(self, model_name="facebook/esmfold_v1", esm_cache_dir="esm_cache", cache_dir="cache"):
        """
        Args:
            model_name (str): HuggingFace ESMFold model id.
            esm_cache_dir (str): HuggingFace download cache directory.
            cache_dir (str): Directory (relative to this file) for feature caches.
        """
        super(ESMFoldEncoder, self).__init__()
        self.model_name = model_name
        self.esm_cache_dir = esm_cache_dir
        self.cache_dir = cache_dir
    def save_obj(self, obj, path):
        """Save object to a file (no compression)."""
        torch.save(obj, path)
    def load_obj(self, path):
        """Load object from a file (no compression)."""
        return torch.load(path, map_location='cpu', weights_only=False)
    def load_esm_dict(self, device, df_data, chain, re_embed, cache_save):
        """Load or build the feature/coordinate dicts for `chain` into self.dict.

        NOTE(review): expects self.dict to already exist (forward() creates it
        right before calling this method).
        """
        def _clean_unique(series: pd.Series) -> list:
            # Sanitize every sequence and return the sorted unique set.
            cleaned = []
            for s in series.astype(str).tolist():
                ss = sanitize_protein_seq(s)
                if ss:
                    cleaned.append(ss)
            return sorted(set(cleaned))
        def _retry_embed_df(
            df: pd.DataFrame,
            chain: str,
            max_retries: int = 2,
            show_progress: bool = True,
        ):
            """
            Try to embed protein sequences with retries on failures.
            Args:
                df (pd.DataFrame): A DataFrame containing a column `chain` with sequences.
                chain (str): The column name containing the sequences (e.g., "alpha", "beta").
                max_retries (int): Maximum number of retries for failed sequences.
                show_progress (bool): Whether to display tqdm progress bars.
            Returns:
                feat_dict (Dict[str, torch.Tensor]): {sequence -> embedding tensor [L, D]}.
                coord_dict (Dict[str, torch.Tensor]): {sequence -> coordinate tensor [L, 3]}.
                failures (List[Tuple[str, str]]): List of (sequence, error_message) that still failed after retries.
            """
            # Pipeline is built per call; `device` is closed over from load_esm_dict.
            pipeline = ResiduePipelineWithHFESM(
                esm_model_name=self.model_name,
                cache_dir=self.esm_cache_dir,
                esm_device=device
            )
            # 1. First attempt
            feat_dict, coord_dict, failures = batch_embed_to_dicts(
                df, chain, pipeline, show_progress=show_progress
            )
            # 2. Retry loop for failed sequences
            tries = 0
            while failures and tries < max_retries:
                tries += 1
                retry_seqs = [s for s, _ in failures]
                logger.info(f"[retry {tries}/{max_retries}] {len(retry_seqs)} sequences")
                retry_df = pd.DataFrame({chain: retry_seqs})
                f2, c2, failures = batch_embed_to_dicts(
                    retry_df, chain, pipeline, show_progress=show_progress
                )
                feat_dict.update(f2)
                coord_dict.update(c2)
            return feat_dict, coord_dict, failures
        def update_with_new_seqs(feat_dict, coord_dict, chain):
            # Embed any sequences present in df_data but missing from the cache,
            # then optionally persist the merged dicts.
            base_dir = os.path.dirname(os.path.abspath(__file__))
            base_dir = base_dir + "/" + self.cache_dir
            os.makedirs(base_dir, exist_ok=True)
            path_feat = os.path.join(base_dir, f"{chain}_feat_dict.pt")
            path_coords = os.path.join(base_dir, f"{chain}_coord_dict.pt")
            all_seqs_clean = _clean_unique(df_data[chain])
            new_seqs = [s for s in all_seqs_clean if s not in feat_dict]
            if not new_seqs:
                logger.info(f"No new {chain} sequences found")
                return feat_dict, coord_dict
            logger.info(f"Found new {chain} sequences, embedding...")
            df_new = pd.DataFrame({chain: new_seqs})
            new_feat_dict, new_coord_dict, failures = _retry_embed_df(df_new, chain, max_retries=100)
            feat_dict.update(new_feat_dict)
            coord_dict.update(new_coord_dict)
            if cache_save:
                self.save_obj(feat_dict, path_feat)
                self.save_obj(coord_dict, path_coords)
                logger.info(f"Updated and saved {path_feat} and {path_coords}")
            if failures:
                for seq, err in failures:
                    logger.error(f"[create] failed: {seq} | {err}")
            return feat_dict, coord_dict
        def get_or_create_dict(chain):
            # Load cached dicts if present (and re_embed is False), otherwise
            # embed every unique sequence from scratch.
            base_dir = os.path.dirname(os.path.abspath(__file__)) + "/" + self.cache_dir
            os.makedirs(base_dir, exist_ok=True)
            path_feat = os.path.join(base_dir, f"{chain}_feat_dict.pt")
            path_coords = os.path.join(base_dir, f"{chain}_coord_dict.pt")
            failures = []
            if os.path.exists(path_feat) and not re_embed:
                logger.info(f"Loading {path_feat} and {path_coords}")
                feat_dict = self.load_obj(path_feat)
                coord_dict = self.load_obj(path_coords)
            else:
                logger.info(f"{path_feat} and {path_coords} not found or re_embed=True, generating...")
                unique_seqs = _clean_unique(df_data[chain])
                df_uniq = pd.DataFrame({chain: unique_seqs})
                feat_dict, coord_dict, failures = _retry_embed_df(
                    df_uniq, chain, show_progress=True, max_retries=100
                )
                if cache_save:
                    self.save_obj(feat_dict, path_feat)
                    self.save_obj(coord_dict, path_coords)
                    logger.info(f"Saved {path_feat} and {path_coords}")
            for seq, err in failures:
                logger.error(f"[create] failed: {seq} | {err}")
            return feat_dict, coord_dict
        # Load/create the cache, then top it up with any sequences new to df_data.
        self.dict[chain+'_feat'], self.dict[chain+'_coord'] = update_with_new_seqs(*get_or_create_dict(chain), chain)
    def pad_and_stack(self, batch_feats, L_max, batch_coors):
        """
        batch_feats: list of [L_i, D] tensors
        batch_coors: list of [L_i, 3] tensors
        return:
            feats: [B, L_max, D]
            coors: [B, L_max, 3]
            mask : [B, L_max] (True for real tokens)
        """
        assert len(batch_feats) == len(batch_coors)
        B = len(batch_feats)
        D = batch_feats[0].shape[-1]
        feats_pad = []
        coors_pad = []
        masks = []
        for x, c in zip(batch_feats, batch_coors):
            L = x.shape[0]
            pad_L = L_max - L
            # pad feats/coors with zeros
            feats_pad.append(torch.nn.functional.pad(x, (0, 0, 0, pad_L))) # [L_max, D]
            coors_pad.append(torch.nn.functional.pad(c, (0, 0, 0, pad_L))) # [L_max, 3]
            m = torch.zeros(L_max, dtype=torch.bool)
            m[:L] = True
            masks.append(m)
        feats = torch.stack(feats_pad, dim=0) # [B, L_max, D]
        coors = torch.stack(coors_pad, dim=0) # [B, L_max, 3]
        mask = torch.stack(masks, dim=0) # [B, L_max]
        return feats, coors, mask
    def forward(self, df_data, chain, device='cpu', re_embed=False, cache_save=False):
        """
        df_data: pd.DataFrame with a column `chain` containing sequences
        chain: str, e.g. "alpha" or "beta"
        device: str, e.g. 'cpu' or 'cuda:0'
        re_embed: bool, whether to re-embed even if cached files exist
        Returns:
            (batch_feats, batch_coors): per-row lists of [L_i, D] / [L_i, 3] tensors.
        Raises:
            ValueError: if any row's sanitized sequence is missing from the dicts.
        """
        self.dict = {}
        self.load_esm_dict(device, df_data, chain, re_embed, cache_save)
        batch_feats = []
        batch_coors = []
        for seq in df_data[chain].astype(str).tolist():
            ss = sanitize_protein_seq(seq)
            if ss in self.dict[chain+'_feat'] and ss in self.dict[chain+'_coord']:
                batch_feats.append(self.dict[chain+'_feat'][ss])
                batch_coors.append(self.dict[chain+'_coord'][ss])
            else:
                raise ValueError(f"Sequence not found in embedding dict: {ss}")
        # L_max = max(x.shape[0] for x in batch_feats)
        return batch_feats, batch_coors
class ResidueProjector(nn.Module):
    """Project a branch's per-residue channels to a common width.

    Uses a learned linear map when input and output widths differ; otherwise
    the tensor passes through untouched.
    """
    def __init__(self, in_dim, out_dim):
        super().__init__()
        if in_dim == out_dim:
            self.proj = nn.Identity()
        else:
            self.proj = nn.Linear(in_dim, out_dim)

    def forward(self, x):
        """x: [B, L, in_dim] -> [B, L, out_dim]"""
        return self.proj(x)
class ResidueDoubleFusion(nn.Module):
    """
    Residue-level fusion of two modalities.

    x1 attends to x2 through multi-head cross-attention; a per-residue sigmoid
    gate then blends the original x1 with the attention output, and the result
    is passed through a linear projection and LayerNorm.

    Typical usage:
        - x1: physicochemical features
        - x2: ESM embeddings (or structure features)
    """
    def __init__(self, dim, num_heads=8, dropout=0.1):
        super().__init__()
        self.dim = dim
        # Cross-attention lets information flow from x2 into x1.
        self.cross_attn = nn.MultiheadAttention(
            embed_dim=dim, num_heads=num_heads, dropout=dropout, batch_first=True
        )
        # Per-residue scalar gate over the concatenated pair.
        self.gate = nn.Sequential(
            nn.Linear(dim * 2, dim),
            nn.ReLU(),
            nn.Linear(dim, 1),
            nn.Sigmoid(),
        )
        # Post-fusion projection and norms for stable training.
        self.out_proj = nn.Linear(dim, dim)
        self.norm_x1 = nn.LayerNorm(dim)
        self.norm_x2 = nn.LayerNorm(dim)
        self.norm_out = nn.LayerNorm(dim)

    def forward(self, x1, x2):
        """
        Args:
            x1: [B, L, D] first modality (acts as queries).
            x2: [B, L, D] second modality (keys/values).
        Returns:
            [B, L, D] fused residue-level representation.
        """
        queries = self.norm_x1(x1)
        keys_values = self.norm_x2(x2)
        attended, _ = self.cross_attn(query=queries, key=keys_values, value=keys_values)
        # Gate between the raw x1 and the attention-enhanced x2 signal.
        mix = self.gate(torch.cat((x1, attended), dim=-1))  # [B, L, 1]
        blended = mix * x1 + (1 - mix) * attended
        return self.norm_out(self.out_proj(blended))
class ResidueTripleFusion(nn.Module):
    """
    Hierarchical fusion of three residue-level branches.

    Stage 1 merges physicochemical features with protein language model
    embeddings; stage 2 merges that intermediate with structure-derived
    features. Each stage is a ResidueDoubleFusion (cross-attention + gating
    + linear projection).
    """
    def __init__(self, dim, num_heads=8, dropout=0.1):
        super().__init__()
        # Stage 1: physicochemical + ESM embeddings.
        self.fuse_phys_esm = ResidueDoubleFusion(dim, num_heads=num_heads, dropout=dropout)
        # Stage 2: (phys+esm) intermediate + structure embeddings.
        self.fuse_f12_struct = ResidueDoubleFusion(dim, num_heads=num_heads, dropout=dropout)

    def forward(self, phys, esm, struct):
        """
        Args:
            phys:   [B, L, D] physicochemical features (e.g., AAindex-based).
            esm:    [B, L, D] protein language model embeddings.
            struct: [B, L, D] structure-derived features (e.g., torsion, RSA).
        Returns:
            [B, L, D] final fused representation.
        """
        intermediate = self.fuse_phys_esm(phys, esm)
        return self.fuse_f12_struct(intermediate, struct)
class BANLayer(nn.Module):
    """
    Bilinear Attention Network Layer with proper 2D masked-softmax.
    v_mask: [B, L_v] True=valid
    q_mask: [B, L_q] True=valid
    """
    def __init__(self, v_dim, q_dim, h_dim, h_out, act='ReLU', dropout=0.2, k=3):
        super().__init__()
        # Threshold: h_out <= c uses an explicit bilinear parameter tensor,
        # larger h_out uses a weight-normalized linear head instead.
        self.c = 32
        self.k = k
        self.v_dim = v_dim
        self.q_dim = q_dim
        self.h_dim = h_dim
        self.h_out = h_out
        self.v_net = FCNet([v_dim, h_dim * self.k], act=act, dropout=dropout)
        self.q_net = FCNet([q_dim, h_dim * self.k], act=act, dropout=dropout)
        if 1 < k:
            # Average-pools groups of k channels back down to h_dim.
            self.p_net = nn.AvgPool1d(self.k, stride=self.k)
        if h_out <= self.c:
            self.h_mat = nn.Parameter(torch.Tensor(1, h_out, 1, h_dim * self.k).normal_())
            self.h_bias = nn.Parameter(torch.Tensor(1, h_out, 1, 1).normal_())
        else:
            self.h_net = weight_norm(nn.Linear(h_dim * self.k, h_out), dim=None)
        self.bn = nn.BatchNorm1d(h_dim)
    def attention_pooling(self, v, q, att_map): # att_map: [B, L_v, L_q]
        """Pool v [B,L_v,K] and q [B,L_q,K] weighted by one head's map -> [B, h_dim]."""
        logits = torch.einsum('bvk,bvq,bqk->bk', (v, att_map, q))
        if 1 < self.k:
            # Average-pool k-channel groups, then rescale by k (sum-pooling).
            logits = self.p_net(logits.unsqueeze(1)).squeeze(1) * self.k
        return logits
    def _masked_softmax_2d(self, logits, v_mask, q_mask):
        """
        logits: [B, h_out, L_v, L_q]
        v_mask: [B, L_v] or None
        q_mask: [B, L_q] or None
        return: probs [B, h_out, L_v, L_q] (masked entries=0, normalized over the valid 2D sub-matrix)
        """
        B, H, Lv, Lq = logits.shape
        device = logits.device
        if v_mask is None:
            v_mask = torch.ones(B, Lv, dtype=torch.bool, device=device)
        if q_mask is None:
            q_mask = torch.ones(B, Lq, dtype=torch.bool, device=device)
        mask2d = (v_mask[:, :, None] & q_mask[:, None, :]) # [B, Lv, Lq]
        mask2d = mask2d[:, None, :, :].expand(B, H, Lv, Lq) # [B, H, Lv, Lq]
        logits = logits.masked_fill(~mask2d, -float('inf'))
        # Perform softmax over the joint Lv*Lq space
        flat = logits.view(B, H, -1) # [B, H, Lv*Lq]
        # Handle extreme cases: some samples may have no valid cells, avoid NaN
        flat = torch.where(torch.isinf(flat), torch.full_like(flat, -1e9), flat)
        flat = F.softmax(flat, dim=-1)
        flat = torch.nan_to_num(flat, nan=0.0) # Safety fallback
        probs = flat.view(B, H, Lv, Lq)
        # Zero out masked positions (for numerical stability & easier visualization)
        probs = probs * mask2d.float()
        return probs
    def forward(self, v, q, v_mask=None, q_mask=None, softmax=True):
        """
        v: [B, L_v, Dv], q: [B, L_q, Dq]
        Returns:
            logits [B, h_dim] pooled joint representation, att_maps [B, h_out, L_v, L_q].
        """
        B, L_v, _ = v.size()
        _, L_q, _ = q.size()
        v_ = self.v_net(v) # [B, L_v, h_dim*k]
        q_ = self.q_net(q) # [B, L_q, h_dim*k]
        if self.h_out <= self.c:
            att_maps = torch.einsum('xhyk,bvk,bqk->bhvq', (self.h_mat, v_, q_)) + self.h_bias # [B,H,Lv,Lq]
        else:
            v_t = v_.transpose(1, 2).unsqueeze(3) # [B, K, Lv, 1]
            q_t = q_.transpose(1, 2).unsqueeze(2) # [B, K, 1, Lq]
            d_ = torch.matmul(v_t, q_t) # [B, K, Lv, Lq]
            att_maps = self.h_net(d_.permute(0, 2, 3, 1)) # [B, Lv, Lq, H]
            att_maps = att_maps.permute(0, 3, 1, 2) # [B, H, Lv, Lq]
        if softmax:
            att_maps = self._masked_softmax_2d(att_maps, v_mask, q_mask)
        else:
            # Even if not softmax, zero out invalid cells to prevent leakage
            if v_mask is not None:
                att_maps = att_maps.masked_fill(~v_mask[:, None, :, None], 0.0)
            if q_mask is not None:
                att_maps = att_maps.masked_fill(~q_mask[:, None, None, :], 0.0)
        # Note: at this point v_ / q_ are still [B, L, K], aligned with att_maps [B,H,Lv,Lq]
        logits = self.attention_pooling(v_, q_, att_maps[:, 0, :, :])
        for i in range(1, self.h_out):
            # Sum the pooled representation across attention heads.
            logits = logits + self.attention_pooling(v_, q_, att_maps[:, i, :, :])
        logits = self.bn(logits)
        return logits, att_maps
class FCNet(nn.Module):
def __init__(self, dims, act='ReLU', dropout=0.2):
super(FCNet, self).__init__()
layers = []
for i in range(len(dims) - 2):
in_dim = dims[i]
out_dim = dims[i + 1]
if 0 < dropout:
layers.append(nn.Dropout(dropout))
layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
if '' != act:
layers.append(getattr(nn, act)())
if 0 < dropout:
layers.append(nn.Dropout(dropout))
layers.append(weight_norm(nn.Linear(dims[-2], dims[-1]), dim=None))
if '' != act:
layers.append(getattr(nn, act)())
self.main = nn.Sequential(*layers)
def forward(self, x):
return self.main(x)
class StackedEGNN(nn.Module):
    """A sequential stack of EGNN layers over (features, coordinates)."""
    def __init__(self, dim, layers, update_coors=False, **egnn_kwargs):
        super().__init__()
        blocks = [
            EGNN(dim=dim, update_coors=update_coors, **egnn_kwargs)
            for _ in range(layers)
        ]
        self.layers = nn.ModuleList(blocks)

    def forward(self, feats, coors, mask=None):
        """
        Args:
            feats: [B, L_max, D] node features.
            coors: [B, L_max, 3] node coordinates.
            mask:  [B, L_max] bool, True for real tokens.
        Returns:
            (feats, coors) after all EGNN layers.
        """
        for block in self.layers:
            feats, coors = block(feats, coors, mask=mask)
        return feats, coors
class FocalLoss(nn.Module):
    """Binary focal loss on logits.

    loss = alpha_t * (1 - p_t)**gamma * BCE, where p_t = exp(-BCE) and
    alpha_t is `alpha` for positive targets, `1 - alpha` for negatives.
    """
    def __init__(self, alpha=0.5, gamma=2, reduction='mean'):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction

    def forward(self, inputs, targets):
        """inputs: raw logits; targets: 0/1 floats of the same shape."""
        bce = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
        pt = torch.exp(-bce)
        alpha_t = targets * self.alpha + (1 - targets) * (1 - self.alpha)
        focal = alpha_t * (1 - pt) ** self.gamma * bce
        if self.reduction == 'sum':
            return torch.sum(focal)
        if self.reduction == 'mean':
            return torch.mean(focal)
        return focal
# ===================================== Main Model (Full Version) ===========================================
class PeptideHLABindingPredictor(nn.Module):
def __init__(
self,
phys_dim=20, # Dimension of Physicochemical features
pep_dim=256, # Unified peptide channel dimension
hla_dim=256, # Unified HLA channel dimension
bilinear_dim=256,
pseudo_seq_pos=None, # Pocket positions (assumed 0-based and within [0,179])
device="cuda:0",
loss_fn='bce',
alpha=0.5,
gamma=2.0,
dropout=0.2,
pos_weights=None
):
super().__init__()
self.device = device
self.pep_dim = pep_dim
self.hla_dim = hla_dim
self.bilinear_dim = bilinear_dim
self.alpha = alpha
self.gamma = gamma
self.dropout = dropout
if loss_fn == 'bce':
self.loss_fn = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weights]) if pos_weights is not None else None)
elif loss_fn == 'focal':
self.loss_fn = FocalLoss(alpha=alpha, gamma=gamma)
else:
raise ValueError(f"Unknown loss function: {loss_fn}")
self.se3_model = StackedEGNN(
dim=17, layers=3
)
self.max_pep_len = 20
self.max_hla_len = 180
self.pep_pos_embed = nn.Parameter(torch.randn(self.max_pep_len, pep_dim))
self.hla_pos_embed = nn.Parameter(torch.randn(self.max_hla_len, hla_dim))
# —— Branch projection to unified dimension (per residue) ——
# peptide branch (Physicochem -> pep_dim, ESM2(1280) -> pep_dim)
self.proj_pep_phys = ResidueProjector(in_dim=phys_dim, out_dim=pep_dim) # Your PhysEnc output dim set to pep_dim
self.proj_pep_esm = ResidueProjector(in_dim=1280, out_dim=pep_dim)
# HLA branch (Physicochem -> hla_dim, ESM2(1280) -> hla_dim, Struct(17/or se3_out) -> hla_dim)
self.proj_hla_phys = ResidueProjector(in_dim=phys_dim, out_dim=hla_dim) # Your PhysEnc output dim set to hla_dim
self.proj_hla_esm = ResidueProjector(in_dim=1280, out_dim=hla_dim)
self.proj_hla_se3 = ResidueProjector(in_dim=17, out_dim=hla_dim) # Let se3_model output dim be hla_dim
# —— Gate fusion (per residue) ——
self.gate_pep = ResidueDoubleFusion(pep_dim) # pep_phys × pep_esm
self.gate_hla = ResidueTripleFusion(hla_dim) # hla_phys × hla_esm × hla_struct
d_model = self.pep_dim
n_heads = 8
# 1. For "Peptide queries HLA" (pep_q_hla_kv)
self.cross_attn_pep_hla = nn.MultiheadAttention(
embed_dim=d_model,
num_heads=n_heads,
dropout=self.dropout,
batch_first=True
)
self.norm_cross_pep = nn.LayerNorm(d_model)
# 2. For "HLA queries Peptide" (hla_q_pep_kv)
self.cross_attn_hla_pep = nn.MultiheadAttention(
embed_dim=d_model,
num_heads=n_heads,
dropout=self.dropout,
batch_first=True
)
self.norm_cross_hla = nn.LayerNorm(d_model)
# —— Interaction module (Bilinear attention map) ——
self.bi_attn = BANLayer(v_dim=pep_dim, q_dim=hla_dim, h_dim=bilinear_dim, h_out=4, k=3)
# —— Head ——
self.head = nn.Sequential(
nn.Linear(bilinear_dim, bilinear_dim),
nn.ReLU(),
nn.Linear(bilinear_dim, 1)
)
# —— Pocket positions ——
if pseudo_seq_pos is None:
pseudo_seq_pos = [i-2 for i in [7, 9, 24, 45, 59, 62, 63, 66, 67, 69, 70, 73, 74, 76, 77, 80, 81, 84, 95, 97, 99, 114, 116, 118, 143, 147, 150, 152, 156, 158, 159, 163, 167, 171]]
self.register_buffer("contact_idx", torch.tensor(pseudo_seq_pos, dtype=torch.long))